1 // cmd/9l/optab.c, cmd/9l/asmout.c from Vita Nuova.
3 // Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
4 // Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
5 // Portions Copyright © 1997-1999 Vita Nuova Limited
6 // Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com)
7 // Portions Copyright © 2004,2006 Bruce Ellis
8 // Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
9 // Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others
10 // Portions Copyright © 2009 The Go Authors. All rights reserved.
12 // Permission is hereby granted, free of charge, to any person obtaining a copy
13 // of this software and associated documentation files (the "Software"), to deal
14 // in the Software without restriction, including without limitation the rights
15 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
16 // copies of the Software, and to permit persons to whom the Software is
17 // furnished to do so, subject to the following conditions:
19 // The above copyright notice and this permission notice shall be included in
20 // all copies or substantial portions of the Software.
22 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
23 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
24 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
25 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
26 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
27 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
42 // ctxt9 holds state while assembling a single function.
43 // Each function gets a fresh ctxt9.
44 // This allows for multiple functions to be safely concurrently assembled.
54 // Instruction layout.
58 funcAlignMask = funcAlign - 1
67 a1 uint8 // p.From argument (obj.Addr). p is of type obj.Prog.
68 a2 uint8 // p.Reg argument (int16 Register)
69 a3 uint8 // p.RestArgs[0] (obj.AddrPos)
70 a4 uint8 // p.RestArgs[1]
71 a5 uint8 // p.RestArgs[2]
72 a6 uint8 // p.To (obj.Addr)
73 type_ int8 // cases in asmout below. E.g., 44 = st r,(ra+rb); 45 = ld (ra+rb), r
74 size int8 // Text space in bytes to lay operation
77 // optab contains an array to be sliced of accepted operand combinations for an
78 // instruction. Unused arguments and fields are not explicitly enumerated, and
79 // should not be listed for clarity. Unused arguments and values should always
80 // assume the default value for the given type.
82 // optab does not list every valid ppc64 opcode, it enumerates representative
83 // operand combinations for a class of instruction. The variable oprange indexes
84 // all valid ppc64 opcodes.
86 // oprange is initialized to point to a slice within optab which contains the valid
87 // operand combinations for a given instruction. This is initialized from buildop.
89 // Likewise, each slice of optab is dynamically sorted using the ocmp Sort interface
90 // to arrange entries to minimize text size of each opcode.
92 {as: obj.ATEXT, a1: C_LOREG, a6: C_TEXTSIZE, type_: 0, size: 0},
93 {as: obj.ATEXT, a1: C_LOREG, a3: C_LCON, a6: C_TEXTSIZE, type_: 0, size: 0},
94 {as: obj.ATEXT, a1: C_ADDR, a6: C_TEXTSIZE, type_: 0, size: 0},
95 {as: obj.ATEXT, a1: C_ADDR, a3: C_LCON, a6: C_TEXTSIZE, type_: 0, size: 0},
97 {as: AADD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
98 {as: AADD, a1: C_REG, a6: C_REG, type_: 2, size: 4},
99 {as: AADD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
100 {as: AADD, a1: C_SCON, a6: C_REG, type_: 4, size: 4},
101 {as: AADD, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
102 {as: AADD, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4},
103 {as: AADD, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 20, size: 4},
104 {as: AADD, a1: C_UCON, a6: C_REG, type_: 20, size: 4},
105 {as: AADD, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 22, size: 8},
106 {as: AADD, a1: C_ANDCON, a6: C_REG, type_: 22, size: 8},
107 {as: AADD, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12},
108 {as: AADD, a1: C_LCON, a6: C_REG, type_: 22, size: 12},
109 {as: AADDIS, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 20, size: 4},
110 {as: AADDIS, a1: C_ADDCON, a6: C_REG, type_: 20, size: 4},
111 {as: AADDC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
112 {as: AADDC, a1: C_REG, a6: C_REG, type_: 2, size: 4},
113 {as: AADDC, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
114 {as: AADDC, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4},
115 {as: AADDC, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12},
116 {as: AADDC, a1: C_LCON, a6: C_REG, type_: 22, size: 12},
117 {as: AAND, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, /* logical, no literal */
118 {as: AAND, a1: C_REG, a6: C_REG, type_: 6, size: 4},
119 {as: AANDCC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
120 {as: AANDCC, a1: C_REG, a6: C_REG, type_: 6, size: 4},
121 {as: AANDCC, a1: C_ANDCON, a6: C_REG, type_: 58, size: 4},
122 {as: AANDCC, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 58, size: 4},
123 {as: AANDCC, a1: C_UCON, a6: C_REG, type_: 59, size: 4},
124 {as: AANDCC, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
125 {as: AANDCC, a1: C_ADDCON, a6: C_REG, type_: 23, size: 8},
126 {as: AANDCC, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 23, size: 8},
127 {as: AANDCC, a1: C_LCON, a6: C_REG, type_: 23, size: 12},
128 {as: AANDCC, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 23, size: 12},
129 {as: AANDISCC, a1: C_ANDCON, a6: C_REG, type_: 59, size: 4},
130 {as: AANDISCC, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
131 {as: AMULLW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
132 {as: AMULLW, a1: C_REG, a6: C_REG, type_: 2, size: 4},
133 {as: AMULLW, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
134 {as: AMULLW, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4},
135 {as: AMULLW, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
136 {as: AMULLW, a1: C_ANDCON, a6: C_REG, type_: 4, size: 4},
137 {as: AMULLW, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12},
138 {as: AMULLW, a1: C_LCON, a6: C_REG, type_: 22, size: 12},
139 {as: ASUBC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 10, size: 4},
140 {as: ASUBC, a1: C_REG, a6: C_REG, type_: 10, size: 4},
141 {as: ASUBC, a1: C_REG, a3: C_ADDCON, a6: C_REG, type_: 27, size: 4},
142 {as: ASUBC, a1: C_REG, a3: C_LCON, a6: C_REG, type_: 28, size: 12},
143 {as: AOR, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, /* logical, literal not cc (or/xor) */
144 {as: AOR, a1: C_REG, a6: C_REG, type_: 6, size: 4},
145 {as: AOR, a1: C_ANDCON, a6: C_REG, type_: 58, size: 4},
146 {as: AOR, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 58, size: 4},
147 {as: AOR, a1: C_UCON, a6: C_REG, type_: 59, size: 4},
148 {as: AOR, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
149 {as: AOR, a1: C_ADDCON, a6: C_REG, type_: 23, size: 8},
150 {as: AOR, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 23, size: 8},
151 {as: AOR, a1: C_LCON, a6: C_REG, type_: 23, size: 12},
152 {as: AOR, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 23, size: 12},
153 {as: AORIS, a1: C_ANDCON, a6: C_REG, type_: 59, size: 4},
154 {as: AORIS, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
155 {as: ADIVW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4}, /* op r1[,r2],r3 */
156 {as: ADIVW, a1: C_REG, a6: C_REG, type_: 2, size: 4},
157 {as: ASUB, a1: C_REG, a2: C_REG, a6: C_REG, type_: 10, size: 4}, /* op r2[,r1],r3 */
158 {as: ASUB, a1: C_REG, a6: C_REG, type_: 10, size: 4},
159 {as: ASLW, a1: C_REG, a6: C_REG, type_: 6, size: 4},
160 {as: ASLW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
161 {as: ASLD, a1: C_REG, a6: C_REG, type_: 6, size: 4},
162 {as: ASLD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
163 {as: ASLD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 25, size: 4},
164 {as: ASLD, a1: C_SCON, a6: C_REG, type_: 25, size: 4},
165 {as: AEXTSWSLI, a1: C_SCON, a6: C_REG, type_: 25, size: 4},
166 {as: AEXTSWSLI, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 25, size: 4},
167 {as: ASLW, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 57, size: 4},
168 {as: ASLW, a1: C_SCON, a6: C_REG, type_: 57, size: 4},
169 {as: ASRAW, a1: C_REG, a6: C_REG, type_: 6, size: 4},
170 {as: ASRAW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
171 {as: ASRAW, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 56, size: 4},
172 {as: ASRAW, a1: C_SCON, a6: C_REG, type_: 56, size: 4},
173 {as: ASRAD, a1: C_REG, a6: C_REG, type_: 6, size: 4},
174 {as: ASRAD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
175 {as: ASRAD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 56, size: 4},
176 {as: ASRAD, a1: C_SCON, a6: C_REG, type_: 56, size: 4},
177 {as: ARLWMI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 62, size: 4},
178 {as: ARLWMI, a1: C_SCON, a2: C_REG, a3: C_SCON, a4: C_SCON, a6: C_REG, type_: 102, size: 4},
179 {as: ARLWMI, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 63, size: 4},
180 {as: ARLWMI, a1: C_REG, a2: C_REG, a3: C_SCON, a4: C_SCON, a6: C_REG, type_: 103, size: 4},
181 {as: ACLRLSLWI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 62, size: 4},
182 {as: ARLDMI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 30, size: 4},
183 {as: ARLDC, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 29, size: 4},
184 {as: ARLDCL, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 29, size: 4},
185 {as: ARLDCL, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
186 {as: ARLDICL, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
187 {as: ARLDICL, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
188 {as: ARLDCL, a1: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
189 {as: AFADD, a1: C_FREG, a6: C_FREG, type_: 2, size: 4},
190 {as: AFADD, a1: C_FREG, a2: C_FREG, a6: C_FREG, type_: 2, size: 4},
191 {as: AFABS, a1: C_FREG, a6: C_FREG, type_: 33, size: 4},
192 {as: AFABS, a6: C_FREG, type_: 33, size: 4},
193 {as: AFMADD, a1: C_FREG, a2: C_FREG, a3: C_FREG, a6: C_FREG, type_: 34, size: 4},
194 {as: AFMUL, a1: C_FREG, a6: C_FREG, type_: 32, size: 4},
195 {as: AFMUL, a1: C_FREG, a2: C_FREG, a6: C_FREG, type_: 32, size: 4},
197 {as: AMOVBU, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
198 {as: AMOVBU, a1: C_SOREG, a6: C_REG, type_: 8, size: 8},
200 {as: AMOVBZU, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
201 {as: AMOVBZU, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
203 {as: AMOVHBR, a1: C_REG, a6: C_ZOREG, type_: 44, size: 4},
204 {as: AMOVHBR, a1: C_ZOREG, a6: C_REG, type_: 45, size: 4},
206 {as: AMOVB, a1: C_ADDR, a6: C_REG, type_: 75, size: 12},
207 {as: AMOVB, a1: C_LOREG, a6: C_REG, type_: 36, size: 12},
208 {as: AMOVB, a1: C_SOREG, a6: C_REG, type_: 8, size: 8},
209 {as: AMOVB, a1: C_REG, a6: C_ADDR, type_: 74, size: 8},
210 {as: AMOVB, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
211 {as: AMOVB, a1: C_REG, a6: C_LOREG, type_: 35, size: 8},
212 {as: AMOVB, a1: C_REG, a6: C_REG, type_: 13, size: 4},
214 {as: AMOVBZ, a1: C_ADDR, a6: C_REG, type_: 75, size: 8},
215 {as: AMOVBZ, a1: C_LOREG, a6: C_REG, type_: 36, size: 8},
216 {as: AMOVBZ, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
217 {as: AMOVBZ, a1: C_REG, a6: C_ADDR, type_: 74, size: 8},
218 {as: AMOVBZ, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
219 {as: AMOVBZ, a1: C_REG, a6: C_LOREG, type_: 35, size: 8},
220 {as: AMOVBZ, a1: C_REG, a6: C_REG, type_: 13, size: 4},
222 {as: AMOVD, a1: C_ADDCON, a6: C_REG, type_: 3, size: 4},
223 {as: AMOVD, a1: C_ANDCON, a6: C_REG, type_: 3, size: 4},
224 {as: AMOVD, a1: C_UCON, a6: C_REG, type_: 3, size: 4},
225 {as: AMOVD, a1: C_LCON, a6: C_REG, type_: 19, size: 8},
226 {as: AMOVD, a1: C_SACON, a6: C_REG, type_: 3, size: 4},
227 {as: AMOVD, a1: C_LACON, a6: C_REG, type_: 26, size: 8},
228 {as: AMOVD, a1: C_ADDR, a6: C_REG, type_: 75, size: 8},
229 {as: AMOVD, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
230 {as: AMOVD, a1: C_LOREG, a6: C_REG, type_: 36, size: 8},
231 {as: AMOVD, a1: C_TLS_LE, a6: C_REG, type_: 79, size: 8},
232 {as: AMOVD, a1: C_TLS_IE, a6: C_REG, type_: 80, size: 12},
233 {as: AMOVD, a1: C_SPR, a6: C_REG, type_: 66, size: 4},
234 {as: AMOVD, a1: C_REG, a6: C_ADDR, type_: 74, size: 8},
235 {as: AMOVD, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
236 {as: AMOVD, a1: C_REG, a6: C_LOREG, type_: 35, size: 8},
237 {as: AMOVD, a1: C_REG, a6: C_SPR, type_: 66, size: 4},
238 {as: AMOVD, a1: C_REG, a6: C_REG, type_: 13, size: 4},
240 {as: AMOVW, a1: C_ADDCON, a6: C_REG, type_: 3, size: 4},
241 {as: AMOVW, a1: C_ANDCON, a6: C_REG, type_: 3, size: 4},
242 {as: AMOVW, a1: C_UCON, a6: C_REG, type_: 3, size: 4},
243 {as: AMOVW, a1: C_LCON, a6: C_REG, type_: 19, size: 8},
244 {as: AMOVW, a1: C_SACON, a6: C_REG, type_: 3, size: 4},
245 {as: AMOVW, a1: C_LACON, a6: C_REG, type_: 26, size: 8},
246 {as: AMOVW, a1: C_ADDR, a6: C_REG, type_: 75, size: 8},
247 {as: AMOVW, a1: C_CREG, a6: C_REG, type_: 68, size: 4},
248 {as: AMOVW, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
249 {as: AMOVW, a1: C_LOREG, a6: C_REG, type_: 36, size: 8},
250 {as: AMOVW, a1: C_SPR, a6: C_REG, type_: 66, size: 4},
251 {as: AMOVW, a1: C_REG, a6: C_ADDR, type_: 74, size: 8},
252 {as: AMOVW, a1: C_REG, a6: C_CREG, type_: 69, size: 4},
253 {as: AMOVW, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
254 {as: AMOVW, a1: C_REG, a6: C_LOREG, type_: 35, size: 8},
255 {as: AMOVW, a1: C_REG, a6: C_SPR, type_: 66, size: 4},
256 {as: AMOVW, a1: C_REG, a6: C_REG, type_: 13, size: 4},
258 {as: AFMOVD, a1: C_ADDCON, a6: C_FREG, type_: 24, size: 8},
259 {as: AFMOVD, a1: C_SOREG, a6: C_FREG, type_: 8, size: 4},
260 {as: AFMOVD, a1: C_LOREG, a6: C_FREG, type_: 36, size: 8},
261 {as: AFMOVD, a1: C_ZCON, a6: C_FREG, type_: 24, size: 4},
262 {as: AFMOVD, a1: C_ADDR, a6: C_FREG, type_: 75, size: 8},
263 {as: AFMOVD, a1: C_FREG, a6: C_FREG, type_: 33, size: 4},
264 {as: AFMOVD, a1: C_FREG, a6: C_SOREG, type_: 7, size: 4},
265 {as: AFMOVD, a1: C_FREG, a6: C_LOREG, type_: 35, size: 8},
266 {as: AFMOVD, a1: C_FREG, a6: C_ADDR, type_: 74, size: 8},
268 {as: AFMOVSX, a1: C_ZOREG, a6: C_FREG, type_: 45, size: 4},
269 {as: AFMOVSX, a1: C_FREG, a6: C_ZOREG, type_: 44, size: 4},
271 {as: AFMOVSZ, a1: C_ZOREG, a6: C_FREG, type_: 45, size: 4},
273 {as: AMOVFL, a1: C_CREG, a6: C_CREG, type_: 67, size: 4},
274 {as: AMOVFL, a1: C_FPSCR, a6: C_CREG, type_: 73, size: 4},
275 {as: AMOVFL, a1: C_FPSCR, a6: C_FREG, type_: 53, size: 4},
276 {as: AMOVFL, a1: C_FREG, a3: C_LCON, a6: C_FPSCR, type_: 64, size: 4},
277 {as: AMOVFL, a1: C_FREG, a6: C_FPSCR, type_: 64, size: 4},
278 {as: AMOVFL, a1: C_LCON, a6: C_FPSCR, type_: 65, size: 4},
279 {as: AMOVFL, a1: C_REG, a6: C_CREG, type_: 69, size: 4},
280 {as: AMOVFL, a1: C_REG, a6: C_LCON, type_: 69, size: 4},
282 {as: ASYSCALL, type_: 5, size: 4},
283 {as: ASYSCALL, a1: C_REG, type_: 77, size: 12},
284 {as: ASYSCALL, a1: C_SCON, type_: 77, size: 12},
285 {as: ABEQ, a6: C_SBRA, type_: 16, size: 4},
286 {as: ABEQ, a1: C_CREG, a6: C_SBRA, type_: 16, size: 4},
287 {as: ABR, a6: C_LBRA, type_: 11, size: 4},
288 {as: ABR, a6: C_LBRAPIC, type_: 11, size: 8},
289 {as: ABC, a1: C_SCON, a2: C_REG, a6: C_SBRA, type_: 16, size: 4},
290 {as: ABC, a1: C_SCON, a2: C_REG, a6: C_LBRA, type_: 17, size: 4},
291 {as: ABR, a6: C_LR, type_: 18, size: 4},
292 {as: ABR, a3: C_SCON, a6: C_LR, type_: 18, size: 4},
293 {as: ABR, a6: C_CTR, type_: 18, size: 4},
294 {as: ABR, a1: C_REG, a6: C_CTR, type_: 18, size: 4},
295 {as: ABR, a6: C_ZOREG, type_: 15, size: 8},
296 {as: ABC, a2: C_REG, a6: C_LR, type_: 18, size: 4},
297 {as: ABC, a2: C_REG, a6: C_CTR, type_: 18, size: 4},
298 {as: ABC, a1: C_SCON, a2: C_REG, a6: C_LR, type_: 18, size: 4},
299 {as: ABC, a1: C_SCON, a2: C_REG, a6: C_CTR, type_: 18, size: 4},
300 {as: ABC, a6: C_ZOREG, type_: 15, size: 8},
301 {as: ASYNC, type_: 46, size: 4},
302 {as: AWORD, a1: C_LCON, type_: 40, size: 4},
303 {as: ADWORD, a1: C_LCON, type_: 31, size: 8},
304 {as: ADWORD, a1: C_DCON, type_: 31, size: 8},
305 {as: ADWORD, a1: C_LACON, type_: 31, size: 8},
306 {as: AADDME, a1: C_REG, a6: C_REG, type_: 47, size: 4},
307 {as: AEXTSB, a1: C_REG, a6: C_REG, type_: 48, size: 4},
308 {as: AEXTSB, a6: C_REG, type_: 48, size: 4},
309 {as: AISEL, a1: C_LCON, a2: C_REG, a3: C_REG, a6: C_REG, type_: 84, size: 4},
310 {as: AISEL, a1: C_ZCON, a2: C_REG, a3: C_REG, a6: C_REG, type_: 84, size: 4},
311 {as: ANEG, a1: C_REG, a6: C_REG, type_: 47, size: 4},
312 {as: ANEG, a6: C_REG, type_: 47, size: 4},
313 {as: AREM, a1: C_REG, a6: C_REG, type_: 50, size: 12},
314 {as: AREM, a1: C_REG, a2: C_REG, a6: C_REG, type_: 50, size: 12},
315 {as: AREMU, a1: C_REG, a6: C_REG, type_: 50, size: 16},
316 {as: AREMU, a1: C_REG, a2: C_REG, a6: C_REG, type_: 50, size: 16},
317 {as: AREMD, a1: C_REG, a6: C_REG, type_: 51, size: 12},
318 {as: AREMD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 51, size: 12},
319 {as: AMTFSB0, a1: C_SCON, type_: 52, size: 4},
320 /* Other ISA 2.05+ instructions */
321 {as: APOPCNTD, a1: C_REG, a6: C_REG, type_: 93, size: 4}, /* population count, x-form */
322 {as: ACMPB, a1: C_REG, a2: C_REG, a6: C_REG, type_: 92, size: 4}, /* compare byte, x-form */
323 {as: ACMPEQB, a1: C_REG, a2: C_REG, a6: C_CREG, type_: 92, size: 4}, /* compare equal byte, x-form, ISA 3.0 */
324 {as: ACMPEQB, a1: C_REG, a6: C_REG, type_: 70, size: 4},
325 {as: AFTDIV, a1: C_FREG, a2: C_FREG, a6: C_SCON, type_: 92, size: 4}, /* floating test for sw divide, x-form */
326 {as: AFTSQRT, a1: C_FREG, a6: C_SCON, type_: 93, size: 4}, /* floating test for sw square root, x-form */
327 {as: ACOPY, a1: C_REG, a6: C_REG, type_: 92, size: 4}, /* copy/paste facility, x-form */
328 {as: ADARN, a1: C_SCON, a6: C_REG, type_: 92, size: 4}, /* deliver random number, x-form */
329 {as: ALDMX, a1: C_SOREG, a6: C_REG, type_: 45, size: 4}, /* load doubleword monitored, x-form */
330 {as: AMADDHD, a1: C_REG, a2: C_REG, a3: C_REG, a6: C_REG, type_: 83, size: 4}, /* multiply-add high/low doubleword, va-form */
331 {as: AADDEX, a1: C_REG, a2: C_REG, a3: C_SCON, a6: C_REG, type_: 94, size: 4}, /* add extended using alternate carry, z23-form */
332 {as: ACRAND, a1: C_CREG, a6: C_CREG, type_: 2, size: 4}, /* logical ops for condition registers xl-form */
334 /* Vector instructions */
337 {as: ALV, a1: C_SOREG, a6: C_VREG, type_: 45, size: 4}, /* vector load, x-form */
340 {as: ASTV, a1: C_VREG, a6: C_SOREG, type_: 44, size: 4}, /* vector store, x-form */
343 {as: AVAND, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector and, vx-form */
344 {as: AVOR, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector or, vx-form */
347 {as: AVADDUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add unsigned modulo, vx-form */
348 {as: AVADDCU, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add & write carry unsigned, vx-form */
349 {as: AVADDUS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add unsigned saturate, vx-form */
350 {as: AVADDSS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add signed saturate, vx-form */
351 {as: AVADDE, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector add extended, va-form */
353 /* Vector subtract */
354 {as: AVSUBUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract unsigned modulo, vx-form */
355 {as: AVSUBCU, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract & write carry unsigned, vx-form */
356 {as: AVSUBUS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract unsigned saturate, vx-form */
357 {as: AVSUBSS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract signed saturate, vx-form */
358 {as: AVSUBE, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector subtract extended, va-form */
360 /* Vector multiply */
361 {as: AVMULESB, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector multiply, vx-form */
362 {as: AVPMSUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector polynomial multiply & sum, vx-form */
363 {as: AVMSUMUDM, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector multiply-sum, va-form */
366 {as: AVR, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector rotate, vx-form */
369 {as: AVS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector shift, vx-form */
370 {as: AVSA, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector shift algebraic, vx-form */
371 {as: AVSOI, a1: C_ANDCON, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector shift by octet immediate, va-form */
374 {as: AVCLZ, a1: C_VREG, a6: C_VREG, type_: 85, size: 4}, /* vector count leading zeros, vx-form */
375 {as: AVPOPCNT, a1: C_VREG, a6: C_VREG, type_: 85, size: 4}, /* vector population count, vx-form */
378 {as: AVCMPEQ, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare equal, vc-form */
379 {as: AVCMPGT, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare greater than, vc-form */
380 {as: AVCMPNEZB, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare not equal, vx-form */
383 {as: AVMRGOW, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector merge odd word, vx-form */
386 {as: AVPERM, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector permute, va-form */
388 /* Vector bit permute */
389 {as: AVBPERMQ, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector bit permute, vx-form */
392 {as: AVSEL, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector select, va-form */
395 {as: AVSPLTB, a1: C_SCON, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector splat, vx-form */
396 {as: AVSPLTB, a1: C_ADDCON, a2: C_VREG, a6: C_VREG, type_: 82, size: 4},
397 {as: AVSPLTISB, a1: C_SCON, a6: C_VREG, type_: 82, size: 4}, /* vector splat immediate, vx-form */
398 {as: AVSPLTISB, a1: C_ADDCON, a6: C_VREG, type_: 82, size: 4},
401 {as: AVCIPH, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES cipher, vx-form */
402 {as: AVNCIPH, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES inverse cipher, vx-form */
403 {as: AVSBOX, a1: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES subbytes, vx-form */
406 {as: AVSHASIGMA, a1: C_ANDCON, a2: C_VREG, a3: C_ANDCON, a6: C_VREG, type_: 82, size: 4}, /* vector SHA sigma, vx-form */
408 /* VSX vector load */
409 {as: ALXVD2X, a1: C_SOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx vector load, xx1-form */
410 {as: ALXV, a1: C_SOREG, a6: C_VSREG, type_: 96, size: 4}, /* vsx vector load, dq-form */
411 {as: ALXVL, a1: C_REG, a2: C_REG, a6: C_VSREG, type_: 98, size: 4}, /* vsx vector load length */
413 /* VSX vector store */
414 {as: ASTXVD2X, a1: C_VSREG, a6: C_SOREG, type_: 86, size: 4}, /* vsx vector store, xx1-form */
415 {as: ASTXV, a1: C_VSREG, a6: C_SOREG, type_: 97, size: 4}, /* vsx vector store, dq-form */
416 {as: ASTXVL, a1: C_VSREG, a2: C_REG, a6: C_REG, type_: 99, size: 4}, /* vsx vector store with length x-form */
418 /* VSX scalar load */
419 {as: ALXSDX, a1: C_SOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx scalar load, xx1-form */
421 /* VSX scalar store */
422 {as: ASTXSDX, a1: C_VSREG, a6: C_SOREG, type_: 86, size: 4}, /* vsx scalar store, xx1-form */
424 /* VSX scalar as integer load */
425 {as: ALXSIWAX, a1: C_SOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx scalar as integer load, xx1-form */
427 /* VSX scalar store as integer */
428 {as: ASTXSIWX, a1: C_VSREG, a6: C_SOREG, type_: 86, size: 4}, /* vsx scalar as integer store, xx1-form */
430 /* VSX move from VSR */
431 {as: AMFVSRD, a1: C_VSREG, a6: C_REG, type_: 88, size: 4},
432 {as: AMFVSRD, a1: C_FREG, a6: C_REG, type_: 88, size: 4},
434 /* VSX move to VSR */
435 {as: AMTVSRD, a1: C_REG, a6: C_VSREG, type_: 104, size: 4},
436 {as: AMTVSRD, a1: C_REG, a6: C_FREG, type_: 104, size: 4},
437 {as: AMTVSRDD, a1: C_REG, a2: C_REG, a6: C_VSREG, type_: 104, size: 4},
440 {as: AXXLAND, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx and, xx3-form */
441 {as: AXXLOR, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx or, xx3-form */
444 {as: AXXSEL, a1: C_VSREG, a2: C_VSREG, a3: C_VSREG, a6: C_VSREG, type_: 91, size: 4}, /* vsx select, xx4-form */
447 {as: AXXMRGHW, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx merge, xx3-form */
450 {as: AXXSPLTW, a1: C_VSREG, a3: C_SCON, a6: C_VSREG, type_: 89, size: 4}, /* vsx splat, xx2-form */
451 {as: AXXSPLTIB, a1: C_SCON, a6: C_VSREG, type_: 100, size: 4}, /* vsx splat, xx2-form */
454 {as: AXXPERM, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx permute, xx3-form */
457 {as: AXXSLDWI, a1: C_VSREG, a2: C_VSREG, a3: C_SCON, a6: C_VSREG, type_: 90, size: 4}, /* vsx shift immediate, xx3-form */
459 /* VSX reverse bytes */
460 {as: AXXBRQ, a1: C_VSREG, a6: C_VSREG, type_: 101, size: 4}, /* vsx reverse bytes */
462 /* VSX scalar FP-FP conversion */
463 {as: AXSCVDPSP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar fp-fp conversion, xx2-form */
465 /* VSX vector FP-FP conversion */
466 {as: AXVCVDPSP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector fp-fp conversion, xx2-form */
468 /* VSX scalar FP-integer conversion */
469 {as: AXSCVDPSXDS, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar fp-integer conversion, xx2-form */
471 /* VSX scalar integer-FP conversion */
472 {as: AXSCVSXDDP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar integer-fp conversion, xx2-form */
474 /* VSX vector FP-integer conversion */
475 {as: AXVCVDPSXDS, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector fp-integer conversion, xx2-form */
477 /* VSX vector integer-FP conversion */
478 {as: AXVCVSXDDP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector integer-fp conversion, xx2-form */
480 {as: ACMP, a1: C_REG, a6: C_REG, type_: 70, size: 4},
481 {as: ACMP, a1: C_REG, a2: C_REG, a6: C_REG, type_: 70, size: 4},
482 {as: ACMP, a1: C_REG, a6: C_ADDCON, type_: 71, size: 4},
483 {as: ACMP, a1: C_REG, a2: C_REG, a6: C_ADDCON, type_: 71, size: 4},
484 {as: ACMPU, a1: C_REG, a6: C_REG, type_: 70, size: 4},
485 {as: ACMPU, a1: C_REG, a2: C_REG, a6: C_REG, type_: 70, size: 4},
486 {as: ACMPU, a1: C_REG, a6: C_ANDCON, type_: 71, size: 4},
487 {as: ACMPU, a1: C_REG, a2: C_REG, a6: C_ANDCON, type_: 71, size: 4},
488 {as: AFCMPO, a1: C_FREG, a6: C_FREG, type_: 70, size: 4},
489 {as: AFCMPO, a1: C_FREG, a2: C_REG, a6: C_FREG, type_: 70, size: 4},
490 {as: ATW, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 60, size: 4},
491 {as: ATW, a1: C_LCON, a2: C_REG, a6: C_ADDCON, type_: 61, size: 4},
492 {as: ADCBF, a1: C_ZOREG, type_: 43, size: 4},
493 {as: ADCBF, a1: C_SOREG, type_: 43, size: 4},
494 {as: ADCBF, a1: C_ZOREG, a2: C_REG, a6: C_SCON, type_: 43, size: 4},
495 {as: ADCBF, a1: C_SOREG, a6: C_SCON, type_: 43, size: 4},
496 {as: AECOWX, a1: C_REG, a2: C_REG, a6: C_ZOREG, type_: 44, size: 4},
497 {as: AECIWX, a1: C_ZOREG, a2: C_REG, a6: C_REG, type_: 45, size: 4},
498 {as: AECOWX, a1: C_REG, a6: C_ZOREG, type_: 44, size: 4},
499 {as: AECIWX, a1: C_ZOREG, a6: C_REG, type_: 45, size: 4},
500 {as: ALDAR, a1: C_ZOREG, a6: C_REG, type_: 45, size: 4},
501 {as: ALDAR, a1: C_ZOREG, a3: C_ANDCON, a6: C_REG, type_: 45, size: 4},
502 {as: AEIEIO, type_: 46, size: 4},
503 {as: ATLBIE, a1: C_REG, type_: 49, size: 4},
504 {as: ATLBIE, a1: C_SCON, a6: C_REG, type_: 49, size: 4},
505 {as: ASLBMFEE, a1: C_REG, a6: C_REG, type_: 55, size: 4},
506 {as: ASLBMTE, a1: C_REG, a6: C_REG, type_: 55, size: 4},
507 {as: ASTSW, a1: C_REG, a6: C_ZOREG, type_: 44, size: 4},
508 {as: ASTSW, a1: C_REG, a3: C_LCON, a6: C_ZOREG, type_: 41, size: 4},
509 {as: ALSW, a1: C_ZOREG, a6: C_REG, type_: 45, size: 4},
510 {as: ALSW, a1: C_ZOREG, a3: C_LCON, a6: C_REG, type_: 42, size: 4},
511 {as: obj.AUNDEF, type_: 78, size: 4},
512 {as: obj.APCDATA, a1: C_LCON, a6: C_LCON, type_: 0, size: 0},
513 {as: obj.AFUNCDATA, a1: C_SCON, a6: C_ADDR, type_: 0, size: 0},
514 {as: obj.ANOP, type_: 0, size: 0},
515 {as: obj.ANOP, a1: C_LCON, type_: 0, size: 0}, // NOP operand variations added for #40689
516 {as: obj.ANOP, a1: C_REG, type_: 0, size: 0}, // to preserve previous behavior
517 {as: obj.ANOP, a1: C_FREG, type_: 0, size: 0},
518 {as: obj.ADUFFZERO, a6: C_LBRA, type_: 11, size: 4}, // same as ABR/ABL
519 {as: obj.ADUFFCOPY, a6: C_LBRA, type_: 11, size: 4}, // same as ABR/ABL
520 {as: obj.APCALIGN, a1: C_LCON, type_: 0, size: 0}, // align code
522 {as: obj.AXXX, type_: 0, size: 4},
525 var oprange [ALAST & obj.AMask][]Optab
527 var xcmp [C_NCLASS][C_NCLASS]bool
529 // padding bytes to add to align code as requested
530 func addpad(pc, a int64, ctxt *obj.Link, cursym *obj.LSym) int {
531 // For 16 and 32 byte alignment, there is a tradeoff
532 // between aligning the code and adding too many NOPs.
539 // Align to 16 bytes if possible but add at
548 // Align to 32 bytes if possible but add at
558 // When 32 byte alignment is requested on Linux,
559 // promote the function's alignment to 32. On AIX
560 // the function alignment is not changed which might
561 // result in 16 byte alignment but that is still fine.
562 // TODO: alignment on AIX
563 if ctxt.Headtype != objabi.Haix && cursym.Func().Align < 32 {
564 cursym.Func().Align = 32
567 ctxt.Diag("Unexpected alignment: %d for PCALIGN directive\n", a)
572 // Get the implied register of an operand which doesn't specify one. These show up
573 // in handwritten asm like "MOVD R5, foosymbol" where a base register is not supplied,
574 // or "MOVD R5, foo+10(SP)" or a pseudo-register is used. The other common case is when
575 // generating constants in register like "MOVD $constant, Rx".
576 func (c *ctxt9) getimpliedreg(a *obj.Addr, p *obj.Prog) int {
578 case C_ADDCON, C_ANDCON, C_UCON, C_LCON, C_SCON, C_ZCON:
580 case C_SACON, C_LACON:
582 case C_LOREG, C_SOREG, C_ZOREG:
584 case obj.NAME_EXTERN, obj.NAME_STATIC:
586 case obj.NAME_AUTO, obj.NAME_PARAM:
592 c.ctxt.Diag("failed to determine implied reg for class %v (%v)", DRconv(oclass(a)), p)
// span9 assigns a PC to every Prog in the function, relaxes out-of-range
// conditional branches, and finally emits the machine code bytes.
// NOTE(review): this excerpt is elided; comments describe only the visible lines.
596 func span9(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
597 p := cursym.Func().Text
// An empty body (no Prog after TEXT) needs no encoding work.
598 if p == nil || p.Link == nil { // handle external functions and ELF section symbols
// buildop must have populated oprange before any lookup can succeed.
602 if oprange[AANDN&obj.AMask] == nil {
603 ctxt.Diag("ppc64 ops not initialized, call ppc64.buildop first")
// Fresh per-function assembler state; autosize comes from the TEXT frame size.
606 c := ctxt9{ctxt: ctxt, newprog: newprog, cursym: cursym, autosize: int32(p.To.Offset)}
// First pass: assign sizes/PCs to each instruction.
613 for p = p.Link; p != nil; p = p.Link {
// PCALIGN contributes padding, not a fixed-size instruction.
618 if p.As == obj.APCALIGN {
619 a := c.vregoff(&p.From)
620 m = addpad(pc, a, ctxt, cursym)
// Only NOP/FUNCDATA/PCDATA pseudo-ops may legitimately have zero width.
622 if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA {
623 ctxt.Diag("zero-width instruction\n%v", p)
634 * if any procedure is large enough to
635 * generate a large SBRA branch, then
636 * generate extra passes putting branches
637 * around jmps to fix. this is rare.
// Relaxation pass: repeat until no conditional branch is out of range.
647 for p = c.cursym.Func().Text.Link; p != nil; p = p.Link {
651 // very large conditional branches
// type 16/17 are the conditional-branch optab entries (see asmout case 16/17).
652 if (o.type_ == 16 || o.type_ == 17) && p.To.Target() != nil {
653 otxt = p.To.Target().Pc - pc
// BC has a 16-bit displacement; the ±10 slack leaves room for inserted code.
654 if otxt < -(1<<15)+10 || otxt >= (1<<15)-10 {
655 // Assemble the instruction with a target not too far to figure out BI and BO fields.
656 // If only the CTR or BI (the CR bit) are tested, the conditional branch can be inverted,
657 // and only one extra branch is needed to reach the target.
659 p.To.SetTarget(p.Link)
660 c.asmout(p, o, out[:])
// Extract the BO (bits 21-25) and BI (bits 16-20) fields from the encoding.
663 bo := int64(out[0]>>21) & 31
664 bi := int16((out[0] >> 16) & 31)
668 // A conditional branch that is unconditionally taken. This cannot be inverted.
669 } else if bo&0x10 == 0x10 {
670 // A branch based on the value of CTR. Invert the CTR comparison against zero bit.
673 } else if bo&0x04 == 0x04 {
674 // A branch based on CR bit. Invert the BI comparison bit.
681 // BC bo,...,far_away_target
684 // BC invert(bo),next_insn
685 // JMP far_away_target
689 p.From = obj.Addr{Type: obj.TYPE_CONST, Name: obj.NAME_NONE, Offset: bo}
692 q.To.Type = obj.TYPE_BRANCH
693 q.To.SetTarget(p.To.Target())
695 p.To.SetTarget(p.Link)
697 p.Reg = bi // TODO: This is a hack since BI bits are not enumerated as registers
700 // BC ...,far_away_target
706 // JMP far_away_target
713 q.To.Type = obj.TYPE_BRANCH
714 q.To.SetTarget(p.To.Target())
720 q.To.Type = obj.TYPE_BRANCH
721 q.To.SetTarget(q.Link.Link)
// Re-run the sizing logic after relaxation may have inserted instructions.
729 if p.As == obj.APCALIGN {
730 a := c.vregoff(&p.From)
731 m = addpad(pc, a, ctxt, cursym)
733 if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA {
734 ctxt.Diag("zero-width instruction\n%v", p)
// Round the function size up to the function alignment boundary.
746 if r := pc & funcAlignMask; r != 0 {
753 * lay out the code, emitting code and data relocations.
756 c.cursym.Grow(c.cursym.Size)
// Final pass: encode every instruction into the symbol's byte buffer.
760 for p := c.cursym.Func().Text.Link; p != nil; p = p.Link {
763 if int(o.size) > 4*len(out) {
764 log.Fatalf("out array in span9 is too small, need at least %d for %v", o.size/4, p)
766 // asmout is not set up to add large amounts of padding
767 if o.type_ == 0 && p.As == obj.APCALIGN {
// OR R0,R0,R0 is the canonical ppc64 nop used for alignment padding.
768 pad := LOP_RRR(OP_OR, REGZERO, REGZERO, REGZERO)
769 aln := c.vregoff(&p.From)
770 v := addpad(p.Pc, aln, c.ctxt, c.cursym)
772 // Same padding instruction for all
773 for i = 0; i < int32(v/4); i++ {
774 c.ctxt.Arch.ByteOrder.PutUint32(bp, pad)
779 c.asmout(p, o, out[:])
// Write each 4-byte instruction word in the target byte order.
780 for i = 0; i < int32(o.size/4); i++ {
781 c.ctxt.Arch.ByteOrder.PutUint32(bp, out[i])
// isint32 reports whether v fits in a signed 32-bit integer
// (round-trips through int32 unchanged).
788 func isint32(v int64) bool {
789 return int64(int32(v)) == v
// isuint32 reports whether v fits in an unsigned 32-bit integer
// (round-trips through uint32 unchanged).
792 func isuint32(v uint64) bool {
793 return uint64(uint32(v)) == v
// aclass classifies an operand into one of the C_* operand classes used to
// index the optab, setting c.instoffset for offset/constant operands.
// NOTE(review): this excerpt is elided; comments describe only the visible lines.
796 func (c *ctxt9) aclass(a *obj.Addr) int {
// Register operands: classify by register bank.
802 if REG_R0 <= a.Reg && a.Reg <= REG_R31 {
805 if REG_F0 <= a.Reg && a.Reg <= REG_F31 {
808 if REG_V0 <= a.Reg && a.Reg <= REG_V31 {
811 if REG_VS0 <= a.Reg && a.Reg <= REG_VS63 {
814 if REG_CR0 <= a.Reg && a.Reg <= REG_CR7 || a.Reg == REG_CR {
// Special-purpose registers occupy a 1024-entry range.
817 if REG_SPR0 <= a.Reg && a.Reg <= REG_SPR0+1023 {
832 if REG_DCR0 <= a.Reg && a.Reg <= REG_DCR0+1023 {
835 if a.Reg == REG_FPSCR {
// Memory operands: classification depends on the symbol name kind.
842 case obj.NAME_GOTREF, obj.NAME_TOCREF:
845 case obj.NAME_EXTERN,
847 c.instoffset = a.Offset
850 } else if a.Sym.Type == objabi.STLSBSS {
851 // For PIC builds, use 12 byte got initial-exec TLS accesses.
852 if c.ctxt.Flag_shared {
855 // Otherwise, use 8 byte local-exec TLS accesses.
// Stack-relative operands fold the frame size into the offset.
862 c.instoffset = int64(c.autosize) + a.Offset
// BIG is the threshold between short (S*) and long (L*) offset classes.
863 if c.instoffset >= -BIG && c.instoffset < BIG {
869 c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.FixedFrameSize()
870 if c.instoffset >= -BIG && c.instoffset < BIG {
876 c.instoffset = a.Offset
877 if c.instoffset == 0 {
880 if c.instoffset >= -BIG && c.instoffset < BIG {
888 case obj.TYPE_TEXTSIZE:
891 case obj.TYPE_FCONST:
892 // The only cases where FCONST will occur are with float64 +/- 0.
893 // All other float constants are generated in memory.
894 f64 := a.Val.(float64)
896 if math.Signbit(f64) {
901 log.Fatalf("Unexpected nonzero FCONST operand %v", a)
// Address-of operands: classify by how large the offset is.
907 c.instoffset = a.Offset
909 if -BIG <= c.instoffset && c.instoffset <= BIG {
912 if isint32(c.instoffset) {
918 case obj.NAME_EXTERN,
924 c.instoffset = a.Offset
928 c.instoffset = int64(c.autosize) + a.Offset
929 if c.instoffset >= -BIG && c.instoffset < BIG {
935 c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.FixedFrameSize()
936 if c.instoffset >= -BIG && c.instoffset < BIG {
// Plain constants: pick the tightest constant class the value fits in
// (zero, 15-bit, 16-bit, shifted-16-bit, 32-bit, 64-bit).
945 if c.instoffset >= 0 {
946 if c.instoffset == 0 {
949 if c.instoffset <= 0x7fff {
952 if c.instoffset <= 0xffff {
955 if c.instoffset&0xffff == 0 && isuint32(uint64(c.instoffset)) { /* && (instoffset & (1<<31)) == 0) */
958 if isint32(c.instoffset) || isuint32(uint64(c.instoffset)) {
964 if c.instoffset >= -0x8000 {
967 if c.instoffset&0xffff == 0 && isint32(c.instoffset) {
970 if isint32(c.instoffset) {
975 case obj.TYPE_BRANCH:
// Dynamic-linking builds treat symbolic branches specially.
976 if a.Sym != nil && c.ctxt.Flag_dynlink {
// prasm prints a Prog to stdout; debugging aid used around diagnostics.
985 func prasm(p *obj.Prog) {
986 fmt.Printf("%v\n", p)
// oplook finds the Optab entry matching p's opcode and operand classes,
// caching classes in the Addr.Class fields and the result index in p.Optab.
// NOTE(review): this excerpt is elided; comments describe only the visible lines.
989 func (c *ctxt9) oplook(p *obj.Prog) *Optab {
// Class is cached 1-based; 0 means "not yet classified".
994 a1 = int(p.From.Class)
996 a1 = c.aclass(&p.From) + 1
997 p.From.Class = int8(a1)
// Up to three extra operands may live in RestArgs; default them to C_NONE.
1001 argsv := [3]int{C_NONE + 1, C_NONE + 1, C_NONE + 1}
1002 for i, ap := range p.RestArgs {
1003 argsv[i] = int(ap.Addr.Class)
1005 argsv[i] = c.aclass(&ap.Addr) + 1
1006 ap.Addr.Class = int8(argsv[i])
1014 a6 := int(p.To.Class)
1016 a6 = c.aclass(&p.To) + 1
1017 p.To.Class = int8(a6)
// The second operand (p.Reg) is classified directly by register bank.
1023 if REG_R0 <= p.Reg && p.Reg <= REG_R31 {
1025 } else if REG_V0 <= p.Reg && p.Reg <= REG_V31 {
1027 } else if REG_VS0 <= p.Reg && p.Reg <= REG_VS63 {
1029 } else if REG_F0 <= p.Reg && p.Reg <= REG_F31 {
1034 // c.ctxt.Logf("oplook %v %d %d %d %d\n", p, a1, a2, a3, a4, a5, a6)
// oprange maps each opcode to its slice of candidate Optab entries.
1035 ops := oprange[p.As&obj.AMask]
// Scan candidates; the xcmp-derived c1..c6 tables allow compatible classes.
1042 for i := range ops {
1044 if c1[op.a1] && c2[op.a2] && c3[op.a3] && c4[op.a4] && c5[op.a5] && c6[op.a6] {
// Cache the 1-based optab index so the next lookup is O(1).
1045 p.Optab = uint16(cap(optab) - cap(ops) + i + 1)
// No entry matched: report the offending operand-class combination.
1050 c.ctxt.Diag("illegal combination %v %v %v %v %v %v %v", p.As, DRconv(a1), DRconv(a2), DRconv(a3), DRconv(a4), DRconv(a5), DRconv(a6))
// cmp reports whether operand class a accepts an operand of class b,
// i.e. b is a (possibly narrower) subclass of a.
// NOTE(review): this excerpt is elided; comments describe only the visible lines.
1058 func cmp(a int, b int) bool {
// Any of the small-constant classes satisfies the wider constant class.
1064 if b == C_ZCON || b == C_SCON || b == C_UCON || b == C_ADDCON || b == C_ANDCON {
1069 if b == C_ZCON || b == C_SCON {
1074 if b == C_ZCON || b == C_SCON {
// The named special registers are acceptable where a generic SPR is expected.
1079 if b == C_LR || b == C_XER || b == C_CTR {
// Short/zero-offset memory operands satisfy the long-offset class.
1109 if b == C_SOREG || b == C_ZOREG {
// A zero constant can stand in for a register only when R0 reads as zero.
1115 return r0iszero != 0 /*TypeKind(100016)*/
1119 /* Allow any VR argument as a VSR operand. */
// Len implements sort.Interface for the optab slice wrapper.
1133 func (x ocmp) Len() int {
// Swap implements sort.Interface by exchanging two optab entries.
1137 func (x ocmp) Swap(i, j int) {
1138 x[i], x[j] = x[j], x[i]
1141 // Used when sorting the optab. Sorting is
1142 // done in a way so that the best choice of
1143 // opcode/operand combination is considered first.
// Ordering: opcode first, then size (fewer instructions preferred),
// then operand classes a1..a6 in order.
1144 func (x ocmp) Less(i, j int) bool {
// Primary key: the opcode value.
1147 n := int(p1.as) - int(p2.as)
1152 // Consider those that generate fewer
1153 // instructions first.
1154 n = int(p1.size) - int(p2.size)
1158 // operand order should match
1159 // better choices first
1160 n = int(p1.a1) - int(p2.a1)
1164 n = int(p1.a2) - int(p2.a2)
1168 n = int(p1.a3) - int(p2.a3)
1172 n = int(p1.a4) - int(p2.a4)
1176 n = int(p1.a5) - int(p2.a5)
1180 n = int(p1.a6) - int(p2.a6)
1187 // Add an entry to the opcode table for
1188 // a new opcode b0 with the same operand combinations
// opset makes opcode a share the already-registered optab slice of b0.
1190 func opset(a, b0 obj.As) {
1191 oprange[a&obj.AMask] = oprange[b0]
1194 // Build the opcode table
// buildop sorts optab, populates oprange for every base opcode, and then
// registers each family of related opcodes (via opset) against the entry
// that defines its operand combinations.
// NOTE(review): this excerpt is elided; comments describe only the visible lines.
1195 func buildop(ctxt *obj.Link) {
1196 if oprange[AANDN&obj.AMask] != nil {
1197 // Already initialized; stop now.
1198 // This happens in the cmd/asm tests,
1199 // each of which re-initializes the arch.
// Precompute the class-compatibility matrix consulted by oplook.
1205 for i := 0; i < C_NCLASS; i++ {
1206 for n = 0; n < C_NCLASS; n++ {
// optab is terminated by an AXXX sentinel entry.
1212 for n = 0; optab[n].as != obj.AXXX; n++ {
1214 sort.Sort(ocmp(optab[:n]))
// Record, for each base opcode, the contiguous run of sorted optab entries.
1215 for i := 0; i < n; i++ {
1219 for optab[i].as == r {
1222 oprange[r0] = optab[start:i]
// Any opcode not handled by the switch below is a build-time error.
1227 ctxt.Diag("unknown op in build: %v", r)
1228 log.Fatalf("instruction missing from switch in asm9.go:buildop: %v", r)
// Each case registers the variants that share r0's operand combinations.
1230 case ADCBF: /* unary indexed: op (b+a); op (b) */
1239 case AECOWX: /* indexed store: op s,(b+a); op s,(b) */
1245 case AREM: /* macro */
1257 case ADIVW: /* op Rb[,Ra],Rd */
1262 opset(AMULHWUCC, r0)
1264 opset(AMULLWVCC, r0)
1272 opset(ADIVWUVCC, r0)
1289 opset(AMULHDUCC, r0)
1291 opset(AMULLDVCC, r0)
1298 opset(ADIVDEUCC, r0)
1303 opset(ADIVDUVCC, r0)
1315 case APOPCNTD: /* popcntd, popcntw, popcntb, cnttzw, cnttzd */
1319 opset(ACNTTZWCC, r0)
1321 opset(ACNTTZDCC, r0)
1323 case ACOPY: /* copy, paste. */
1326 case AMADDHD: /* maddhd, maddhdu, maddld */
1330 case AMOVBZ: /* lbz, stz, rlwm(r/r), lhz, lha, stz, and x variants */
1334 case AMOVBZU: /* lbz[x]u, stb[x]u, lhz[x]u, lha[x]u, sth[u]x, ld[x]u, std[u]x */
1343 case ALV: /* lvebx, lvehx, lvewx, lvx, lvxl, lvsl, lvsr */
1352 case ASTV: /* stvebx, stvehx, stvewx, stvx, stvxl */
1359 case AVAND: /* vand, vandc, vnand */
1364 case AVMRGOW: /* vmrgew, vmrgow */
1367 case AVOR: /* vor, vorc, vxor, vnor, veqv */
1374 case AVADDUM: /* vaddubm, vadduhm, vadduwm, vaddudm, vadduqm */
1381 case AVADDCU: /* vaddcuq, vaddcuw */
1385 case AVADDUS: /* vaddubs, vadduhs, vadduws */
1390 case AVADDSS: /* vaddsbs, vaddshs, vaddsws */
1395 case AVADDE: /* vaddeuqm, vaddecuq */
1396 opset(AVADDEUQM, r0)
1397 opset(AVADDECUQ, r0)
1399 case AVSUBUM: /* vsububm, vsubuhm, vsubuwm, vsubudm, vsubuqm */
1406 case AVSUBCU: /* vsubcuq, vsubcuw */
1410 case AVSUBUS: /* vsububs, vsubuhs, vsubuws */
1415 case AVSUBSS: /* vsubsbs, vsubshs, vsubsws */
1420 case AVSUBE: /* vsubeuqm, vsubecuq */
1421 opset(AVSUBEUQM, r0)
1422 opset(AVSUBECUQ, r0)
1424 case AVMULESB: /* vmulesb, vmulosb, vmuleub, vmuloub, vmulosh, vmulouh, vmulesw, vmulosw, vmuleuw, vmulouw, vmuluwm */
1437 case AVPMSUM: /* vpmsumb, vpmsumh, vpmsumw, vpmsumd */
1443 case AVR: /* vrlb, vrlh, vrlw, vrld */
1449 case AVS: /* vs[l,r], vs[l,r]o, vs[l,r]b, vs[l,r]h, vs[l,r]w, vs[l,r]d */
1463 case AVSA: /* vsrab, vsrah, vsraw, vsrad */
1469 case AVSOI: /* vsldoi */
1472 case AVCLZ: /* vclzb, vclzh, vclzw, vclzd */
1478 case AVPOPCNT: /* vpopcntb, vpopcnth, vpopcntw, vpopcntd */
1479 opset(AVPOPCNTB, r0)
1480 opset(AVPOPCNTH, r0)
1481 opset(AVPOPCNTW, r0)
1482 opset(AVPOPCNTD, r0)
1484 case AVCMPEQ: /* vcmpequb[.], vcmpequh[.], vcmpequw[.], vcmpequd[.] */
1485 opset(AVCMPEQUB, r0)
1486 opset(AVCMPEQUBCC, r0)
1487 opset(AVCMPEQUH, r0)
1488 opset(AVCMPEQUHCC, r0)
1489 opset(AVCMPEQUW, r0)
1490 opset(AVCMPEQUWCC, r0)
1491 opset(AVCMPEQUD, r0)
1492 opset(AVCMPEQUDCC, r0)
1494 case AVCMPGT: /* vcmpgt[u,s]b[.], vcmpgt[u,s]h[.], vcmpgt[u,s]w[.], vcmpgt[u,s]d[.] */
1495 opset(AVCMPGTUB, r0)
1496 opset(AVCMPGTUBCC, r0)
1497 opset(AVCMPGTUH, r0)
1498 opset(AVCMPGTUHCC, r0)
1499 opset(AVCMPGTUW, r0)
1500 opset(AVCMPGTUWCC, r0)
1501 opset(AVCMPGTUD, r0)
1502 opset(AVCMPGTUDCC, r0)
1503 opset(AVCMPGTSB, r0)
1504 opset(AVCMPGTSBCC, r0)
1505 opset(AVCMPGTSH, r0)
1506 opset(AVCMPGTSHCC, r0)
1507 opset(AVCMPGTSW, r0)
1508 opset(AVCMPGTSWCC, r0)
1509 opset(AVCMPGTSD, r0)
1510 opset(AVCMPGTSDCC, r0)
1512 case AVCMPNEZB: /* vcmpnezb[.] */
1513 opset(AVCMPNEZBCC, r0)
1515 opset(AVCMPNEBCC, r0)
1517 opset(AVCMPNEHCC, r0)
1519 opset(AVCMPNEWCC, r0)
1521 case AVPERM: /* vperm */
1522 opset(AVPERMXOR, r0)
1525 case AVBPERMQ: /* vbpermq, vbpermd */
1528 case AVSEL: /* vsel */
1531 case AVSPLTB: /* vspltb, vsplth, vspltw */
1535 case AVSPLTISB: /* vspltisb, vspltish, vspltisw */
1536 opset(AVSPLTISH, r0)
1537 opset(AVSPLTISW, r0)
1539 case AVCIPH: /* vcipher, vcipherlast */
1541 opset(AVCIPHERLAST, r0)
1543 case AVNCIPH: /* vncipher, vncipherlast */
1544 opset(AVNCIPHER, r0)
1545 opset(AVNCIPHERLAST, r0)
1547 case AVSBOX: /* vsbox */
1550 case AVSHASIGMA: /* vshasigmaw, vshasigmad */
1551 opset(AVSHASIGMAW, r0)
1552 opset(AVSHASIGMAD, r0)
1554 case ALXVD2X: /* lxvd2x, lxvdsx, lxvw4x, lxvh8x, lxvb16x */
1560 case ALXV: /* lxv */
1563 case ALXVL: /* lxvl, lxvll, lxvx */
1567 case ASTXVD2X: /* stxvd2x, stxvdsx, stxvw4x, stxvh8x, stxvb16x */
1570 opset(ASTXVB16X, r0)
1572 case ASTXV: /* stxv */
1575 case ASTXVL: /* stxvl, stxvll, stvx */
1579 case ALXSDX: /* lxsdx */
1582 case ASTXSDX: /* stxsdx */
1585 case ALXSIWAX: /* lxsiwax, lxsiwzx */
1588 case ASTXSIWX: /* stxsiwx */
1591 case AMFVSRD: /* mfvsrd, mfvsrwz (and extended mnemonics), mfvsrld */
1597 case AMTVSRD: /* mtvsrd, mtvsrwa, mtvsrwz (and extended mnemonics), mtvsrdd, mtvsrws */
1604 case AXXLAND: /* xxland, xxlandc, xxleqv, xxlnand */
1609 case AXXLOR: /* xxlorc, xxlnor, xxlor, xxlxor */
1615 case AXXSEL: /* xxsel */
1618 case AXXMRGHW: /* xxmrghw, xxmrglw */
1621 case AXXSPLTW: /* xxspltw */
1624 case AXXSPLTIB: /* xxspltib */
1625 opset(AXXSPLTIB, r0)
1627 case AXXPERM: /* xxpermdi */
1630 case AXXSLDWI: /* xxsldwi */
1631 opset(AXXPERMDI, r0)
1634 case AXXBRQ: /* xxbrq, xxbrd, xxbrw, xxbrh */
1639 case AXSCVDPSP: /* xscvdpsp, xscvspdp, xscvdpspn, xscvspdpn */
1640 opset(AXSCVSPDP, r0)
1641 opset(AXSCVDPSPN, r0)
1642 opset(AXSCVSPDPN, r0)
1644 case AXVCVDPSP: /* xvcvdpsp, xvcvspdp */
1645 opset(AXVCVSPDP, r0)
1647 case AXSCVDPSXDS: /* xscvdpsxds, xscvdpsxws, xscvdpuxds, xscvdpuxws */
1648 opset(AXSCVDPSXWS, r0)
1649 opset(AXSCVDPUXDS, r0)
1650 opset(AXSCVDPUXWS, r0)
1652 case AXSCVSXDDP: /* xscvsxddp, xscvuxddp, xscvsxdsp, xscvuxdsp */
1653 opset(AXSCVUXDDP, r0)
1654 opset(AXSCVSXDSP, r0)
1655 opset(AXSCVUXDSP, r0)
1657 case AXVCVDPSXDS: /* xvcvdpsxds, xvcvdpsxws, xvcvdpuxds, xvcvdpuxws, xvcvspsxds, xvcvspsxws, xvcvspuxds, xvcvspuxws */
1658 opset(AXVCVDPSXDS, r0)
1659 opset(AXVCVDPSXWS, r0)
1660 opset(AXVCVDPUXDS, r0)
1661 opset(AXVCVDPUXWS, r0)
1662 opset(AXVCVSPSXDS, r0)
1663 opset(AXVCVSPSXWS, r0)
1664 opset(AXVCVSPUXDS, r0)
1665 opset(AXVCVSPUXWS, r0)
1667 case AXVCVSXDDP: /* xvcvsxddp, xvcvsxwdp, xvcvuxddp, xvcvuxwdp, xvcvsxdsp, xvcvsxwsp, xvcvuxdsp, xvcvuxwsp */
1668 opset(AXVCVSXWDP, r0)
1669 opset(AXVCVUXDDP, r0)
1670 opset(AXVCVUXWDP, r0)
1671 opset(AXVCVSXDSP, r0)
1672 opset(AXVCVSXWSP, r0)
1673 opset(AXVCVUXDSP, r0)
1674 opset(AXVCVUXWSP, r0)
1676 case AAND: /* logical op Rb,Rs,Ra; no literal */
1690 case AADDME: /* op Ra, Rd */
1694 opset(AADDMEVCC, r0)
1698 opset(AADDZEVCC, r0)
1702 opset(ASUBMEVCC, r0)
1706 opset(ASUBZEVCC, r0)
1726 case AEXTSB: /* op Rs, Ra */
1732 opset(ACNTLZWCC, r0)
1736 opset(ACNTLZDCC, r0)
1738 case AFABS: /* fop [s,]d */
1750 opset(AFCTIWZCC, r0)
1754 opset(AFCTIDZCC, r0)
1758 opset(AFCFIDUCC, r0)
1760 opset(AFCFIDSCC, r0)
1772 opset(AFRSQRTECC, r0)
1776 opset(AFSQRTSCC, r0)
1783 opset(AFCPSGNCC, r0)
1796 opset(AFMADDSCC, r0)
1800 opset(AFMSUBSCC, r0)
1802 opset(AFNMADDCC, r0)
1804 opset(AFNMADDSCC, r0)
1806 opset(AFNMSUBCC, r0)
1808 opset(AFNMSUBSCC, r0)
1824 opset(AMTFSB0CC, r0)
1826 opset(AMTFSB1CC, r0)
1828 case ANEG: /* op [Ra,] Rd */
1834 case AOR: /* or/xor Rb,Rs,Ra; ori/xori $uimm,Rs,R */
1837 case AORIS: /* oris/xoris $uimm,Rs,Ra */
1852 case ASRAW: /* sraw Rb,Rs,Ra; srawi sh,Rs,Ra */
1856 opset(AEXTSWSLICC, r0)
1858 case ASRAD: /* sraw Rb,Rs,Ra; srawi sh,Rs,Ra */
1861 case ASUB: /* SUB Ra,Rb,Rd => subf Rd,ra,rb */
1889 opset(ARLDIMICC, r0)
1900 opset(ARLDICLCC, r0)
1902 opset(ARLDICRCC, r0)
1905 opset(ACLRLSLDI, r0)
1918 case ASYSCALL: /* just the op; flow of control */
1957 case AMOVW: /* load/store/move word with sign extension; move 32-bit literals */
1958 opset(AMOVWZ, r0) /* Same as above, but zero extended */
1962 AANDCC, /* and. Rb,Rs,Ra; andi. $uimm,Rs,Ra */
1967 AMOVD, /* load/store/move 64-bit values, including 32-bit literals with/without sign-extension */
1968 AMOVB, /* macro: move byte with sign extension */
1969 AMOVBU, /* macro: move byte with sign extension & update */
1971 /* op $s[,r2],r3; op r1[,r2],r3; no cc/v */
1972 ASUBC, /* op r1,$s,r3; op r1[,r2],r3 */
// OPVXX1 builds an XX1-form opcode: primary opcode o in bits 26+,
// extended opcode xo at <<1, oe field at <<11.
1998 func OPVXX1(o uint32, xo uint32, oe uint32) uint32 {
1999 return o<<26 | xo<<1 | oe<<11
// OPVXX2 builds an XX2-form opcode: extended opcode at <<2, oe at <<11.
2002 func OPVXX2(o uint32, xo uint32, oe uint32) uint32 {
2003 return o<<26 | xo<<2 | oe<<11
// OPVXX2VA is the XX2-form variant with oe placed at <<16 instead of <<11.
2006 func OPVXX2VA(o uint32, xo uint32, oe uint32) uint32 {
2007 return o<<26 | xo<<2 | oe<<16
// OPVXX3 builds an XX3-form opcode: extended opcode at <<3, oe at <<11.
2010 func OPVXX3(o uint32, xo uint32, oe uint32) uint32 {
2011 return o<<26 | xo<<3 | oe<<11
// OPVXX4 builds an XX4-form opcode: extended opcode at <<4, oe at <<11.
2014 func OPVXX4(o uint32, xo uint32, oe uint32) uint32 {
2015 return o<<26 | xo<<4 | oe<<11
// OPDQ builds a DQ-form opcode: extended opcode unshifted, oe at <<4.
2018 func OPDQ(o uint32, xo uint32, oe uint32) uint32 {
2019 return o<<26 | xo | oe<<4
// OPVX builds a VX-form opcode; rc occupies the low bit.
2022 func OPVX(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
2023 return o<<26 | xo | oe<<11 | rc&1
// OPVC builds a VC-form opcode; the record bit rc sits at bit 10.
2026 func OPVC(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
2027 return o<<26 | xo | oe<<11 | (rc&1)<<10
// OPVCC builds the common X/XO-form opcode: xo at <<1, oe (OE bit) at <<10,
// rc (record/Rc bit) in the low bit.
2030 func OPVCC(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
2031 return o<<26 | xo<<1 | oe<<10 | rc&1
// OPCC is OPVCC with the OE field fixed at zero.
2034 func OPCC(o uint32, xo uint32, rc uint32) uint32 {
2035 return OPVCC(o, xo, 0, rc)
2038 /* Generate MD-form opcode */
// Extended opcode at <<2; rc (record bit) in the low bit.
2039 func OPMD(o, xo, rc uint32) uint32 {
2040 return o<<26 | xo<<2 | rc&1
2043 /* the order is dest, a/s, b/imm for both arithmetic and logical operations */
// AOP_RRR packs three 5-bit register fields: d at <<21, a at <<16, b at <<11.
2044 func AOP_RRR(op uint32, d uint32, a uint32, b uint32) uint32 {
2045 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11
2048 /* VX-form 2-register operands, r/none/r */
// d at <<21, a at <<11; the middle (A) field is left zero.
2049 func AOP_RR(op uint32, d uint32, a uint32) uint32 {
2050 return op | (d&31)<<21 | (a&31)<<11
2053 /* VA-form 4-register operands */
// Four 5-bit register fields: d<<21, a<<16, b<<11, c<<6.
2054 func AOP_RRRR(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
2055 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c&31)<<6
// AOP_IRR packs D-form fields: d<<21, a<<16, 16-bit immediate in the low half.
2058 func AOP_IRR(op uint32, d uint32, a uint32, simm uint32) uint32 {
2059 return op | (d&31)<<21 | (a&31)<<16 | simm&0xFFFF
2062 /* VX-form 2-register + UIM operands */
// Note the immediate occupies the A-field position (<<16), register a at <<11.
2063 func AOP_VIRR(op uint32, d uint32, a uint32, simm uint32) uint32 {
2064 return op | (d&31)<<21 | (simm&0xFFFF)<<16 | (a&31)<<11
2067 /* VX-form 2-register + ST + SIX operands */
// ST bit at <<15, 4-bit SIX immediate at <<11.
2068 func AOP_IIRR(op uint32, d uint32, a uint32, sbit uint32, simm uint32) uint32 {
2069 return op | (d&31)<<21 | (a&31)<<16 | (sbit&1)<<15 | (simm&0xF)<<11
2072 /* VA-form 3-register + SHB operands */
// Three registers plus a 4-bit shift/byte immediate at <<6.
2073 func AOP_IRRR(op uint32, d uint32, a uint32, b uint32, simm uint32) uint32 {
2074 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (simm&0xF)<<6
2077 /* VX-form 1-register + SIM operands */
// Destination at <<21, 5-bit signed immediate at <<16.
2078 func AOP_IR(op uint32, d uint32, simm uint32) uint32 {
2079 return op | (d&31)<<21 | (simm&31)<<16
2082 /* XX1-form 3-register operands, 1 VSR operand */
// The VSR number's high bit (bit 5 of r) is split off into the low opcode bit.
2083 func AOP_XX1(op uint32, r uint32, a uint32, b uint32) uint32 {
2084 return op | (r&31)<<21 | (a&31)<<16 | (b&31)<<11 | (r&32)>>5
2087 /* XX2-form 3-register operands, 2 VSR operands */
// xt/xb are 6-bit VSR numbers; their high bits land in the low opcode bits.
2088 func AOP_XX2(op uint32, xt uint32, a uint32, xb uint32) uint32 {
2089 return op | (xt&31)<<21 | (a&3)<<16 | (xb&31)<<11 | (xb&32)>>4 | (xt&32)>>5
2092 /* XX3-form 3 VSR operands */
// All three 6-bit VSR numbers have their high bits relocated to the low opcode bits.
2093 func AOP_XX3(op uint32, xt uint32, xa uint32, xb uint32) uint32 {
2094 return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
2097 /* XX3-form 3 VSR operands + immediate */
// Like AOP_XX3 with an additional 2-bit immediate at <<8.
2098 func AOP_XX3I(op uint32, xt uint32, xa uint32, xb uint32, c uint32) uint32 {
2099 return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (c&3)<<8 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
2102 /* XX4-form, 4 VSR operands */
// Four 6-bit VSR numbers; each high bit is relocated into the low opcode bits.
2103 func AOP_XX4(op uint32, xt uint32, xa uint32, xb uint32, xc uint32) uint32 {
2104 return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (xc&31)<<6 | (xc&32)>>2 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
2107 /* DQ-form, VSR register, register + offset operands */
2108 func AOP_DQ(op uint32, xt uint32, a uint32, b uint32) uint32 {
2109 /* The EA for this instruction form is (RA) + DQ << 4, where DQ is a 12-bit signed integer. */
2110 /* In order to match the output of the GNU objdump (and make the usage in Go asm easier), the */
2111 /* instruction is called using the sign extended value (i.e. a valid offset would be -32752 or 32752, */
2112 /* not -2047 or 2047), so 'b' needs to be adjusted to the expected 12-bit DQ value. Bear in mind that */
2113 /* bits 0 to 3 in 'dq' need to be zero, otherwise this will generate an illegal instruction. */
2114 /* If in doubt how this instruction form is encoded, refer to ISA 3.0b, pages 492 and 507. */
// NOTE(review): the derivation of dq from b is elided here; the encoding below
// places the 12-bit dq at <<4 and the xt high bit at bit 3.
2116 return op | (xt&31)<<21 | (a&31)<<16 | (dq&4095)<<4 | (xt&32)>>2
2119 /* Z23-form, 3-register operands + CY field */
// The 2-bit CY field sits at <<9.
2120 func AOP_Z23I(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
2121 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c&3)<<9
2124 /* X-form, 3-register operands + EH field */
// The single-bit EH hint occupies the low bit.
2125 func AOP_RRRI(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
2126 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c & 1)
// LOP_RRR packs logical-op fields: source register s goes at <<21
// and destination a at <<16 (the reverse of AOP_RRR's d/a order).
2129 func LOP_RRR(op uint32, a uint32, s uint32, b uint32) uint32 {
2130 return op | (s&31)<<21 | (a&31)<<16 | (b&31)<<11
// LOP_IRR is the logical-immediate form: s<<21, a<<16, 16-bit uimm low.
2133 func LOP_IRR(op uint32, a uint32, s uint32, uimm uint32) uint32 {
2134 return op | (s&31)<<21 | (a&31)<<16 | uimm&0xFFFF
// OP_BR builds an I-form branch: 26-bit LI displacement (low 2 bits zero)
// plus the AA (absolute address) bit.
2137 func OP_BR(op uint32, li uint32, aa uint32) uint32 {
2138 return op | li&0x03FFFFFC | aa<<1
// OP_BC builds a B-form conditional branch: BO<<21, BI<<16, 14-bit BD
// displacement (word-aligned), AA bit at <<1.
2141 func OP_BC(op uint32, bo uint32, bi uint32, bd uint32, aa uint32) uint32 {
2142 return op | (bo&0x1F)<<21 | (bi&0x1F)<<16 | bd&0xFFFC | aa<<1
// OP_BCR builds an XL-form branch-to-register (LR/CTR): BO<<21, BI<<16.
2145 func OP_BCR(op uint32, bo uint32, bi uint32) uint32 {
2146 return op | (bo&0x1F)<<21 | (bi&0x1F)<<16
// OP_RLW builds an M-form rotate: s<<21, a<<16, shift<<11, MB<<6, ME<<1.
2149 func OP_RLW(op uint32, a uint32, s uint32, sh uint32, mb uint32, me uint32) uint32 {
2150 return op | (s&31)<<21 | (a&31)<<16 | (sh&31)<<11 | (mb&31)<<6 | (me&31)<<1
// AOP_RLDIC builds an MD-form 64-bit rotate: the 6-bit shift and mask are
// split fields — sh high bit at bit 1, m high bit at bit 5.
2153 func AOP_RLDIC(op uint32, a uint32, s uint32, sh uint32, m uint32) uint32 {
2154 return op | (s&31)<<21 | (a&31)<<16 | (sh&31)<<11 | ((sh&32)>>5)<<1 | (m&31)<<6 | ((m&32)>>5)<<5
// AOP_EXTSWSLI encodes extswsli: a<<21, s<<16, split 6-bit shift amount.
2157 func AOP_EXTSWSLI(op uint32, a uint32, s uint32, sh uint32) uint32 {
2158 return op | (a&31)<<21 | (s&31)<<16 | (sh&31)<<11 | ((sh&32)>>5)<<1
// AOP_ISEL builds an A-form isel: t<<21, a<<16, b<<11, condition bit bc<<6.
2161 func AOP_ISEL(op uint32, t uint32, a uint32, b uint32, bc uint32) uint32 {
2162 return op | (t&31)<<21 | (a&31)<<16 | (b&31)<<11 | (bc&0x1F)<<6
2166 /* each rhs is OPVCC(_, _, _, _) */
// Pre-expanded opcode constants: primary opcode <<26, extended opcode <<1,
// OE bit <<10, Rc bit low — written out so they stay constant expressions.
2167 OP_ADD = 31<<26 | 266<<1 | 0<<10 | 0
2168 OP_ADDI = 14<<26 | 0<<1 | 0<<10 | 0
2169 OP_ADDIS = 15<<26 | 0<<1 | 0<<10 | 0
2170 OP_ANDI = 28<<26 | 0<<1 | 0<<10 | 0
2171 OP_EXTSB = 31<<26 | 954<<1 | 0<<10 | 0
2172 OP_EXTSH = 31<<26 | 922<<1 | 0<<10 | 0
2173 OP_EXTSW = 31<<26 | 986<<1 | 0<<10 | 0
2174 OP_ISEL = 31<<26 | 15<<1 | 0<<10 | 0
2175 OP_MCRF = 19<<26 | 0<<1 | 0<<10 | 0
2176 OP_MCRFS = 63<<26 | 64<<1 | 0<<10 | 0
2177 OP_MCRXR = 31<<26 | 512<<1 | 0<<10 | 0
2178 OP_MFCR = 31<<26 | 19<<1 | 0<<10 | 0
2179 OP_MFFS = 63<<26 | 583<<1 | 0<<10 | 0
2180 OP_MFSPR = 31<<26 | 339<<1 | 0<<10 | 0
2181 OP_MFSR = 31<<26 | 595<<1 | 0<<10 | 0
2182 OP_MFSRIN = 31<<26 | 659<<1 | 0<<10 | 0
2183 OP_MTCRF = 31<<26 | 144<<1 | 0<<10 | 0
2184 OP_MTFSF = 63<<26 | 711<<1 | 0<<10 | 0
2185 OP_MTFSFI = 63<<26 | 134<<1 | 0<<10 | 0
2186 OP_MTSPR = 31<<26 | 467<<1 | 0<<10 | 0
2187 OP_MTSR = 31<<26 | 210<<1 | 0<<10 | 0
2188 OP_MTSRIN = 31<<26 | 242<<1 | 0<<10 | 0
2189 OP_MULLW = 31<<26 | 235<<1 | 0<<10 | 0
2190 OP_MULLD = 31<<26 | 233<<1 | 0<<10 | 0
2191 OP_OR = 31<<26 | 444<<1 | 0<<10 | 0
2192 OP_ORI = 24<<26 | 0<<1 | 0<<10 | 0
2193 OP_ORIS = 25<<26 | 0<<1 | 0<<10 | 0
2194 OP_RLWINM = 21<<26 | 0<<1 | 0<<10 | 0
2195 OP_RLWNM = 23<<26 | 0<<1 | 0<<10 | 0
2196 OP_SUBF = 31<<26 | 40<<1 | 0<<10 | 0
2197 OP_RLDIC = 30<<26 | 4<<1 | 0<<10 | 0
2198 OP_RLDICR = 30<<26 | 2<<1 | 0<<10 | 0
2199 OP_RLDICL = 30<<26 | 0<<1 | 0<<10 | 0
2200 OP_RLDCL = 30<<26 | 8<<1 | 0<<10 | 0
// extswsli uses an XS-form extended opcode shifted by 2, not 1.
2201 OP_EXTSWSLI = 31<<26 | 445<<2
// oclass recovers the 0-based operand class from the 1-based cached
// Addr.Class value set by oplook/aclass.
2204 func oclass(a *obj.Addr) int {
2205 return int(a.Class) - 1
2213 // This function determines when a non-indexed load or store is D or
2214 // DS form for use in finding the size of the offset field in the instruction.
2215 // The size is needed when setting the offset value in the instruction
2216 // and when generating relocation for that field.
2217 // DS form instructions include: ld, ldu, lwa, std, stdu. All other
2218 // loads and stores with an offset field are D form. This function should
2219 // only be called with the same opcodes as are handled by opstore and opload.
2220 func (c *ctxt9) opform(insn uint32) int {
// Anything not matched below is a caller bug.
2223 c.ctxt.Diag("bad insn in loadform: %x", insn)
// DS-form opcodes (14-bit offset, must be a multiple of 4).
2224 case OPVCC(58, 0, 0, 0), // ld
2225 OPVCC(58, 0, 0, 1), // ldu
2226 OPVCC(58, 0, 0, 0) | 1<<1, // lwa
2227 OPVCC(62, 0, 0, 0), // std
2228 OPVCC(62, 0, 0, 1): //stdu
// D-form opcodes (full 16-bit offset).
2230 case OP_ADDI, // add
2231 OPVCC(32, 0, 0, 0), // lwz
2232 OPVCC(33, 0, 0, 0), // lwzu
2233 OPVCC(34, 0, 0, 0), // lbz
2234 OPVCC(35, 0, 0, 0), // lbzu
2235 OPVCC(40, 0, 0, 0), // lhz
2236 OPVCC(41, 0, 0, 0), // lhzu
2237 OPVCC(42, 0, 0, 0), // lha
2238 OPVCC(43, 0, 0, 0), // lhau
2239 OPVCC(46, 0, 0, 0), // lmw
2240 OPVCC(48, 0, 0, 0), // lfs
2241 OPVCC(49, 0, 0, 0), // lfsu
2242 OPVCC(50, 0, 0, 0), // lfd
2243 OPVCC(51, 0, 0, 0), // lfdu
2244 OPVCC(36, 0, 0, 0), // stw
2245 OPVCC(37, 0, 0, 0), // stwu
2246 OPVCC(38, 0, 0, 0), // stb
2247 OPVCC(39, 0, 0, 0), // stbu
2248 OPVCC(44, 0, 0, 0), // sth
2249 OPVCC(45, 0, 0, 0), // sthu
2250 OPVCC(47, 0, 0, 0), // stmw
2251 OPVCC(52, 0, 0, 0), // stfs
2252 OPVCC(53, 0, 0, 0), // stfsu
2253 OPVCC(54, 0, 0, 0), // stfd
2254 OPVCC(55, 0, 0, 0): // stfdu
2260 // Encode instructions and create relocation for accessing s+d according to the
2261 // instruction op with source or destination (as appropriate) register reg.
// Emits an addis (high half) followed by op (low half) and attaches the
// matching R_ADDRPOWER* relocation. reuse controls whether reg itself, rather
// than REGTMP, holds the intermediate high-half address.
2262 func (c *ctxt9) symbolAccess(s *obj.LSym, d int64, reg int16, op uint32, reuse bool) (o1, o2 uint32) {
2263 if c.ctxt.Headtype == objabi.Haix {
2264 // Every symbol access must be made via a TOC anchor.
2265 c.ctxt.Diag("symbolAccess called for %s", s.Name)
// D vs DS form decides which relocation variant is needed below.
2268 form := c.opform(op)
2269 if c.ctxt.Flag_shared {
2274 // If reg can be reused when computing the symbol address,
2275 // use it instead of REGTMP.
2277 o1 = AOP_IRR(OP_ADDIS, REGTMP, base, 0)
2278 o2 = AOP_IRR(op, uint32(reg), REGTMP, 0)
2280 o1 = AOP_IRR(OP_ADDIS, uint32(reg), base, 0)
2281 o2 = AOP_IRR(op, uint32(reg), uint32(reg), 0)
// The relocation covers both instructions starting at the current pc.
2283 rel := obj.Addrel(c.cursym)
2284 rel.Off = int32(c.pc)
2288 if c.ctxt.Flag_shared {
// TOC-relative relocations for shared builds; _DS variant for DS-form ops.
2291 rel.Type = objabi.R_ADDRPOWER_TOCREL
2293 rel.Type = objabi.R_ADDRPOWER_TOCREL_DS
2299 rel.Type = objabi.R_ADDRPOWER
2301 rel.Type = objabi.R_ADDRPOWER_DS
// getmask tries to express v as a contiguous 32-bit bit mask, storing the
// start (MB) and end (ME) bit positions in m; it reports success.
// NOTE(review): this excerpt is elided; comments describe only the visible lines.
2310 func getmask(m []byte, v uint32) bool {
// A mask that wraps (bit 31 and bit 0 both set, not all-ones) has MB > ME;
// it is handled by complementing, per the comment below.
2313 if v != ^uint32(0) && v&(1<<31) != 0 && v&1 != 0 { /* MB > ME */
// Scan from the most-significant bit for the first set bit (mask start).
2324 for i := 0; i < 32; i++ {
2325 if v&(1<<uint(31-i)) != 0 {
// Continue until the run of set bits ends (mask end).
2330 if i >= 32 || v&(1<<uint(31-i)) == 0 {
// Any further set bit means the mask is not contiguous.
2336 if v&(1<<uint(31-i)) != 0 {
// maskgen fills m with the MB/ME fields for 32-bit mask v, diagnosing
// any value that is not a valid contiguous mask.
2347 func (c *ctxt9) maskgen(p *obj.Prog, m []byte, v uint32) {
2349 c.ctxt.Diag("cannot generate mask #%x\n%v", v, p)
2354 * 64-bit masks (rldic etc)
// getmask64 is the 64-bit analogue of getmask: it finds the start and end
// positions of a contiguous run of set bits in v, storing them in m.
// NOTE(review): this excerpt is elided; comments describe only the visible lines.
2356 func getmask64(m []byte, v uint64) bool {
// Scan from the most-significant bit for the mask start.
2359 for i := 0; i < 64; i++ {
2360 if v&(uint64(1)<<uint(63-i)) != 0 {
// Continue until the run of set bits ends.
2365 if i >= 64 || v&(uint64(1)<<uint(63-i)) == 0 {
// Any further set bit means the mask is not contiguous.
2371 if v&(uint64(1)<<uint(63-i)) != 0 {
// maskgen64 fills m with the mask bounds for 64-bit mask v, diagnosing
// any value that is not a valid contiguous mask.
2382 func (c *ctxt9) maskgen64(p *obj.Prog, m []byte, v uint64) {
2383 if !getmask64(m, v) {
2384 c.ctxt.Diag("cannot generate mask #%x\n%v", v, p)
// loadu32 returns the single instruction that loads the upper half of a
// 32-bit constant d into r: oris for values that fit unsigned 32 bits,
// addis (sign-extending) otherwise.
2388 func loadu32(r int, d int64) uint32 {
2390 if isuint32(uint64(d)) {
2391 return LOP_IRR(OP_ORIS, uint32(r), REGZERO, uint32(v))
2393 return AOP_IRR(OP_ADDIS, uint32(r), REGZERO, uint32(v))
// high16adjusted returns the high 16 bits of d, adjusted up by one when the
// low half's sign bit is set, so that (high<<16) + sign-extended low == d.
2396 func high16adjusted(d int32) uint16 {
2398 return uint16((d >> 16) + 1)
2400 return uint16(d >> 16)
2403 func (c *ctxt9) asmout(p *obj.Prog, o *Optab, out []uint32) {
2410 //print("%v => case %d\n", p, o->type);
2413 c.ctxt.Diag("unknown type %d", o.type_)
2416 case 0: /* pseudo ops */
2419 case 2: /* int/cr/fp op Rb,[Ra],Rd */
2425 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2427 case 3: /* mov $soreg/addcon/andcon/ucon, r ==> addis/oris/addi/ori $i,reg',r */
2428 d := c.vregoff(&p.From)
2431 r := int(p.From.Reg)
2433 r = c.getimpliedreg(&p.From, p)
2435 if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 && (r != 0 || v != 0) {
2436 c.ctxt.Diag("literal operation on R0\n%v", p)
2441 log.Fatalf("invalid handling of %v", p)
2443 // For UCON operands the value is right shifted 16, using ADDIS if the
2444 // value should be signed, ORIS if unsigned.
2446 if r == REGZERO && isuint32(uint64(d)) {
2447 o1 = LOP_IRR(OP_ORIS, uint32(p.To.Reg), REGZERO, uint32(v))
2452 } else if int64(int16(d)) != d {
2453 // Operand is 16 bit value with sign bit set
2454 if o.a1 == C_ANDCON {
2455 // Needs unsigned 16 bit so use ORI
2456 if r == 0 || r == REGZERO {
2457 o1 = LOP_IRR(uint32(OP_ORI), uint32(p.To.Reg), uint32(0), uint32(v))
2460 // With ADDCON, needs signed 16 bit value, fall through to use ADDI
2461 } else if o.a1 != C_ADDCON {
2462 log.Fatalf("invalid handling of %v", p)
2466 o1 = AOP_IRR(uint32(a), uint32(p.To.Reg), uint32(r), uint32(v))
2468 case 4: /* add/mul $scon,[r1],r2 */
2469 v := c.regoff(&p.From)
2475 if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 {
2476 c.ctxt.Diag("literal operation on R0\n%v", p)
2478 if int32(int16(v)) != v {
2479 log.Fatalf("mishandled instruction %v", p)
2481 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2483 case 5: /* syscall */
2486 case 6: /* logical op Rb,[Rs,]Ra; no literal */
2492 // AROTL and AROTLW are extended mnemonics, which map to RLDCL and RLWNM.
2495 o1 = AOP_RLDIC(OP_RLDCL, uint32(p.To.Reg), uint32(r), uint32(p.From.Reg), uint32(0))
2497 o1 = OP_RLW(OP_RLWNM, uint32(p.To.Reg), uint32(r), uint32(p.From.Reg), 0, 31)
2499 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2502 case 7: /* mov r, soreg ==> stw o(r) */
2506 r = c.getimpliedreg(&p.To, p)
2508 v := c.regoff(&p.To)
2509 if p.To.Type == obj.TYPE_MEM && p.To.Index != 0 {
2511 c.ctxt.Diag("illegal indexed instruction\n%v", p)
2513 o1 = AOP_RRR(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(r))
2515 if int32(int16(v)) != v {
2516 log.Fatalf("mishandled instruction %v", p)
2518 // Offsets in DS form stores must be a multiple of 4
2519 inst := c.opstore(p.As)
2520 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
2521 log.Fatalf("invalid offset for DS form load/store %v", p)
2523 o1 = AOP_IRR(inst, uint32(p.From.Reg), uint32(r), uint32(v))
2526 case 8: /* mov soreg, r ==> lbz/lhz/lwz o(r), lbz o(r) + extsb r,r */
2527 r := int(p.From.Reg)
2530 r = c.getimpliedreg(&p.From, p)
2532 v := c.regoff(&p.From)
2533 if p.From.Type == obj.TYPE_MEM && p.From.Index != 0 {
2535 c.ctxt.Diag("illegal indexed instruction\n%v", p)
2537 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(r))
2539 if int32(int16(v)) != v {
2540 log.Fatalf("mishandled instruction %v", p)
2542 // Offsets in DS form loads must be a multiple of 4
2543 inst := c.opload(p.As)
2544 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
2545 log.Fatalf("invalid offset for DS form load/store %v", p)
2547 o1 = AOP_IRR(inst, uint32(p.To.Reg), uint32(r), uint32(v))
2550 // Sign extend MOVB operations. This is ignored for other cases (o.size == 4).
2551 o2 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
2553 case 10: /* sub Ra,[Rb],Rd => subf Rd,Ra,Rb */
2559 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(r))
2561 case 11: /* br/bl lbra */
2564 if p.To.Target() != nil {
2565 v = int32(p.To.Target().Pc - p.Pc)
2567 c.ctxt.Diag("odd branch target address\n%v", p)
2571 if v < -(1<<25) || v >= 1<<24 {
2572 c.ctxt.Diag("branch too far\n%v", p)
2576 o1 = OP_BR(c.opirr(p.As), uint32(v), 0)
2577 if p.To.Sym != nil {
2578 rel := obj.Addrel(c.cursym)
2579 rel.Off = int32(c.pc)
2582 v += int32(p.To.Offset)
2584 c.ctxt.Diag("odd branch target address\n%v", p)
2589 rel.Type = objabi.R_CALLPOWER
2591 o2 = 0x60000000 // nop, sometimes overwritten by ld r2, 24(r1) when dynamic linking
2593 case 13: /* mov[bhwd]{z,} r,r */
2594 // This needs to handle "MOV* $0, Rx". This shows up because $0 also
2595 // matches C_REG if r0iszero. This happens because C_REG sorts before C_ANDCON
2596 // TODO: fix the above behavior and cleanup this exception.
2597 if p.From.Type == obj.TYPE_CONST {
2598 o1 = LOP_IRR(OP_ADDI, REGZERO, uint32(p.To.Reg), 0)
2601 if p.To.Type == obj.TYPE_CONST {
2602 c.ctxt.Diag("cannot move into constant 0\n%v", p)
2607 o1 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2609 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.From.Reg), 0, 24, 31)
2611 o1 = LOP_RRR(OP_EXTSH, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2613 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.From.Reg), 0, 16, 31)
2615 o1 = LOP_RRR(OP_EXTSW, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2617 o1 = OP_RLW(OP_RLDIC, uint32(p.To.Reg), uint32(p.From.Reg), 0, 0, 0) | 1<<5 /* MB=32 */
2619 o1 = LOP_RRR(OP_OR, uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.From.Reg))
2621 c.ctxt.Diag("internal: bad register move/truncation\n%v", p)
2624 case 14: /* rldc[lr] Rb,Rs,$mask,Ra -- left, right give different masks */
2630 d := c.vregoff(p.GetFrom3())
2634 // These opcodes expect a mask operand that has to be converted into the
2635 // appropriate operand. The way these were defined, not all valid masks are possible.
2636 // Left here for compatibility in case they were used or generated.
2637 case ARLDCL, ARLDCLCC:
2639 c.maskgen64(p, mask[:], uint64(d))
2641 a = int(mask[0]) /* MB */
2643 c.ctxt.Diag("invalid mask for rotate: %x (end != bit 63)\n%v", uint64(d), p)
2645 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2646 o1 |= (uint32(a) & 31) << 6
2648 o1 |= 1 << 5 /* mb[5] is top bit */
2651 case ARLDCR, ARLDCRCC:
2653 c.maskgen64(p, mask[:], uint64(d))
2655 a = int(mask[1]) /* ME */
2657 c.ctxt.Diag("invalid mask for rotate: %x %x (start != 0)\n%v", uint64(d), mask[0], p)
2659 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2660 o1 |= (uint32(a) & 31) << 6
2662 o1 |= 1 << 5 /* mb[5] is top bit */
2665 // These opcodes use a shift count like the ppc64 asm, no mask conversion done
2666 case ARLDICR, ARLDICRCC:
2668 sh := c.regoff(&p.From)
2669 if me < 0 || me > 63 || sh > 63 {
2670 c.ctxt.Diag("Invalid me or sh for RLDICR: %x %x\n%v", int(d), sh, p)
2672 o1 = AOP_RLDIC(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(sh), uint32(me))
2674 case ARLDICL, ARLDICLCC, ARLDIC, ARLDICCC:
2676 sh := c.regoff(&p.From)
2677 if mb < 0 || mb > 63 || sh > 63 {
2678 c.ctxt.Diag("Invalid mb or sh for RLDIC, RLDICL: %x %x\n%v", mb, sh, p)
2680 o1 = AOP_RLDIC(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(sh), uint32(mb))
2683 // This is an extended mnemonic defined in the ISA section C.8.1
2684 // clrlsldi ra,rs,b,n --> rldic ra,rs,n,b-n
2685 // It maps onto RLDIC so is directly generated here based on the operands from
2688 b := c.regoff(&p.From)
2689 if n > b || b > 63 {
2690 c.ctxt.Diag("Invalid n or b for CLRLSLDI: %x %x\n%v", n, b, p)
2692 o1 = AOP_RLDIC(OP_RLDIC, uint32(p.To.Reg), uint32(r), uint32(n), uint32(b)-uint32(n))
2695 c.ctxt.Diag("unexpected op in rldc case\n%v", p)
2699 case 17, /* bc bo,bi,lbra (same for now) */
2700 16: /* bc bo,bi,sbra */
// The BO field ("a") comes either from an explicit constant operand
// or is derived from a CR register operand below.
2705 if p.From.Type == obj.TYPE_CONST {
2706 a = int(c.regoff(&p.From))
2707 } else if p.From.Type == obj.TYPE_REG {
2709 c.ctxt.Diag("unexpected register setting for branch with CR: %d\n", r)
2711 // BI values for the CR
2730 c.ctxt.Diag("unrecognized register: expecting CR\n")
// Displacement is target PC minus this instruction's PC.
2734 if p.To.Target() != nil {
2735 v = int32(p.To.Target().Pc - p.Pc)
// Branch targets must be word-aligned (low two bits of BD are not encodable).
2738 c.ctxt.Diag("odd branch target address\n%v", p)
// NOTE(review): asymmetric range check (-2^16 .. 2^15-1). The BD
// displacement is a 16-bit signed field, so the lower bound looks one
// power of two too lax — confirm against the BC encoding before changing.
2742 if v < -(1<<16) || v >= 1<<15 {
2743 c.ctxt.Diag("branch too far\n%v", p)
2745 o1 = OP_BC(c.opirr(p.As), uint32(a), uint32(r), uint32(v), 0)
2747 case 15: /* br/bl (r) => mov r,lr; br/bl (lr) */
2749 if p.As == ABC || p.As == ABCL {
2750 v = c.regoff(&p.To) & 31
2752 v = 20 /* unconditional */
2754 o1 = AOP_RRR(OP_MTSPR, uint32(p.To.Reg), 0, 0) | (REG_LR&0x1f)<<16 | ((REG_LR>>5)&0x1f)<<11
2755 o2 = OPVCC(19, 16, 0, 0)
2756 if p.As == ABL || p.As == ABCL {
2759 o2 = OP_BCR(o2, uint32(v), uint32(p.To.Index))
2761 case 18: /* br/bl (lr/ctr); bc/bcl bo,bi,(lr/ctr) */
2764 if p.As == ABC || p.As == ABCL {
2765 v = c.regoff(&p.From) & 31
2767 v = 20 /* unconditional */
2773 switch oclass(&p.To) {
2775 o1 = OPVCC(19, 528, 0, 0)
2778 o1 = OPVCC(19, 16, 0, 0)
2781 c.ctxt.Diag("bad optab entry (18): %d\n%v", p.To.Class, p)
2785 // Insert optional branch hint for bclr[l]/bcctr[l]
2786 if p.From3Type() != obj.TYPE_NONE {
2787 bh = uint32(p.GetFrom3().Offset)
2788 if bh == 2 || bh > 3 {
2789 log.Fatalf("BH must be 0,1,3 for %v", p)
2794 if p.As == ABL || p.As == ABCL {
2797 o1 = OP_BCR(o1, uint32(v), uint32(r))
2799 case 19: /* mov $lcon,r ==> cau+or */
2800 d := c.vregoff(&p.From)
2801 o1 = loadu32(int(p.To.Reg), d)
2802 o2 = LOP_IRR(OP_ORI, uint32(p.To.Reg), uint32(p.To.Reg), uint32(int32(d)))
2804 case 20: /* add $ucon,,r | addis $addcon,r,r */
2805 v := c.regoff(&p.From)
2811 if p.As == AADD && (r0iszero == 0 /*TypeKind(100016)*/ && p.Reg == 0 || r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0) {
2812 c.ctxt.Diag("literal operation on R0\n%v", p)
2815 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2817 o1 = AOP_IRR(c.opirr(AADDIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
2820 case 22: /* add $lcon/$andcon,r1,r2 ==> oris+ori+add/ori+add */
2821 if p.To.Reg == REGTMP || p.Reg == REGTMP {
2822 c.ctxt.Diag("can't synthesize large constant\n%v", p)
2824 d := c.vregoff(&p.From)
2829 if p.From.Sym != nil {
2830 c.ctxt.Diag("%v is not supported", p)
2832 // If operand is ANDCON, generate 2 instructions using
2833 // ORI for unsigned value; with LCON 3 instructions.
2835 o1 = LOP_IRR(OP_ORI, REGTMP, REGZERO, uint32(int32(d)))
2836 o2 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2838 o1 = loadu32(REGTMP, d)
2839 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(int32(d)))
2840 o3 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2843 case 23: /* and $lcon/$addcon,r1,r2 ==> oris+ori+and/addi+and */
2844 if p.To.Reg == REGTMP || p.Reg == REGTMP {
2845 c.ctxt.Diag("can't synthesize large constant\n%v", p)
2847 d := c.vregoff(&p.From)
2853 // With ADDCON operand, generate 2 instructions using ADDI for signed value,
2854 // with LCON operand generate 3 instructions.
2856 o1 = LOP_IRR(OP_ADDI, REGZERO, REGTMP, uint32(int32(d)))
2857 o2 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2859 o1 = loadu32(REGTMP, d)
2860 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(int32(d)))
2861 o3 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2863 if p.From.Sym != nil {
2864 c.ctxt.Diag("%v is not supported", p)
2867 case 24: /* lfd fA,float64(0) -> xxlxor xsA,xsaA,xsaA + fneg for -0 */
2868 o1 = AOP_XX3I(c.oprrr(AXXLXOR), uint32(p.To.Reg), uint32(p.To.Reg), uint32(p.To.Reg), uint32(0))
2869 // This is needed for -0.
2871 o2 = AOP_RRR(c.oprrr(AFNEG), uint32(p.To.Reg), 0, uint32(p.To.Reg))
2875 /* sld[.] $sh,rS,rA -> rldicr[.] $sh,rS,mask(0,63-sh),rA; srd[.] -> rldicl */
2876 v := c.regoff(&p.From)
2904 c.ctxt.Diag("unexpected op in sldi case\n%v", p)
2909 if p.As == AEXTSWSLI || p.As == AEXTSWSLICC {
2910 o1 = AOP_EXTSWSLI(OP_EXTSWSLI, uint32(r), uint32(p.To.Reg), uint32(v))
2913 o1 = AOP_RLDIC(op, uint32(p.To.Reg), uint32(r), uint32(v), uint32(a))
2915 if p.As == ASLDCC || p.As == ASRDCC || p.As == AEXTSWSLICC {
2916 o1 |= 1 // Set the condition code bit
2919 case 26: /* mov $lsext/auto/oreg,,r2 ==> addis+addi */
2920 v := c.vregoff(&p.From)
2921 r := int(p.From.Reg)
2923 switch p.From.Name {
2924 case obj.NAME_EXTERN, obj.NAME_STATIC:
2925 // Load a 32 bit constant, or relocation depending on if a symbol is attached
2926 o1, o2 = c.symbolAccess(p.From.Sym, v, p.To.Reg, OP_ADDI, true)
2929 r = c.getimpliedreg(&p.From, p)
2931 // Add a 32 bit offset to a register.
2932 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), uint32(r), uint32(high16adjusted(int32(v))))
2933 o2 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), uint32(p.To.Reg), uint32(v))
2936 case 27: /* subc ra,$simm,rd => subfic rd,ra,$simm */
2937 v := c.regoff(p.GetFrom3())
2939 r := int(p.From.Reg)
2940 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2942 case 28: /* subc r1,$lcon,r2 ==> cau+or+subfc */
2943 if p.To.Reg == REGTMP || p.From.Reg == REGTMP {
2944 c.ctxt.Diag("can't synthesize large constant\n%v", p)
2946 v := c.regoff(p.GetFrom3())
2947 o1 = AOP_IRR(OP_ADDIS, REGTMP, REGZERO, uint32(v)>>16)
2948 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(v))
2949 o3 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), REGTMP)
2950 if p.From.Sym != nil {
2951 c.ctxt.Diag("%v is not supported", p)
2954 case 29: /* rldic[lr]? $sh,s,$mask,a -- left, right, plain give different masks */
2955 v := c.regoff(&p.From)
2957 d := c.vregoff(p.GetFrom3())
2959 c.maskgen64(p, mask[:], uint64(d))
2962 case ARLDC, ARLDCCC:
2963 a = int(mask[0]) /* MB */
2964 if int32(mask[1]) != (63 - v) {
2965 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[1], v, p)
2968 case ARLDCL, ARLDCLCC:
2969 a = int(mask[0]) /* MB */
// An rldicl mask must extend to bit 63; the generated mask[1] (ME) is
// checked elsewhere in this arm. Diagnostic fix: mask[1] is a numeric
// byte, so print it with %x — matching the ARLDC and ARLDCR arms of this
// switch — rather than %s, which misformats a non-string operand.
2971 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[1], v, p)
2974 case ARLDCR, ARLDCRCC:
2975 a = int(mask[1]) /* ME */
2977 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[0], v, p)
2981 c.ctxt.Diag("unexpected op in rldic case\n%v", p)
2985 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
2986 o1 |= (uint32(a) & 31) << 6
2991 o1 |= 1 << 5 /* mb[5] is top bit */
2994 case 30: /* rldimi $sh,s,$mask,a */
2995 v := c.regoff(&p.From)
2997 d := c.vregoff(p.GetFrom3())
2999 // Original opcodes had mask operands which had to be converted to a shift count as expected by
3002 case ARLDMI, ARLDMICC:
3004 c.maskgen64(p, mask[:], uint64(d))
3005 if int32(mask[1]) != (63 - v) {
3006 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[1], v, p)
3008 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
3009 o1 |= (uint32(mask[0]) & 31) << 6
3013 if mask[0]&0x20 != 0 {
3014 o1 |= 1 << 5 /* mb[5] is top bit */
3017 // Opcodes with shift count operands.
3018 case ARLDIMI, ARLDIMICC:
3019 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
3020 o1 |= (uint32(d) & 31) << 6
3029 case 31: /* dword */
3030 d := c.vregoff(&p.From)
3032 if c.ctxt.Arch.ByteOrder == binary.BigEndian {
3033 o1 = uint32(d >> 32)
3037 o2 = uint32(d >> 32)
3040 if p.From.Sym != nil {
3041 rel := obj.Addrel(c.cursym)
3042 rel.Off = int32(c.pc)
3044 rel.Sym = p.From.Sym
3045 rel.Add = p.From.Offset
3046 rel.Type = objabi.R_ADDR
3051 case 32: /* fmul frc,fra,frd */
3057 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0) | (uint32(p.From.Reg)&31)<<6
3059 case 33: /* fabs [frb,]frd; fmr. frb,frd */
3060 r := int(p.From.Reg)
3062 if oclass(&p.From) == C_NONE {
3065 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), 0, uint32(r))
3067 case 34: /* FMADDx fra,frb,frc,frt (t=a*c±b) */
3068 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg)) | (uint32(p.GetFrom3().Reg)&31)<<6
3070 case 35: /* mov r,lext/lauto/loreg ==> cau $(v>>16),sb,r'; store o(r') */
3071 v := c.regoff(&p.To)
3075 r = c.getimpliedreg(&p.To, p)
3077 // Offsets in DS form stores must be a multiple of 4
3078 inst := c.opstore(p.As)
3079 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3080 log.Fatalf("invalid offset for DS form load/store %v", p)
3082 o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v)))
3083 o2 = AOP_IRR(inst, uint32(p.From.Reg), REGTMP, uint32(v))
3085 case 36: /* mov b/bz/h/hz lext/lauto/lreg,r ==> lbz+extsb/lbz/lha/lhz etc */
3086 v := c.regoff(&p.From)
3088 r := int(p.From.Reg)
3090 r = c.getimpliedreg(&p.From, p)
3092 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), uint32(r), uint32(high16adjusted(v)))
3093 o2 = AOP_IRR(c.opload(p.As), uint32(p.To.Reg), uint32(p.To.Reg), uint32(v))
3095 // Sign extend MOVB if needed
3096 o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3099 o1 = uint32(c.regoff(&p.From))
3101 case 41: /* stswi */
3102 o1 = AOP_RRR(c.opirr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), 0) | (uint32(c.regoff(p.GetFrom3()))&0x7F)<<11
3105 o1 = AOP_RRR(c.opirr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), 0) | (uint32(c.regoff(p.GetFrom3()))&0x7F)<<11
3107 case 43: /* data cache instructions: op (Ra+[Rb]), [th|l] */
3108 /* TH field for dcbt/dcbtst: */
3109 /* 0 = Block access - program will soon access EA. */
3110 /* 8-15 = Stream access - sequence of access (data stream). See section 4.3.2 of the ISA for details. */
3111 /* 16 = Block access - program will soon make a transient access to EA. */
3112 /* 17 = Block access - program will not access EA for a long time. */
3114 /* L field for dcbf: */
3115 /* 0 = invalidates the block containing EA in all processors. */
3116 /* 1 = same as 0, but with limited scope (i.e. block in the current processor will not be reused soon). */
3117 /* 3 = same as 1, but with even more limited scope (i.e. block in the current processor primary cache will not be reused soon). */
3118 if p.To.Type == obj.TYPE_NONE {
3119 o1 = AOP_RRR(c.oprrr(p.As), 0, uint32(p.From.Index), uint32(p.From.Reg))
3121 th := c.regoff(&p.To)
3122 o1 = AOP_RRR(c.oprrr(p.As), uint32(th), uint32(p.From.Index), uint32(p.From.Reg))
3125 case 44: /* indexed store */
3126 o1 = AOP_RRR(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(p.To.Reg))
3128 case 45: /* indexed load */
3130 /* The assembler accepts a 4-operand l*arx instruction. The fourth operand is an Exclusive Access Hint (EH) */
3131 /* The EH field can be used as a lock acquire/release hint as follows: */
3132 /* 0 = Atomic Update (fetch-and-operate or similar algorithm) */
3133 /* 1 = Exclusive Access (lock acquire and release) */
3134 case ALBAR, ALHAR, ALWAR, ALDAR:
3135 if p.From3Type() != obj.TYPE_NONE {
3136 eh := int(c.regoff(p.GetFrom3()))
3138 c.ctxt.Diag("illegal EH field\n%v", p)
3140 o1 = AOP_RRRI(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg), uint32(eh))
3142 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3145 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3147 case 46: /* plain op */
3150 case 47: /* op Ra, Rd; also op [Ra,] Rd */
3151 r := int(p.From.Reg)
3156 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0)
3158 case 48: /* op Rs, Ra */
3159 r := int(p.From.Reg)
3164 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0)
3166 case 49: /* op Rb; op $n, Rb */
3167 if p.From.Type != obj.TYPE_REG { /* tlbie $L, rB */
3168 v := c.regoff(&p.From) & 1
3169 o1 = AOP_RRR(c.oprrr(p.As), 0, 0, uint32(p.To.Reg)) | uint32(v)<<21
3171 o1 = AOP_RRR(c.oprrr(p.As), 0, 0, uint32(p.From.Reg))
3174 case 50: /* rem[u] r1[,r2],r3 */
3181 t := v & (1<<10 | 1) /* OE|Rc */
3182 o1 = AOP_RRR(v&^t, REGTMP, uint32(r), uint32(p.From.Reg))
3183 o2 = AOP_RRR(OP_MULLW, REGTMP, REGTMP, uint32(p.From.Reg))
3184 o3 = AOP_RRR(OP_SUBF|t, uint32(p.To.Reg), REGTMP, uint32(r))
3188 /* Clear top 32 bits */
3189 o3 = OP_RLW(OP_RLDIC, REGTMP, REGTMP, 0, 0, 0) | 1<<5
3192 case 51: /* remd[u] r1[,r2],r3 */
3199 t := v & (1<<10 | 1) /* OE|Rc */
3200 o1 = AOP_RRR(v&^t, REGTMP, uint32(r), uint32(p.From.Reg))
3201 o2 = AOP_RRR(OP_MULLD, REGTMP, REGTMP, uint32(p.From.Reg))
3202 o3 = AOP_RRR(OP_SUBF|t, uint32(p.To.Reg), REGTMP, uint32(r))
3203 /* cases 50,51: removed; can be reused. */
3205 /* cases 50,51: removed; can be reused. */
3207 case 52: /* mtfsbNx cr(n) */
3208 v := c.regoff(&p.From) & 31
3210 o1 = AOP_RRR(c.oprrr(p.As), uint32(v), 0, 0)
3212 case 53: /* mffsX ,fr1 */
3213 o1 = AOP_RRR(OP_MFFS, uint32(p.To.Reg), 0, 0)
3215 case 55: /* op Rb, Rd */
3216 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), 0, uint32(p.From.Reg))
3218 case 56: /* sra $sh,[s,]a; srd $sh,[s,]a */
3219 v := c.regoff(&p.From)
3225 o1 = AOP_RRR(c.opirr(p.As), uint32(r), uint32(p.To.Reg), uint32(v)&31)
3226 if (p.As == ASRAD || p.As == ASRADCC) && (v&0x20 != 0) {
3227 o1 |= 1 << 1 /* mb[5] */
3230 case 57: /* slw $sh,[s,]a -> rlwinm ... */
3231 v := c.regoff(&p.From)
3239 * Let user (gs) shoot himself in the foot.
3240 * qc has already complained.
3243 ctxt->diag("illegal shift %ld\n%v", v, p);
3253 mask[0], mask[1] = 0, 31
3255 mask[0], mask[1] = uint8(v), 31
3258 mask[0], mask[1] = 0, uint8(31-v)
3260 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(r), uint32(v), uint32(mask[0]), uint32(mask[1]))
3261 if p.As == ASLWCC || p.As == ASRWCC {
3262 o1 |= 1 // set the condition code
3265 case 58: /* logical $andcon,[s],a */
3266 v := c.regoff(&p.From)
3272 o1 = LOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
3274 case 59: /* or/xor/and $ucon,,r | oris/xoris/andis $addcon,r,r */
3275 v := c.regoff(&p.From)
3283 o1 = LOP_IRR(c.opirr(AORIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16) /* oris, xoris, andis. */
3285 o1 = LOP_IRR(c.opirr(AXORIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
3287 o1 = LOP_IRR(c.opirr(AANDISCC), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
3289 o1 = LOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
3292 case 60: /* tw to,a,b */
3293 r := int(c.regoff(&p.From) & 31)
3295 o1 = AOP_RRR(c.oprrr(p.As), uint32(r), uint32(p.Reg), uint32(p.To.Reg))
3297 case 61: /* tw to,a,$simm */
3298 r := int(c.regoff(&p.From) & 31)
3300 v := c.regoff(&p.To)
3301 o1 = AOP_IRR(c.opirr(p.As), uint32(r), uint32(p.Reg), uint32(v))
3303 case 62: /* rlwmi $sh,s,$mask,a */
3304 v := c.regoff(&p.From)
3307 n := c.regoff(p.GetFrom3())
3308 // This is an extended mnemonic described in the ISA C.8.2
3309 // clrlslwi ra,rs,b,n -> rlwinm ra,rs,n,b-n,31-n
3310 // It maps onto rlwinm which is directly generated here.
3311 if n > v || v >= 32 {
3312 c.ctxt.Diag("Invalid n or b for CLRLSLWI: %x %x\n%v", v, n, p)
3315 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.Reg), uint32(n), uint32(v-n), uint32(31-n))
3318 c.maskgen(p, mask[:], uint32(c.regoff(p.GetFrom3())))
3319 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), uint32(v))
3320 o1 |= (uint32(mask[0])&31)<<6 | (uint32(mask[1])&31)<<1
3323 case 63: /* rlwmi b,s,$mask,a */
3325 c.maskgen(p, mask[:], uint32(c.regoff(p.GetFrom3())))
3326 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.Reg), uint32(p.To.Reg), uint32(p.From.Reg))
3327 o1 |= (uint32(mask[0])&31)<<6 | (uint32(mask[1])&31)<<1
3329 case 64: /* mtfsf fr[, $m] {,fpcsr} */
3331 if p.From3Type() != obj.TYPE_NONE {
3332 v = c.regoff(p.GetFrom3()) & 255
3336 o1 = OP_MTFSF | uint32(v)<<17 | uint32(p.From.Reg)<<11
3338 case 65: /* MOVFL $imm,FPSCR(n) => mtfsfi crfd,imm */
3340 c.ctxt.Diag("must specify FPSCR(n)\n%v", p)
3342 o1 = OP_MTFSFI | (uint32(p.To.Reg)&15)<<23 | (uint32(c.regoff(&p.From))&31)<<12
3344 case 66: /* mov spr,r1; mov r1,spr, also dcr */
3347 if REG_R0 <= p.From.Reg && p.From.Reg <= REG_R31 {
3350 if REG_DCR0 <= v && v <= REG_DCR0+1023 {
3351 o1 = OPVCC(31, 451, 0, 0) /* mtdcr */
3353 o1 = OPVCC(31, 467, 0, 0) /* mtspr */
3357 v = int32(p.From.Reg)
3358 if REG_DCR0 <= v && v <= REG_DCR0+1023 {
3359 o1 = OPVCC(31, 323, 0, 0) /* mfdcr */
3361 o1 = OPVCC(31, 339, 0, 0) /* mfspr */
3365 o1 = AOP_RRR(o1, uint32(r), 0, 0) | (uint32(v)&0x1f)<<16 | ((uint32(v)>>5)&0x1f)<<11
3367 case 67: /* mcrf crfD,crfS */
3368 if p.From.Type != obj.TYPE_REG || p.From.Reg < REG_CR0 || REG_CR7 < p.From.Reg || p.To.Type != obj.TYPE_REG || p.To.Reg < REG_CR0 || REG_CR7 < p.To.Reg {
3369 c.ctxt.Diag("illegal CR field number\n%v", p)
3371 o1 = AOP_RRR(OP_MCRF, ((uint32(p.To.Reg) & 7) << 2), ((uint32(p.From.Reg) & 7) << 2), 0)
3373 case 68: /* mfcr rD; mfocrf CRM,rD */
3374 if p.From.Type == obj.TYPE_REG && REG_CR0 <= p.From.Reg && p.From.Reg <= REG_CR7 {
3375 v := int32(1 << uint(7-(p.To.Reg&7))) /* CR(n) */
3376 o1 = AOP_RRR(OP_MFCR, uint32(p.To.Reg), 0, 0) | 1<<20 | uint32(v)<<12 /* new form, mfocrf */
3378 o1 = AOP_RRR(OP_MFCR, uint32(p.To.Reg), 0, 0) /* old form, whole register */
3381 case 69: /* mtcrf CRM,rS */
3383 if p.From3Type() != obj.TYPE_NONE {
3385 c.ctxt.Diag("can't use both mask and CR(n)\n%v", p)
3387 v = c.regoff(p.GetFrom3()) & 0xff
3392 v = 1 << uint(7-(p.To.Reg&7)) /* CR(n) */
3396 o1 = AOP_RRR(OP_MTCRF, uint32(p.From.Reg), 0, 0) | uint32(v)<<12
3398 case 70: /* [f]cmp r,r,cr*/
3403 r = (int(p.Reg) & 7) << 2
3405 o1 = AOP_RRR(c.oprrr(p.As), uint32(r), uint32(p.From.Reg), uint32(p.To.Reg))
3407 case 71: /* cmp[l] r,i,cr*/
3412 r = (int(p.Reg) & 7) << 2
3414 o1 = AOP_RRR(c.opirr(p.As), uint32(r), uint32(p.From.Reg), 0) | uint32(c.regoff(&p.To))&0xffff
3416 case 72: /* slbmte (Rb+Rs -> slb[Rb]) -> Rs, Rb */
3417 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), 0, uint32(p.To.Reg))
3419 case 73: /* mcrfs crfD,crfS */
3420 if p.From.Type != obj.TYPE_REG || p.From.Reg != REG_FPSCR || p.To.Type != obj.TYPE_REG || p.To.Reg < REG_CR0 || REG_CR7 < p.To.Reg {
3421 c.ctxt.Diag("illegal FPSCR/CR field number\n%v", p)
3423 o1 = AOP_RRR(OP_MCRFS, ((uint32(p.To.Reg) & 7) << 2), ((0 & 7) << 2), 0)
3425 case 77: /* syscall $scon, syscall Rx */
3426 if p.From.Type == obj.TYPE_CONST {
// The syscall number must fit the addi signed-immediate range.
3427 if p.From.Offset > BIG || p.From.Offset < -BIG {
3428 c.ctxt.Diag("illegal syscall, sysnum too large: %v", p)
// Load the syscall number into R0: addi r0,0,n (RA=0 form reads zero).
3430 o1 = AOP_IRR(OP_ADDI, REGZERO, REGZERO, uint32(p.From.Offset))
3431 } else if p.From.Type == obj.TYPE_REG {
// Copy the syscall number from the given register into R0: or r0,rx,rx.
3432 o1 = LOP_RRR(OP_OR, REGZERO, uint32(p.From.Reg), uint32(p.From.Reg))
3434 c.ctxt.Diag("illegal syscall: %v", p)
// Encode an unconditional trap so the bad form faults at run time.
3435 o1 = 0x7fe00008 // trap always
// Clear R0 after the syscall instruction.
3439 o3 = AOP_RRR(c.oprrr(AXOR), REGZERO, REGZERO, REGZERO) // XOR R0, R0
3441 case 78: /* undef */
3442 o1 = 0 /* "An instruction consisting entirely of binary 0s is guaranteed
3443 always to be an illegal instruction." */
3445 /* relocation operations */
3447 v := c.vregoff(&p.To)
3448 // Offsets in DS form stores must be a multiple of 4
3449 inst := c.opstore(p.As)
3450 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3451 log.Fatalf("invalid offset for DS form load/store %v", p)
3453 // Can't reuse base for store instructions.
3454 o1, o2 = c.symbolAccess(p.To.Sym, v, p.From.Reg, inst, false)
3456 case 75: // 32 bit offset symbol loads (got/toc/addr)
3459 // Offsets in DS form loads must be a multiple of 4
3460 inst := c.opload(p.As)
3461 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3462 log.Fatalf("invalid offset for DS form load/store %v", p)
3464 switch p.From.Name {
3465 case obj.NAME_GOTREF, obj.NAME_TOCREF:
3467 c.ctxt.Diag("invalid offset for GOT/TOC access %v", p)
3469 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0)
3470 o2 = AOP_IRR(inst, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3471 rel := obj.Addrel(c.cursym)
3472 rel.Off = int32(c.pc)
3474 rel.Sym = p.From.Sym
3475 switch p.From.Name {
3476 case obj.NAME_GOTREF:
3477 rel.Type = objabi.R_ADDRPOWER_GOT
3478 case obj.NAME_TOCREF:
3479 rel.Type = objabi.R_ADDRPOWER_TOCREL_DS
3482 reuseBaseReg := p.As != AFMOVD && p.As != AFMOVS
3483 // Reuse To.Reg as base register if not FP move.
3484 o1, o2 = c.symbolAccess(p.From.Sym, v, p.To.Reg, inst, reuseBaseReg)
3487 o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3490 if p.From.Offset != 0 {
3491 c.ctxt.Diag("invalid offset against tls var %v", p)
3493 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R13, 0)
3494 o2 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3495 rel := obj.Addrel(c.cursym)
3496 rel.Off = int32(c.pc)
3498 rel.Sym = p.From.Sym
3499 rel.Type = objabi.R_POWER_TLS_LE
3502 if p.From.Offset != 0 {
3503 c.ctxt.Diag("invalid offset against tls var %v", p)
3505 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0)
3506 o2 = AOP_IRR(c.opload(AMOVD), uint32(p.To.Reg), uint32(p.To.Reg), 0)
3507 o3 = AOP_RRR(OP_ADD, uint32(p.To.Reg), uint32(p.To.Reg), REG_R13)
3508 rel := obj.Addrel(c.cursym)
3509 rel.Off = int32(c.pc)
3511 rel.Sym = p.From.Sym
3512 rel.Type = objabi.R_POWER_TLS_IE
3513 rel = obj.Addrel(c.cursym)
3514 rel.Off = int32(c.pc) + 8
3516 rel.Sym = p.From.Sym
3517 rel.Type = objabi.R_POWER_TLS
3519 case 82: /* vector instructions, VX-form and VC-form */
3520 if p.From.Type == obj.TYPE_REG {
3521 /* reg reg none OR reg reg reg */
3522 /* 3-register operand order: VRA, VRB, VRT */
3523 /* 2-register operand order: VRA, VRT */
3524 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3525 } else if p.From3Type() == obj.TYPE_CONST {
3526 /* imm imm reg reg */
3527 /* operand order: SIX, VRA, ST, VRT */
3528 six := int(c.regoff(&p.From))
3529 st := int(c.regoff(p.GetFrom3()))
3530 o1 = AOP_IIRR(c.opiirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(st), uint32(six))
3531 } else if p.From3Type() == obj.TYPE_NONE && p.Reg != 0 {
3533 /* operand order: UIM, VRB, VRT */
3534 uim := int(c.regoff(&p.From))
3535 o1 = AOP_VIRR(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(uim))
3538 /* operand order: SIM, VRT */
3539 sim := int(c.regoff(&p.From))
3540 o1 = AOP_IR(c.opirr(p.As), uint32(p.To.Reg), uint32(sim))
3543 case 83: /* vector instructions, VA-form */
3544 if p.From.Type == obj.TYPE_REG {
3545 /* reg reg reg reg */
3546 /* 4-register operand order: VRA, VRB, VRC, VRT */
3547 o1 = AOP_RRRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg))
3548 } else if p.From.Type == obj.TYPE_CONST {
3549 /* imm reg reg reg */
3550 /* operand order: SHB, VRA, VRB, VRT */
3551 shb := int(c.regoff(&p.From))
3552 o1 = AOP_IRRR(c.opirrr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg), uint32(shb))
3555 case 84: // ISEL BC,RA,RB,RT -> isel rt,ra,rb,bc
3556 bc := c.vregoff(&p.From)
3558 // rt = To.Reg, ra = p.Reg, rb = p.From3.Reg
3559 o1 = AOP_ISEL(OP_ISEL, uint32(p.To.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg), uint32(bc))
3561 case 85: /* vector instructions, VX-form */
3563 /* 2-register operand order: VRB, VRT */
3564 o1 = AOP_RR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg))
3566 case 86: /* VSX indexed store, XX1-form */
3568 /* 3-register operand order: XT, (RB)(RA*1) */
3569 o1 = AOP_XX1(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(p.To.Reg))
3571 case 87: /* VSX indexed load, XX1-form */
3573 /* 3-register operand order: (RB)(RA*1), XT */
3574 o1 = AOP_XX1(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3576 case 88: /* VSX mfvsr* instructions, XX1-form XS,RA */
3577 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3579 case 89: /* VSX instructions, XX2-form */
3580 /* reg none reg OR reg imm reg */
3581 /* 2-register operand order: XB, XT or XB, UIM, XT*/
3582 uim := int(c.regoff(p.GetFrom3()))
3583 o1 = AOP_XX2(c.oprrr(p.As), uint32(p.To.Reg), uint32(uim), uint32(p.From.Reg))
3585 case 90: /* VSX instructions, XX3-form */
3586 if p.From3Type() == obj.TYPE_NONE {
3588 /* 3-register operand order: XA, XB, XT */
3589 o1 = AOP_XX3(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3590 } else if p.From3Type() == obj.TYPE_CONST {
3591 /* reg reg reg imm */
3592 /* operand order: XA, XB, DM, XT */
3593 dm := int(c.regoff(p.GetFrom3()))
3594 o1 = AOP_XX3I(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(dm))
3597 case 91: /* VSX instructions, XX4-form */
3598 /* reg reg reg reg */
3599 /* 3-register operand order: XA, XB, XC, XT */
3600 o1 = AOP_XX4(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg))
3602 case 92: /* X-form instructions, 3-operands */
3603 if p.To.Type == obj.TYPE_CONST {
3605 xf := int32(p.From.Reg)
3606 if REG_F0 <= xf && xf <= REG_F31 {
3607 /* operand order: FRA, FRB, BF */
3608 bf := int(c.regoff(&p.To)) << 2
3609 o1 = AOP_RRR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg), uint32(p.Reg))
3611 /* operand order: RA, RB, L */
3612 l := int(c.regoff(&p.To))
3613 o1 = AOP_RRR(c.opirr(p.As), uint32(l), uint32(p.From.Reg), uint32(p.Reg))
3615 } else if p.From3Type() == obj.TYPE_CONST {
3617 /* operand order: RB, L, RA */
3618 l := int(c.regoff(p.GetFrom3()))
3619 o1 = AOP_RRR(c.opirr(p.As), uint32(l), uint32(p.To.Reg), uint32(p.From.Reg))
3620 } else if p.To.Type == obj.TYPE_REG {
3621 cr := int32(p.To.Reg)
3622 if REG_CR0 <= cr && cr <= REG_CR7 {
3624 /* operand order: RA, RB, BF */
3625 bf := (int(p.To.Reg) & 7) << 2
3626 o1 = AOP_RRR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg), uint32(p.Reg))
3627 } else if p.From.Type == obj.TYPE_CONST {
3629 /* operand order: L, RT */
3630 l := int(c.regoff(&p.From))
3631 o1 = AOP_RRR(c.opirr(p.As), uint32(p.To.Reg), uint32(l), uint32(p.Reg))
3634 case ACOPY, APASTECC:
3635 o1 = AOP_RRR(c.opirr(p.As), uint32(1), uint32(p.From.Reg), uint32(p.To.Reg))
3638 /* operand order: RS, RB, RA */
3639 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3644 case 93: /* X-form instructions, 2-operands */
3645 if p.To.Type == obj.TYPE_CONST {
3647 /* operand order: FRB, BF */
3648 bf := int(c.regoff(&p.To)) << 2
3649 o1 = AOP_RR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg))
3650 } else if p.Reg == 0 {
3651 /* popcnt* r,r, X-form */
3652 /* operand order: RS, RA */
3653 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3656 case 94: /* Z23-form instructions, 4-operands */
3657 /* reg reg reg imm */
3658 /* operand order: RA, RB, CY, RT */
3659 cy := int(c.regoff(p.GetFrom3()))
3660 o1 = AOP_Z23I(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(cy))
3662 case 96: /* VSX load, DQ-form */
3664 /* operand order: (RA)(DQ), XT */
3665 dq := int16(c.regoff(&p.From))
3667 c.ctxt.Diag("invalid offset for DQ form load/store %v", dq)
3669 o1 = AOP_DQ(c.opload(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(dq))
3671 case 97: /* VSX store, DQ-form */
3673 /* operand order: XT, (RA)(DQ) */
3674 dq := int16(c.regoff(&p.To))
3676 c.ctxt.Diag("invalid offset for DQ form load/store %v", dq)
3678 o1 = AOP_DQ(c.opstore(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(dq))
3679 case 98: /* VSX indexed load or load with length (also left-justified), x-form */
3680 /* vsreg, reg, reg */
3681 o1 = AOP_XX1(c.opload(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3682 case 99: /* VSX store with length (also left-justified) x-form */
3683 /* reg, reg, vsreg */
3684 o1 = AOP_XX1(c.opstore(p.As), uint32(p.From.Reg), uint32(p.Reg), uint32(p.To.Reg))
3685 case 100: /* VSX X-form XXSPLTIB */
3686 if p.From.Type == obj.TYPE_CONST {
3688 uim := int(c.regoff(&p.From))
3690 /* Use AOP_XX1 form with 0 for one of the registers. */
3691 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.To.Reg), uint32(0), uint32(uim))
3693 c.ctxt.Diag("invalid ops for %v", p.As)
3696 o1 = AOP_XX2(c.oprrr(p.As), uint32(p.To.Reg), uint32(0), uint32(p.From.Reg))
3698 case 102: /* RLWMI $sh,rs,$mb,$me,rt (M-form opcode)*/
3699 mb := uint32(c.regoff(&p.RestArgs[0].Addr))
3700 me := uint32(c.regoff(&p.RestArgs[1].Addr))
3701 sh := uint32(c.regoff(&p.From))
3702 o1 = OP_RLW(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), sh, mb, me)
3704 case 103: /* RLWMI rb,rs,$mb,$me,rt (M-form opcode)*/
3705 mb := uint32(c.regoff(&p.RestArgs[0].Addr))
3706 me := uint32(c.regoff(&p.RestArgs[1].Addr))
3707 o1 = OP_RLW(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.From.Reg), mb, me)
3709 case 104: /* VSX mtvsr* instructions, XX1-form RA,RB,XT */
3710 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3720 func (c *ctxt9) vregoff(a *obj.Addr) int64 {
3728 func (c *ctxt9) regoff(a *obj.Addr) int32 {
3729 return int32(c.vregoff(a))
3732 func (c *ctxt9) oprrr(a obj.As) uint32 {
3735 return OPVCC(31, 266, 0, 0)
3737 return OPVCC(31, 266, 0, 1)
3739 return OPVCC(31, 266, 1, 0)
3741 return OPVCC(31, 266, 1, 1)
3743 return OPVCC(31, 10, 0, 0)
3745 return OPVCC(31, 10, 0, 1)
3747 return OPVCC(31, 10, 1, 0)
3749 return OPVCC(31, 10, 1, 1)
3751 return OPVCC(31, 138, 0, 0)
3753 return OPVCC(31, 138, 0, 1)
3755 return OPVCC(31, 138, 1, 0)
3757 return OPVCC(31, 138, 1, 1)
3759 return OPVCC(31, 234, 0, 0)
3761 return OPVCC(31, 234, 0, 1)
3763 return OPVCC(31, 234, 1, 0)
3765 return OPVCC(31, 234, 1, 1)
3767 return OPVCC(31, 202, 0, 0)
3769 return OPVCC(31, 202, 0, 1)
3771 return OPVCC(31, 202, 1, 0)
3773 return OPVCC(31, 202, 1, 1)
3775 return OPVCC(31, 170, 0, 0) /* addex - v3.0b */
3778 return OPVCC(31, 28, 0, 0)
3780 return OPVCC(31, 28, 0, 1)
3782 return OPVCC(31, 60, 0, 0)
3784 return OPVCC(31, 60, 0, 1)
3787 return OPVCC(31, 0, 0, 0) | 1<<21 /* L=1 */
3789 return OPVCC(31, 32, 0, 0) | 1<<21
3791 return OPVCC(31, 0, 0, 0) /* L=0 */
3793 return OPVCC(31, 32, 0, 0)
3795 return OPVCC(31, 508, 0, 0) /* cmpb - v2.05 */
3797 return OPVCC(31, 224, 0, 0) /* cmpeqb - v3.00 */
3800 return OPVCC(31, 26, 0, 0)
3802 return OPVCC(31, 26, 0, 1)
3804 return OPVCC(31, 58, 0, 0)
3806 return OPVCC(31, 58, 0, 1)
3809 return OPVCC(19, 257, 0, 0)
3811 return OPVCC(19, 129, 0, 0)
3813 return OPVCC(19, 289, 0, 0)
3815 return OPVCC(19, 225, 0, 0)
3817 return OPVCC(19, 33, 0, 0)
3819 return OPVCC(19, 449, 0, 0)
3821 return OPVCC(19, 417, 0, 0)
3823 return OPVCC(19, 193, 0, 0)
3826 return OPVCC(31, 86, 0, 0)
3828 return OPVCC(31, 470, 0, 0)
3830 return OPVCC(31, 54, 0, 0)
3832 return OPVCC(31, 278, 0, 0)
3834 return OPVCC(31, 246, 0, 0)
3836 return OPVCC(31, 1014, 0, 0)
3839 return OPVCC(31, 265, 0, 0) /* modud - v3.0 */
3841 return OPVCC(31, 267, 0, 0) /* moduw - v3.0 */
3843 return OPVCC(31, 777, 0, 0) /* modsd - v3.0 */
3845 return OPVCC(31, 779, 0, 0) /* modsw - v3.0 */
3848 return OPVCC(31, 491, 0, 0)
3851 return OPVCC(31, 491, 0, 1)
3854 return OPVCC(31, 491, 1, 0)
3857 return OPVCC(31, 491, 1, 1)
3860 return OPVCC(31, 459, 0, 0)
3863 return OPVCC(31, 459, 0, 1)
3866 return OPVCC(31, 459, 1, 0)
3869 return OPVCC(31, 459, 1, 1)
3872 return OPVCC(31, 489, 0, 0)
3875 return OPVCC(31, 489, 0, 1)
3878 return OPVCC(31, 425, 0, 0)
3881 return OPVCC(31, 425, 0, 1)
3884 return OPVCC(31, 393, 0, 0)
3887 return OPVCC(31, 393, 0, 1)
3890 return OPVCC(31, 489, 1, 0)
3893 return OPVCC(31, 489, 1, 1)
3895 case ADIVDU, AREMDU:
3896 return OPVCC(31, 457, 0, 0)
3899 return OPVCC(31, 457, 0, 1)
3902 return OPVCC(31, 457, 1, 0)
3905 return OPVCC(31, 457, 1, 1)
3908 return OPVCC(31, 854, 0, 0)
3911 return OPVCC(31, 284, 0, 0)
3913 return OPVCC(31, 284, 0, 1)
3916 return OPVCC(31, 954, 0, 0)
3918 return OPVCC(31, 954, 0, 1)
3920 return OPVCC(31, 922, 0, 0)
3922 return OPVCC(31, 922, 0, 1)
3924 return OPVCC(31, 986, 0, 0)
3926 return OPVCC(31, 986, 0, 1)
3929 return OPVCC(63, 264, 0, 0)
3931 return OPVCC(63, 264, 0, 1)
3933 return OPVCC(63, 21, 0, 0)
3935 return OPVCC(63, 21, 0, 1)
3937 return OPVCC(59, 21, 0, 0)
3939 return OPVCC(59, 21, 0, 1)
3941 return OPVCC(63, 32, 0, 0)
3943 return OPVCC(63, 0, 0, 0)
3945 return OPVCC(63, 846, 0, 0)
3947 return OPVCC(63, 846, 0, 1)
3949 return OPVCC(63, 974, 0, 0)
3951 return OPVCC(63, 974, 0, 1)
3953 return OPVCC(59, 846, 0, 0)
3955 return OPVCC(59, 846, 0, 1)
3957 return OPVCC(63, 14, 0, 0)
3959 return OPVCC(63, 14, 0, 1)
3961 return OPVCC(63, 15, 0, 0)
3963 return OPVCC(63, 15, 0, 1)
3965 return OPVCC(63, 814, 0, 0)
3967 return OPVCC(63, 814, 0, 1)
3969 return OPVCC(63, 815, 0, 0)
3971 return OPVCC(63, 815, 0, 1)
3973 return OPVCC(63, 18, 0, 0)
3975 return OPVCC(63, 18, 0, 1)
3977 return OPVCC(59, 18, 0, 0)
3979 return OPVCC(59, 18, 0, 1)
3981 return OPVCC(63, 29, 0, 0)
3983 return OPVCC(63, 29, 0, 1)
3985 return OPVCC(59, 29, 0, 0)
3987 return OPVCC(59, 29, 0, 1)
3989 case AFMOVS, AFMOVD:
3990 return OPVCC(63, 72, 0, 0) /* load */
3992 return OPVCC(63, 72, 0, 1)
3994 return OPVCC(63, 28, 0, 0)
3996 return OPVCC(63, 28, 0, 1)
3998 return OPVCC(59, 28, 0, 0)
4000 return OPVCC(59, 28, 0, 1)
4002 return OPVCC(63, 25, 0, 0)
4004 return OPVCC(63, 25, 0, 1)
4006 return OPVCC(59, 25, 0, 0)
4008 return OPVCC(59, 25, 0, 1)
4010 return OPVCC(63, 136, 0, 0)
4012 return OPVCC(63, 136, 0, 1)
4014 return OPVCC(63, 40, 0, 0)
4016 return OPVCC(63, 40, 0, 1)
4018 return OPVCC(63, 31, 0, 0)
4020 return OPVCC(63, 31, 0, 1)
4022 return OPVCC(59, 31, 0, 0)
4024 return OPVCC(59, 31, 0, 1)
4026 return OPVCC(63, 30, 0, 0)
4028 return OPVCC(63, 30, 0, 1)
4030 return OPVCC(59, 30, 0, 0)
4032 return OPVCC(59, 30, 0, 1)
4034 return OPVCC(63, 8, 0, 0)
4036 return OPVCC(63, 8, 0, 1)
4038 return OPVCC(59, 24, 0, 0)
4040 return OPVCC(59, 24, 0, 1)
4042 return OPVCC(63, 488, 0, 0)
4044 return OPVCC(63, 488, 0, 1)
4046 return OPVCC(63, 456, 0, 0)
4048 return OPVCC(63, 456, 0, 1)
4050 return OPVCC(63, 424, 0, 0)
4052 return OPVCC(63, 424, 0, 1)
4054 return OPVCC(63, 392, 0, 0)
4056 return OPVCC(63, 392, 0, 1)
4058 return OPVCC(63, 12, 0, 0)
4060 return OPVCC(63, 12, 0, 1)
4062 return OPVCC(63, 26, 0, 0)
4064 return OPVCC(63, 26, 0, 1)
4066 return OPVCC(63, 23, 0, 0)
4068 return OPVCC(63, 23, 0, 1)
4070 return OPVCC(63, 22, 0, 0)
4072 return OPVCC(63, 22, 0, 1)
4074 return OPVCC(59, 22, 0, 0)
4076 return OPVCC(59, 22, 0, 1)
4078 return OPVCC(63, 20, 0, 0)
4080 return OPVCC(63, 20, 0, 1)
4082 return OPVCC(59, 20, 0, 0)
4084 return OPVCC(59, 20, 0, 1)
4087 return OPVCC(31, 982, 0, 0)
4089 return OPVCC(19, 150, 0, 0)
4092 return OPVCC(63, 70, 0, 0)
4094 return OPVCC(63, 70, 0, 1)
4096 return OPVCC(63, 38, 0, 0)
4098 return OPVCC(63, 38, 0, 1)
4101 return OPVCC(31, 75, 0, 0)
4103 return OPVCC(31, 75, 0, 1)
4105 return OPVCC(31, 11, 0, 0)
4107 return OPVCC(31, 11, 0, 1)
4109 return OPVCC(31, 235, 0, 0)
4111 return OPVCC(31, 235, 0, 1)
4113 return OPVCC(31, 235, 1, 0)
4115 return OPVCC(31, 235, 1, 1)
4118 return OPVCC(31, 73, 0, 0)
4120 return OPVCC(31, 73, 0, 1)
4122 return OPVCC(31, 9, 0, 0)
4124 return OPVCC(31, 9, 0, 1)
4126 return OPVCC(31, 233, 0, 0)
4128 return OPVCC(31, 233, 0, 1)
4130 return OPVCC(31, 233, 1, 0)
4132 return OPVCC(31, 233, 1, 1)
4135 return OPVCC(31, 476, 0, 0)
4137 return OPVCC(31, 476, 0, 1)
4139 return OPVCC(31, 104, 0, 0)
4141 return OPVCC(31, 104, 0, 1)
4143 return OPVCC(31, 104, 1, 0)
4145 return OPVCC(31, 104, 1, 1)
4147 return OPVCC(31, 124, 0, 0)
4149 return OPVCC(31, 124, 0, 1)
4151 return OPVCC(31, 444, 0, 0)
4153 return OPVCC(31, 444, 0, 1)
4155 return OPVCC(31, 412, 0, 0)
4157 return OPVCC(31, 412, 0, 1)
4160 return OPVCC(31, 506, 0, 0) /* popcntd - v2.06 */
4162 return OPVCC(31, 378, 0, 0) /* popcntw - v2.06 */
4164 return OPVCC(31, 122, 0, 0) /* popcntb - v2.02 */
4166 return OPVCC(31, 538, 0, 0) /* cnttzw - v3.00 */
4168 return OPVCC(31, 538, 0, 1) /* cnttzw. - v3.00 */
4170 return OPVCC(31, 570, 0, 0) /* cnttzd - v3.00 */
4172 return OPVCC(31, 570, 0, 1) /* cnttzd. - v3.00 */
4175 return OPVCC(19, 50, 0, 0)
4177 return OPVCC(19, 51, 0, 0)
4179 return OPVCC(19, 18, 0, 0)
4181 return OPVCC(19, 274, 0, 0)
4184 return OPVCC(20, 0, 0, 0)
4186 return OPVCC(20, 0, 0, 1)
4188 return OPVCC(23, 0, 0, 0)
4190 return OPVCC(23, 0, 0, 1)
4193 return OPVCC(30, 8, 0, 0)
4195 return OPVCC(30, 0, 0, 1)
4198 return OPVCC(30, 9, 0, 0)
4200 return OPVCC(30, 9, 0, 1)
4203 return OPVCC(30, 0, 0, 0)
4205 return OPVCC(30, 0, 0, 1)
4207 return OPMD(30, 1, 0) // rldicr
4209 return OPMD(30, 1, 1) // rldicr.
4212 return OPMD(30, 2, 0) // rldic
4214 return OPMD(30, 2, 1) // rldic.
4217 return OPVCC(17, 1, 0, 0)
4220 return OPVCC(31, 24, 0, 0)
4222 return OPVCC(31, 24, 0, 1)
4224 return OPVCC(31, 27, 0, 0)
4226 return OPVCC(31, 27, 0, 1)
4229 return OPVCC(31, 792, 0, 0)
4231 return OPVCC(31, 792, 0, 1)
4233 return OPVCC(31, 794, 0, 0)
4235 return OPVCC(31, 794, 0, 1)
4238 return OPVCC(31, 445, 0, 0)
4240 return OPVCC(31, 445, 0, 1)
4243 return OPVCC(31, 536, 0, 0)
4245 return OPVCC(31, 536, 0, 1)
4247 return OPVCC(31, 539, 0, 0)
4249 return OPVCC(31, 539, 0, 1)
4252 return OPVCC(31, 40, 0, 0)
4254 return OPVCC(31, 40, 0, 1)
4256 return OPVCC(31, 40, 1, 0)
4258 return OPVCC(31, 40, 1, 1)
4260 return OPVCC(31, 8, 0, 0)
4262 return OPVCC(31, 8, 0, 1)
4264 return OPVCC(31, 8, 1, 0)
4266 return OPVCC(31, 8, 1, 1)
4268 return OPVCC(31, 136, 0, 0)
4270 return OPVCC(31, 136, 0, 1)
4272 return OPVCC(31, 136, 1, 0)
4274 return OPVCC(31, 136, 1, 1)
4276 return OPVCC(31, 232, 0, 0)
4278 return OPVCC(31, 232, 0, 1)
4280 return OPVCC(31, 232, 1, 0)
4282 return OPVCC(31, 232, 1, 1)
4284 return OPVCC(31, 200, 0, 0)
4286 return OPVCC(31, 200, 0, 1)
4288 return OPVCC(31, 200, 1, 0)
4290 return OPVCC(31, 200, 1, 1)
4293 return OPVCC(31, 598, 0, 0)
4295 return OPVCC(31, 598, 0, 0) | 1<<21
4298 return OPVCC(31, 598, 0, 0) | 2<<21
4301 return OPVCC(31, 306, 0, 0)
4303 return OPVCC(31, 274, 0, 0)
4305 return OPVCC(31, 566, 0, 0)
4307 return OPVCC(31, 498, 0, 0)
4309 return OPVCC(31, 434, 0, 0)
4311 return OPVCC(31, 915, 0, 0)
4313 return OPVCC(31, 851, 0, 0)
4315 return OPVCC(31, 402, 0, 0)
4318 return OPVCC(31, 4, 0, 0)
4320 return OPVCC(31, 68, 0, 0)
4322 /* Vector (VMX/Altivec) instructions */
4323 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
4324 /* are enabled starting at POWER6 (ISA 2.05). */
4326 return OPVX(4, 1028, 0, 0) /* vand - v2.03 */
4328 return OPVX(4, 1092, 0, 0) /* vandc - v2.03 */
4330 return OPVX(4, 1412, 0, 0) /* vnand - v2.07 */
4333 return OPVX(4, 1156, 0, 0) /* vor - v2.03 */
4335 return OPVX(4, 1348, 0, 0) /* vorc - v2.07 */
4337 return OPVX(4, 1284, 0, 0) /* vnor - v2.03 */
4339 return OPVX(4, 1220, 0, 0) /* vxor - v2.03 */
4341 return OPVX(4, 1668, 0, 0) /* veqv - v2.07 */
4344 return OPVX(4, 0, 0, 0) /* vaddubm - v2.03 */
4346 return OPVX(4, 64, 0, 0) /* vadduhm - v2.03 */
4348 return OPVX(4, 128, 0, 0) /* vadduwm - v2.03 */
4350 return OPVX(4, 192, 0, 0) /* vaddudm - v2.07 */
4352 return OPVX(4, 256, 0, 0) /* vadduqm - v2.07 */
4355 return OPVX(4, 320, 0, 0) /* vaddcuq - v2.07 */
4357 return OPVX(4, 384, 0, 0) /* vaddcuw - v2.03 */
4360 return OPVX(4, 512, 0, 0) /* vaddubs - v2.03 */
4362 return OPVX(4, 576, 0, 0) /* vadduhs - v2.03 */
4364 return OPVX(4, 640, 0, 0) /* vadduws - v2.03 */
4367 return OPVX(4, 768, 0, 0) /* vaddsbs - v2.03 */
4369 return OPVX(4, 832, 0, 0) /* vaddshs - v2.03 */
4371 return OPVX(4, 896, 0, 0) /* vaddsws - v2.03 */
4374 return OPVX(4, 60, 0, 0) /* vaddeuqm - v2.07 */
4376 return OPVX(4, 61, 0, 0) /* vaddecuq - v2.07 */
4379 return OPVX(4, 776, 0, 0) /* vmulesb - v2.03 */
4381 return OPVX(4, 264, 0, 0) /* vmulosb - v2.03 */
4383 return OPVX(4, 520, 0, 0) /* vmuleub - v2.03 */
4385 return OPVX(4, 8, 0, 0) /* vmuloub - v2.03 */
4387 return OPVX(4, 840, 0, 0) /* vmulesh - v2.03 */
4389 return OPVX(4, 328, 0, 0) /* vmulosh - v2.03 */
4391 return OPVX(4, 584, 0, 0) /* vmuleuh - v2.03 */
4393 return OPVX(4, 72, 0, 0) /* vmulouh - v2.03 */
4395 return OPVX(4, 904, 0, 0) /* vmulesw - v2.07 */
4397 return OPVX(4, 392, 0, 0) /* vmulosw - v2.07 */
4399 return OPVX(4, 648, 0, 0) /* vmuleuw - v2.07 */
4401 return OPVX(4, 136, 0, 0) /* vmulouw - v2.07 */
4403 return OPVX(4, 137, 0, 0) /* vmuluwm - v2.07 */
4406 return OPVX(4, 1032, 0, 0) /* vpmsumb - v2.07 */
4408 return OPVX(4, 1096, 0, 0) /* vpmsumh - v2.07 */
4410 return OPVX(4, 1160, 0, 0) /* vpmsumw - v2.07 */
4412 return OPVX(4, 1224, 0, 0) /* vpmsumd - v2.07 */
4415 return OPVX(4, 35, 0, 0) /* vmsumudm - v3.00b */
4418 return OPVX(4, 1024, 0, 0) /* vsububm - v2.03 */
4420 return OPVX(4, 1088, 0, 0) /* vsubuhm - v2.03 */
4422 return OPVX(4, 1152, 0, 0) /* vsubuwm - v2.03 */
4424 return OPVX(4, 1216, 0, 0) /* vsubudm - v2.07 */
4426 return OPVX(4, 1280, 0, 0) /* vsubuqm - v2.07 */
4429 return OPVX(4, 1344, 0, 0) /* vsubcuq - v2.07 */
4431 return OPVX(4, 1408, 0, 0) /* vsubcuw - v2.03 */
4434 return OPVX(4, 1536, 0, 0) /* vsububs - v2.03 */
4436 return OPVX(4, 1600, 0, 0) /* vsubuhs - v2.03 */
4438 return OPVX(4, 1664, 0, 0) /* vsubuws - v2.03 */
4441 return OPVX(4, 1792, 0, 0) /* vsubsbs - v2.03 */
4443 return OPVX(4, 1856, 0, 0) /* vsubshs - v2.03 */
4445 return OPVX(4, 1920, 0, 0) /* vsubsws - v2.03 */
4448 return OPVX(4, 62, 0, 0) /* vsubeuqm - v2.07 */
4450 return OPVX(4, 63, 0, 0) /* vsubecuq - v2.07 */
4453 return OPVX(4, 4, 0, 0) /* vrlb - v2.03 */
4455 return OPVX(4, 68, 0, 0) /* vrlh - v2.03 */
4457 return OPVX(4, 132, 0, 0) /* vrlw - v2.03 */
4459 return OPVX(4, 196, 0, 0) /* vrld - v2.07 */
4462 return OPVX(4, 1676, 0, 0) /* vmrgow - v2.07 */
4464 return OPVX(4, 1932, 0, 0) /* vmrgew - v2.07 */
4467 return OPVX(4, 260, 0, 0) /* vslh - v2.03 */
4469 return OPVX(4, 324, 0, 0) /* vslh - v2.03 */
4471 return OPVX(4, 388, 0, 0) /* vslw - v2.03 */
4473 return OPVX(4, 452, 0, 0) /* vsl - v2.03 */
4475 return OPVX(4, 1036, 0, 0) /* vsl - v2.03 */
4477 return OPVX(4, 516, 0, 0) /* vsrb - v2.03 */
4479 return OPVX(4, 580, 0, 0) /* vsrh - v2.03 */
4481 return OPVX(4, 644, 0, 0) /* vsrw - v2.03 */
4483 return OPVX(4, 708, 0, 0) /* vsr - v2.03 */
4485 return OPVX(4, 1100, 0, 0) /* vsro - v2.03 */
4487 return OPVX(4, 1476, 0, 0) /* vsld - v2.07 */
4489 return OPVX(4, 1732, 0, 0) /* vsrd - v2.07 */
4492 return OPVX(4, 772, 0, 0) /* vsrab - v2.03 */
4494 return OPVX(4, 836, 0, 0) /* vsrah - v2.03 */
4496 return OPVX(4, 900, 0, 0) /* vsraw - v2.03 */
4498 return OPVX(4, 964, 0, 0) /* vsrad - v2.07 */
4501 return OPVC(4, 1356, 0, 0) /* vbpermq - v2.07 */
4503 return OPVC(4, 1484, 0, 0) /* vbpermd - v3.00 */
4506 return OPVX(4, 1794, 0, 0) /* vclzb - v2.07 */
4508 return OPVX(4, 1858, 0, 0) /* vclzh - v2.07 */
4510 return OPVX(4, 1922, 0, 0) /* vclzw - v2.07 */
4512 return OPVX(4, 1986, 0, 0) /* vclzd - v2.07 */
4515 return OPVX(4, 1795, 0, 0) /* vpopcntb - v2.07 */
4517 return OPVX(4, 1859, 0, 0) /* vpopcnth - v2.07 */
4519 return OPVX(4, 1923, 0, 0) /* vpopcntw - v2.07 */
4521 return OPVX(4, 1987, 0, 0) /* vpopcntd - v2.07 */
4524 return OPVC(4, 6, 0, 0) /* vcmpequb - v2.03 */
4526 return OPVC(4, 6, 0, 1) /* vcmpequb. - v2.03 */
4528 return OPVC(4, 70, 0, 0) /* vcmpequh - v2.03 */
4530 return OPVC(4, 70, 0, 1) /* vcmpequh. - v2.03 */
4532 return OPVC(4, 134, 0, 0) /* vcmpequw - v2.03 */
4534 return OPVC(4, 134, 0, 1) /* vcmpequw. - v2.03 */
4536 return OPVC(4, 199, 0, 0) /* vcmpequd - v2.07 */
4538 return OPVC(4, 199, 0, 1) /* vcmpequd. - v2.07 */
4541 return OPVC(4, 518, 0, 0) /* vcmpgtub - v2.03 */
4543 return OPVC(4, 518, 0, 1) /* vcmpgtub. - v2.03 */
4545 return OPVC(4, 582, 0, 0) /* vcmpgtuh - v2.03 */
4547 return OPVC(4, 582, 0, 1) /* vcmpgtuh. - v2.03 */
4549 return OPVC(4, 646, 0, 0) /* vcmpgtuw - v2.03 */
4551 return OPVC(4, 646, 0, 1) /* vcmpgtuw. - v2.03 */
4553 return OPVC(4, 711, 0, 0) /* vcmpgtud - v2.07 */
4555 return OPVC(4, 711, 0, 1) /* vcmpgtud. v2.07 */
4557 return OPVC(4, 774, 0, 0) /* vcmpgtsb - v2.03 */
4559 return OPVC(4, 774, 0, 1) /* vcmpgtsb. - v2.03 */
4561 return OPVC(4, 838, 0, 0) /* vcmpgtsh - v2.03 */
4563 return OPVC(4, 838, 0, 1) /* vcmpgtsh. - v2.03 */
4565 return OPVC(4, 902, 0, 0) /* vcmpgtsw - v2.03 */
4567 return OPVC(4, 902, 0, 1) /* vcmpgtsw. - v2.03 */
4569 return OPVC(4, 967, 0, 0) /* vcmpgtsd - v2.07 */
4571 return OPVC(4, 967, 0, 1) /* vcmpgtsd. - v2.07 */
4574 return OPVC(4, 263, 0, 0) /* vcmpnezb - v3.00 */
4576 return OPVC(4, 263, 0, 1) /* vcmpnezb. - v3.00 */
4578 return OPVC(4, 7, 0, 0) /* vcmpneb - v3.00 */
4580 return OPVC(4, 7, 0, 1) /* vcmpneb. - v3.00 */
4582 return OPVC(4, 71, 0, 0) /* vcmpneh - v3.00 */
4584 return OPVC(4, 71, 0, 1) /* vcmpneh. - v3.00 */
4586 return OPVC(4, 135, 0, 0) /* vcmpnew - v3.00 */
4588 return OPVC(4, 135, 0, 1) /* vcmpnew. - v3.00 */
4591 return OPVX(4, 43, 0, 0) /* vperm - v2.03 */
4593 return OPVX(4, 45, 0, 0) /* vpermxor - v2.03 */
4595 return OPVX(4, 59, 0, 0) /* vpermr - v3.0 */
4598 return OPVX(4, 42, 0, 0) /* vsel - v2.03 */
4601 return OPVX(4, 1288, 0, 0) /* vcipher - v2.07 */
4603 return OPVX(4, 1289, 0, 0) /* vcipherlast - v2.07 */
4605 return OPVX(4, 1352, 0, 0) /* vncipher - v2.07 */
4607 return OPVX(4, 1353, 0, 0) /* vncipherlast - v2.07 */
4609 return OPVX(4, 1480, 0, 0) /* vsbox - v2.07 */
4610 /* End of vector instructions */
4612 /* Vector scalar (VSX) instructions */
4613 /* ISA 2.06 enables these for POWER7. */
4614 case AMFVSRD, AMFVRD, AMFFPRD:
4615 return OPVXX1(31, 51, 0) /* mfvsrd - v2.07 */
4617 return OPVXX1(31, 115, 0) /* mfvsrwz - v2.07 */
4619 return OPVXX1(31, 307, 0) /* mfvsrld - v3.00 */
4621 case AMTVSRD, AMTFPRD, AMTVRD:
4622 return OPVXX1(31, 179, 0) /* mtvsrd - v2.07 */
4624 return OPVXX1(31, 211, 0) /* mtvsrwa - v2.07 */
4626 return OPVXX1(31, 243, 0) /* mtvsrwz - v2.07 */
4628 return OPVXX1(31, 435, 0) /* mtvsrdd - v3.00 */
4630 return OPVXX1(31, 403, 0) /* mtvsrws - v3.00 */
4633 return OPVXX3(60, 130, 0) /* xxland - v2.06 */
4635 return OPVXX3(60, 138, 0) /* xxlandc - v2.06 */
4637 return OPVXX3(60, 186, 0) /* xxleqv - v2.07 */
4639 return OPVXX3(60, 178, 0) /* xxlnand - v2.07 */
4642 return OPVXX3(60, 170, 0) /* xxlorc - v2.07 */
4644 return OPVXX3(60, 162, 0) /* xxlnor - v2.06 */
4645 case AXXLOR, AXXLORQ:
4646 return OPVXX3(60, 146, 0) /* xxlor - v2.06 */
4648 return OPVXX3(60, 154, 0) /* xxlxor - v2.06 */
4651 return OPVXX4(60, 3, 0) /* xxsel - v2.06 */
4654 return OPVXX3(60, 18, 0) /* xxmrghw - v2.06 */
4656 return OPVXX3(60, 50, 0) /* xxmrglw - v2.06 */
4659 return OPVXX2(60, 164, 0) /* xxspltw - v2.06 */
4662 return OPVCC(60, 360, 0, 0) /* xxspltib - v3.0 */
4665 return OPVXX3(60, 26, 0) /* xxperm - v2.06 */
4667 return OPVXX3(60, 10, 0) /* xxpermdi - v2.06 */
4670 return OPVXX3(60, 2, 0) /* xxsldwi - v2.06 */
4673 return OPVXX2VA(60, 475, 31) /* xxbrq - v3.0 */
4675 return OPVXX2VA(60, 475, 23) /* xxbrd - v3.0 */
4677 return OPVXX2VA(60, 475, 15) /* xxbrw - v3.0 */
4679 return OPVXX2VA(60, 475, 7) /* xxbrh - v3.0 */
4682 return OPVXX2(60, 265, 0) /* xscvdpsp - v2.06 */
4684 return OPVXX2(60, 329, 0) /* xscvspdp - v2.06 */
4686 return OPVXX2(60, 267, 0) /* xscvdpspn - v2.07 */
4688 return OPVXX2(60, 331, 0) /* xscvspdpn - v2.07 */
4691 return OPVXX2(60, 393, 0) /* xvcvdpsp - v2.06 */
4693 return OPVXX2(60, 457, 0) /* xvcvspdp - v2.06 */
4696 return OPVXX2(60, 344, 0) /* xscvdpsxds - v2.06 */
4698 return OPVXX2(60, 88, 0) /* xscvdpsxws - v2.06 */
4700 return OPVXX2(60, 328, 0) /* xscvdpuxds - v2.06 */
4702 return OPVXX2(60, 72, 0) /* xscvdpuxws - v2.06 */
4705 return OPVXX2(60, 376, 0) /* xscvsxddp - v2.06 */
4707 return OPVXX2(60, 360, 0) /* xscvuxddp - v2.06 */
4709 return OPVXX2(60, 312, 0) /* xscvsxdsp - v2.06 */
4711 return OPVXX2(60, 296, 0) /* xscvuxdsp - v2.06 */
4714 return OPVXX2(60, 472, 0) /* xvcvdpsxds - v2.06 */
4716 return OPVXX2(60, 216, 0) /* xvcvdpsxws - v2.06 */
4718 return OPVXX2(60, 456, 0) /* xvcvdpuxds - v2.06 */
4720 return OPVXX2(60, 200, 0) /* xvcvdpuxws - v2.06 */
4722 return OPVXX2(60, 408, 0) /* xvcvspsxds - v2.07 */
4724 return OPVXX2(60, 152, 0) /* xvcvspsxws - v2.07 */
4726 return OPVXX2(60, 392, 0) /* xvcvspuxds - v2.07 */
4728 return OPVXX2(60, 136, 0) /* xvcvspuxws - v2.07 */
4731 return OPVXX2(60, 504, 0) /* xvcvsxddp - v2.06 */
4733 return OPVXX2(60, 248, 0) /* xvcvsxwdp - v2.06 */
4735 return OPVXX2(60, 488, 0) /* xvcvuxddp - v2.06 */
4737 return OPVXX2(60, 232, 0) /* xvcvuxwdp - v2.06 */
4739 return OPVXX2(60, 440, 0) /* xvcvsxdsp - v2.06 */
4741 return OPVXX2(60, 184, 0) /* xvcvsxwsp - v2.06 */
4743 return OPVXX2(60, 424, 0) /* xvcvuxdsp - v2.06 */
4745 return OPVXX2(60, 168, 0) /* xvcvuxwsp - v2.06 */
4746 /* End of VSX instructions */
4749 return OPVX(4, 48, 0, 0) /* maddhd - v3.00 */
4751 return OPVX(4, 49, 0, 0) /* maddhdu - v3.00 */
4753 return OPVX(4, 51, 0, 0) /* maddld - v3.00 */
4756 return OPVCC(31, 316, 0, 0)
4758 return OPVCC(31, 316, 0, 1)
4761 c.ctxt.Diag("bad r/r, r/r/r or r/r/r/r opcode %v", a)
// opirrr returns the machine-opcode word for an i/r/r/r-form operation
// (immediate plus three vector registers); the only visible encoding here
// is vsldoi (VX-form, v2.03).
// NOTE(review): this region of the file is a lossy extraction — the
// `switch a {` line, the `case ...:` labels, and the closing braces are
// missing, and each surviving line carries a stray leading line number.
// Restore the complete switch from the full source before changing logic.
4765 func (c *ctxt9) opirrr(a obj.As) uint32 {
4767 /* Vector (VMX/Altivec) instructions */
4768 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
4769 /* are enabled starting at POWER6 (ISA 2.05). */
4771 return OPVX(4, 44, 0, 0) /* vsldoi - v2.03 */
// Fallthrough for unrecognized opcodes: report via the assembler context.
4774 c.ctxt.Diag("bad i/r/r/r opcode %v", a)
// opiirr returns the machine-opcode word for an i/i/r/r-form operation
// (two immediates plus two registers); the visible encodings are the
// SHA-sigma vector ops vshasigmaw/vshasigmad (VX-form, v2.07).
// NOTE(review): extraction artifact — switch/case/brace lines are missing
// and each line carries a stray leading line number; see full source.
4778 func (c *ctxt9) opiirr(a obj.As) uint32 {
4780 /* Vector (VMX/Altivec) instructions */
4781 /* ISA 2.07 enables these for POWER8 and beyond. */
4783 return OPVX(4, 1666, 0, 0) /* vshasigmaw - v2.07 */
4785 return OPVX(4, 1730, 0, 0) /* vshasigmad - v2.07 */
// Fallthrough for unrecognized opcodes: report via the assembler context.
4788 c.ctxt.Diag("bad i/i/r/r opcode %v", a)
// opirr returns the machine-opcode word for immediate-operand (i/r and
// i/r/r) instruction forms: addi/andi/ori/xori families, branches
// (opcode 16/18), compares with immediate, rotate-with-mask (rlwinm/rldic*),
// shifts with immediate, trap and syscall encodings, and vector splats.
// NOTE(review): extraction artifact — the `switch a {` line and most
// `case ...:` labels are missing (only multi-opcode cases such as
// `case AMULLW, AMULLD:` survived), and each line carries a stray leading
// line number. The pairing of each `return` with its opcode constant must
// be restored from the full source before any logic change.
4792 func (c *ctxt9) opirr(a obj.As) uint32 {
4795 return OPVCC(14, 0, 0, 0)
4797 return OPVCC(12, 0, 0, 0)
4799 return OPVCC(13, 0, 0, 0)
4801 return OPVCC(15, 0, 0, 0) /* ADDIS */
4804 return OPVCC(28, 0, 0, 0)
4806 return OPVCC(29, 0, 0, 0) /* ANDIS. */
// Opcode 18 is the I-form unconditional branch; the `| 1` variants set the
// LK bit (branch-and-link). Opcode 16 is the B-form conditional branch.
4809 return OPVCC(18, 0, 0, 0)
4811 return OPVCC(18, 0, 0, 0) | 1
4813 return OPVCC(18, 0, 0, 0) | 1
4815 return OPVCC(18, 0, 0, 0) | 1
4817 return OPVCC(16, 0, 0, 0)
4819 return OPVCC(16, 0, 0, 0) | 1
// Conditional branches built with explicit BO/BI encodings
// (presumably BEQ/BNE/BLT/BGE etc. — confirm against the full source).
4822 return AOP_RRR(16<<26, 12, 2, 0)
4824 return AOP_RRR(16<<26, 4, 0, 0)
4826 return AOP_RRR(16<<26, 12, 1, 0)
4828 return AOP_RRR(16<<26, 4, 1, 0)
4830 return AOP_RRR(16<<26, 12, 0, 0)
4832 return AOP_RRR(16<<26, 4, 2, 0)
4834 return AOP_RRR(16<<26, 4, 3, 0) // apparently unordered-clear
4836 return AOP_RRR(16<<26, 12, 3, 0) // apparently unordered-set
// Compare-immediate forms: L=1 selects 64-bit comparison, L=0 32-bit.
4839 return OPVCC(11, 0, 0, 0) | 1<<21 /* L=1 */
4841 return OPVCC(10, 0, 0, 0) | 1<<21
4843 return OPVCC(11, 0, 0, 0) /* L=0 */
4845 return OPVCC(10, 0, 0, 0)
4847 return OPVCC(31, 224, 0, 0) /* cmpeqb - v3.00 */
4850 return OPVCC(31, 597, 0, 0)
4853 return OPVCC(31, 774, 0, 0) /* copy - v3.00 */
4855 return OPVCC(31, 902, 0, 1) /* paste. - v3.00 */
4857 return OPVCC(31, 755, 0, 0) /* darn - v3.00 */
4859 case AMULLW, AMULLD:
4860 return OPVCC(7, 0, 0, 0) /* mulli works with MULLW or MULLD */
4863 return OPVCC(24, 0, 0, 0)
4865 return OPVCC(25, 0, 0, 0) /* ORIS */
// Rotate-and-mask group: rlwimi/rldimi/rlwinm/rldicl/rldicr/rldic; the
// trailing argument 1 selects the Rc (record-condition, "dot") form.
4868 return OPVCC(20, 0, 0, 0) /* rlwimi */
4870 return OPVCC(20, 0, 0, 1)
4872 return OPMD(30, 3, 0) /* rldimi */
4874 return OPMD(30, 3, 1) /* rldimi. */
4876 return OPMD(30, 3, 0) /* rldimi */
4878 return OPMD(30, 3, 1) /* rldimi. */
4880 return OPVCC(21, 0, 0, 0) /* rlwinm */
4882 return OPVCC(21, 0, 0, 1)
4885 return OPMD(30, 0, 0) /* rldicl */
4887 return OPMD(30, 0, 1) /* rldicl. */
4889 return OPMD(30, 1, 0) /* rldicr */
4891 return OPMD(30, 1, 1) /* rldicr. */
4893 return OPMD(30, 2, 0) /* rldic */
4895 return OPMD(30, 2, 1) /* rldic. */
4898 return OPVCC(31, 824, 0, 0)
4900 return OPVCC(31, 824, 0, 1)
4902 return OPVCC(31, (413 << 1), 0, 0)
4904 return OPVCC(31, (413 << 1), 0, 1)
4906 return OPVCC(31, 445, 0, 0)
4908 return OPVCC(31, 445, 0, 1)
4911 return OPVCC(31, 725, 0, 0)
4914 return OPVCC(8, 0, 0, 0)
4917 return OPVCC(3, 0, 0, 0)
4919 return OPVCC(2, 0, 0, 0)
4921 /* Vector (VMX/Altivec) instructions */
4922 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
4923 /* are enabled starting at POWER6 (ISA 2.05). */
4925 return OPVX(4, 524, 0, 0) /* vspltb - v2.03 */
4927 return OPVX(4, 588, 0, 0) /* vsplth - v2.03 */
4929 return OPVX(4, 652, 0, 0) /* vspltw - v2.03 */
4932 return OPVX(4, 780, 0, 0) /* vspltisb - v2.03 */
4934 return OPVX(4, 844, 0, 0) /* vspltish - v2.03 */
4936 return OPVX(4, 908, 0, 0) /* vspltisw - v2.03 */
4937 /* End of vector instructions */
4940 return OPVCC(63, 128, 0, 0) /* ftdiv - v2.06 */
4942 return OPVCC(63, 160, 0, 0) /* ftsqrt - v2.06 */
4945 return OPVCC(26, 0, 0, 0) /* XORIL */
4947 return OPVCC(27, 0, 0, 0) /* XORIS */
// Fallthrough for unrecognized opcodes: report via the assembler context.
4950 c.ctxt.Diag("bad opcode i/r or i/r/r %v", a)
// opload returns the machine-opcode word for a D/DS-form load
// (register + displacement addressing): ld/ldu, lwz/lwzu, lwa,
// byte/half loads, FP loads, lmw, and the VSX lxv forms.
// NOTE(review): extraction artifact — switch/case/brace lines are missing
// (only `case AMOVBU, AMOVBZU:` survived) and each line carries a stray
// leading line number; restore from the full source before editing.
4957 func (c *ctxt9) opload(a obj.As) uint32 {
4960 return OPVCC(58, 0, 0, 0) /* ld */
4962 return OPVCC(58, 0, 0, 1) /* ldu */
4964 return OPVCC(32, 0, 0, 0) /* lwz */
4966 return OPVCC(33, 0, 0, 0) /* lwzu */
// DS-form opcode 58 with XO bit 1 selects lwa (load word algebraic).
4968 return OPVCC(58, 0, 0, 0) | 1<<1 /* lwa */
4970 return OPDQ(61, 1, 0) /* lxv - ISA v3.0 */
4972 return OPVXX1(31, 269, 0) /* lxvl - ISA v3.0 */
4974 return OPVXX1(31, 301, 0) /* lxvll - ISA v3.0 */
4976 return OPVXX1(31, 268, 0) /* lxvx - ISA v3.0 */
4980 return OPVCC(34, 0, 0, 0)
4983 case AMOVBU, AMOVBZU:
4984 return OPVCC(35, 0, 0, 0)
4986 return OPVCC(50, 0, 0, 0)
4988 return OPVCC(51, 0, 0, 0)
4990 return OPVCC(48, 0, 0, 0)
4992 return OPVCC(49, 0, 0, 0)
4994 return OPVCC(42, 0, 0, 0)
4996 return OPVCC(43, 0, 0, 0)
4998 return OPVCC(40, 0, 0, 0)
5000 return OPVCC(41, 0, 0, 0)
5002 return OPVCC(46, 0, 0, 0) /* lmw */
// Fallthrough for unrecognized opcodes: report via the assembler context.
5005 c.ctxt.Diag("bad load opcode %v", a)
// oploadx returns the machine-opcode word for an X-form indexed load
// ("a(b),d" addressing: base register plus index register): lwzx/lbzx/...,
// FP indexed loads, byte-reversed loads, the load-and-reserve (l*arx)
// atomics, Altivec lv* loads, and VSX lxv*/lxs* indexed loads.
// NOTE(review): extraction artifact — switch/case/brace lines are missing
// (only `case AMOVBU, AMOVBZU:` survived), the block-comment delimiters of
// the header fragment below were lost, and each line carries a stray
// leading line number; restore from the full source before editing.
5010 * indexed load a(b),d
5012 func (c *ctxt9) oploadx(a obj.As) uint32 {
5015 return OPVCC(31, 23, 0, 0) /* lwzx */
5017 return OPVCC(31, 55, 0, 0) /* lwzux */
5019 return OPVCC(31, 341, 0, 0) /* lwax */
5021 return OPVCC(31, 373, 0, 0) /* lwaux */
5024 return OPVCC(31, 87, 0, 0) /* lbzx */
5026 case AMOVBU, AMOVBZU:
5027 return OPVCC(31, 119, 0, 0) /* lbzux */
5029 return OPVCC(31, 599, 0, 0) /* lfdx */
5031 return OPVCC(31, 631, 0, 0) /* lfdux */
5033 return OPVCC(31, 535, 0, 0) /* lfsx */
5035 return OPVCC(31, 567, 0, 0) /* lfsux */
5037 return OPVCC(31, 855, 0, 0) /* lfiwax - power6, isa 2.05 */
5039 return OPVCC(31, 887, 0, 0) /* lfiwzx - power7, isa 2.06 */
5041 return OPVCC(31, 343, 0, 0) /* lhax */
5043 return OPVCC(31, 375, 0, 0) /* lhaux */
5045 return OPVCC(31, 790, 0, 0) /* lhbrx */
5047 return OPVCC(31, 534, 0, 0) /* lwbrx */
5049 return OPVCC(31, 532, 0, 0) /* ldbrx */
5051 return OPVCC(31, 279, 0, 0) /* lhzx */
5053 return OPVCC(31, 311, 0, 0) /* lhzux */
5055 return OPVCC(31, 310, 0, 0) /* eciwx */
// Load-and-reserve forms used for atomic read-modify-write sequences.
5057 return OPVCC(31, 52, 0, 0) /* lbarx */
5059 return OPVCC(31, 116, 0, 0) /* lharx */
5061 return OPVCC(31, 20, 0, 0) /* lwarx */
5063 return OPVCC(31, 84, 0, 0) /* ldarx */
5065 return OPVCC(31, 533, 0, 0) /* lswx */
5067 return OPVCC(31, 21, 0, 0) /* ldx */
5069 return OPVCC(31, 53, 0, 0) /* ldux */
5071 return OPVCC(31, 309, 0, 0) /* ldmx */
5073 /* Vector (VMX/Altivec) instructions */
5075 return OPVCC(31, 7, 0, 0) /* lvebx - v2.03 */
5077 return OPVCC(31, 39, 0, 0) /* lvehx - v2.03 */
5079 return OPVCC(31, 71, 0, 0) /* lvewx - v2.03 */
5081 return OPVCC(31, 103, 0, 0) /* lvx - v2.03 */
5083 return OPVCC(31, 359, 0, 0) /* lvxl - v2.03 */
5085 return OPVCC(31, 6, 0, 0) /* lvsl - v2.03 */
5087 return OPVCC(31, 38, 0, 0) /* lvsr - v2.03 */
5088 /* End of vector instructions */
5090 /* Vector scalar (VSX) instructions */
5092 return OPVXX1(31, 268, 0) /* lxvx - ISA v3.0 */
5094 return OPVXX1(31, 844, 0) /* lxvd2x - v2.06 */
5096 return OPVXX1(31, 780, 0) /* lxvw4x - v2.06 */
5098 return OPVXX1(31, 812, 0) /* lxvh8x - v3.00 */
5100 return OPVXX1(31, 876, 0) /* lxvb16x - v3.00 */
5102 return OPVXX1(31, 332, 0) /* lxvdsx - v2.06 */
5104 return OPVXX1(31, 588, 0) /* lxsdx - v2.06 */
5106 return OPVXX1(31, 76, 0) /* lxsiwax - v2.07 */
5108 return OPVXX1(31, 12, 0) /* lxsiwzx - v2.07 */
// Fallthrough for unrecognized opcodes: report via the assembler context.
5111 c.ctxt.Diag("bad loadx opcode %v", a)
// opstore returns the machine-opcode word for a D/DS-form store
// (register + displacement addressing): stb/sth/stw/std and their update
// forms, FP stores, stmw/stswi, and the VSX stxv forms. The final OPVCC
// argument 1 selects the update (u) variant where shown.
// NOTE(review): extraction artifact — switch/case/brace lines are missing
// (only the multi-opcode `case` lines survived) and each line carries a
// stray leading line number; restore from the full source before editing.
5118 func (c *ctxt9) opstore(a obj.As) uint32 {
5121 return OPVCC(38, 0, 0, 0) /* stb */
5123 case AMOVBU, AMOVBZU:
5124 return OPVCC(39, 0, 0, 0) /* stbu */
5126 return OPVCC(54, 0, 0, 0) /* stfd */
5128 return OPVCC(55, 0, 0, 0) /* stfdu */
5130 return OPVCC(52, 0, 0, 0) /* stfs */
5132 return OPVCC(53, 0, 0, 0) /* stfsu */
5135 return OPVCC(44, 0, 0, 0) /* sth */
5137 case AMOVHZU, AMOVHU:
5138 return OPVCC(45, 0, 0, 0) /* sthu */
5140 return OPVCC(47, 0, 0, 0) /* stmw */
5142 return OPVCC(31, 725, 0, 0) /* stswi */
5145 return OPVCC(36, 0, 0, 0) /* stw */
5147 case AMOVWZU, AMOVWU:
5148 return OPVCC(37, 0, 0, 0) /* stwu */
5150 return OPVCC(62, 0, 0, 0) /* std */
5152 return OPVCC(62, 0, 0, 1) /* stdu */
5154 return OPDQ(61, 5, 0) /* stxv ISA 3.0 */
5156 return OPVXX1(31, 397, 0) /* stxvl ISA 3.0 */
5158 return OPVXX1(31, 429, 0) /* stxvll ISA 3.0 */
5160 return OPVXX1(31, 396, 0) /* stxvx - ISA v3.0 */
// Fallthrough for unrecognized opcodes: report via the assembler context.
5164 c.ctxt.Diag("unknown store opcode %v", a)
5169 * indexed store s,a(b)
5171 func (c *ctxt9) opstorex(a obj.As) uint32 {
5174 return OPVCC(31, 215, 0, 0) /* stbx */
5176 case AMOVBU, AMOVBZU:
5177 return OPVCC(31, 247, 0, 0) /* stbux */
5179 return OPVCC(31, 727, 0, 0) /* stfdx */
5181 return OPVCC(31, 759, 0, 0) /* stfdux */
5183 return OPVCC(31, 663, 0, 0) /* stfsx */
5185 return OPVCC(31, 695, 0, 0) /* stfsux */
5187 return OPVCC(31, 983, 0, 0) /* stfiwx */
5190 return OPVCC(31, 407, 0, 0) /* sthx */
5192 return OPVCC(31, 918, 0, 0) /* sthbrx */
5194 case AMOVHZU, AMOVHU:
5195 return OPVCC(31, 439, 0, 0) /* sthux */
5198 return OPVCC(31, 151, 0, 0) /* stwx */
5200 case AMOVWZU, AMOVWU:
5201 return OPVCC(31, 183, 0, 0) /* stwux */
5203 return OPVCC(31, 661, 0, 0) /* stswx */
5205 return OPVCC(31, 662, 0, 0) /* stwbrx */
5207 return OPVCC(31, 660, 0, 0) /* stdbrx */
5209 return OPVCC(31, 694, 0, 1) /* stbcx. */
5211 return OPVCC(31, 726, 0, 1) /* sthcx. */
5213 return OPVCC(31, 150, 0, 1) /* stwcx. */
5215 return OPVCC(31, 214, 0, 1) /* stwdx. */
5217 return OPVCC(31, 438, 0, 0) /* ecowx */
5219 return OPVCC(31, 149, 0, 0) /* stdx */
5221 return OPVCC(31, 181, 0, 0) /* stdux */
5223 /* Vector (VMX/Altivec) instructions */
5225 return OPVCC(31, 135, 0, 0) /* stvebx - v2.03 */
5227 return OPVCC(31, 167, 0, 0) /* stvehx - v2.03 */
5229 return OPVCC(31, 199, 0, 0) /* stvewx - v2.03 */
5231 return OPVCC(31, 231, 0, 0) /* stvx - v2.03 */
5233 return OPVCC(31, 487, 0, 0) /* stvxl - v2.03 */
5234 /* End of vector instructions */
5236 /* Vector scalar (VSX) instructions */
5238 return OPVXX1(31, 396, 0) /* stxvx - v3.0 */
5240 return OPVXX1(31, 972, 0) /* stxvd2x - v2.06 */
5242 return OPVXX1(31, 908, 0) /* stxvw4x - v2.06 */
5244 return OPVXX1(31, 940, 0) /* stxvh8x - v3.0 */
5246 return OPVXX1(31, 1004, 0) /* stxvb16x - v3.0 */
5249 return OPVXX1(31, 716, 0) /* stxsdx - v2.06 */
5252 return OPVXX1(31, 140, 0) /* stxsiwx - v2.07 */
5254 /* End of vector scalar instructions */
5258 c.ctxt.Diag("unknown storex opcode %v", a)