1 // cmd/9l/optab.c, cmd/9l/asmout.c from Vita Nuova.
3 // Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
4 // Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
5 // Portions Copyright © 1997-1999 Vita Nuova Limited
6 // Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com)
7 // Portions Copyright © 2004,2006 Bruce Ellis
8 // Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
9 // Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others
10 // Portions Copyright © 2009 The Go Authors. All rights reserved.
12 // Permission is hereby granted, free of charge, to any person obtaining a copy
13 // of this software and associated documentation files (the "Software"), to deal
14 // in the Software without restriction, including without limitation the rights
15 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
16 // copies of the Software, and to permit persons to whom the Software is
17 // furnished to do so, subject to the following conditions:
19 // The above copyright notice and this permission notice shall be included in
20 // all copies or substantial portions of the Software.
22 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
23 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
24 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
25 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
26 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
27 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
42 // ctxt9 holds state while assembling a single function.
43 // Each function gets a fresh ctxt9.
44 // This allows for multiple functions to be safely concurrently assembled.
54 // Instruction layout.
58 funcAlignMask = funcAlign - 1
67 a1 uint8 // p.From argument (obj.Addr). p is of type obj.Prog.
68 a2 uint8 // p.Reg argument (int16 Register)
69 a3 uint8 // p.RestArgs[0] (obj.AddrPos)
70 a4 uint8 // p.RestArgs[1]
a5 uint8 // p.RestArgs[2]
72 a6 uint8 // p.To (obj.Addr)
73 type_ int8 // cases in asmout below. E.g., 44 = st r,(ra+rb); 45 = ld (ra+rb), r
74 size int8 // Text space in bytes to lay operation
77 // optab contains an array to be sliced of accepted operand combinations for an
78 // instruction. Unused arguments and fields are not explicitly enumerated, and
79 // should not be listed for clarity. Unused arguments and values should always
80 // assume the default value for the given type.
82 // optab does not list every valid ppc64 opcode, it enumerates representative
83 // operand combinations for a class of instruction. The variable oprange indexes
84 // all valid ppc64 opcodes.
// oprange is initialized to point to a slice within optab which contains the valid
// operand combinations for a given instruction. This is initialized from buildop.
89 // Likewise, each slice of optab is dynamically sorted using the ocmp Sort interface
90 // to arrange entries to minimize text size of each opcode.
92 {as: obj.ATEXT, a1: C_LOREG, a6: C_TEXTSIZE, type_: 0, size: 0},
93 {as: obj.ATEXT, a1: C_LOREG, a3: C_LCON, a6: C_TEXTSIZE, type_: 0, size: 0},
94 {as: obj.ATEXT, a1: C_ADDR, a6: C_TEXTSIZE, type_: 0, size: 0},
95 {as: obj.ATEXT, a1: C_ADDR, a3: C_LCON, a6: C_TEXTSIZE, type_: 0, size: 0},
97 {as: AADD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
98 {as: AADD, a1: C_REG, a6: C_REG, type_: 2, size: 4},
99 {as: AADD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
100 {as: AADD, a1: C_SCON, a6: C_REG, type_: 4, size: 4},
101 {as: AADD, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
102 {as: AADD, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4},
103 {as: AADD, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 20, size: 4},
104 {as: AADD, a1: C_UCON, a6: C_REG, type_: 20, size: 4},
105 {as: AADD, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 22, size: 8},
106 {as: AADD, a1: C_ANDCON, a6: C_REG, type_: 22, size: 8},
107 {as: AADD, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12},
108 {as: AADD, a1: C_LCON, a6: C_REG, type_: 22, size: 12},
109 {as: AADDIS, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 20, size: 4},
110 {as: AADDIS, a1: C_ADDCON, a6: C_REG, type_: 20, size: 4},
111 {as: AADDC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
112 {as: AADDC, a1: C_REG, a6: C_REG, type_: 2, size: 4},
113 {as: AADDC, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
114 {as: AADDC, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4},
115 {as: AADDC, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12},
116 {as: AADDC, a1: C_LCON, a6: C_REG, type_: 22, size: 12},
117 {as: AAND, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, /* logical, no literal */
118 {as: AAND, a1: C_REG, a6: C_REG, type_: 6, size: 4},
119 {as: AANDCC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
120 {as: AANDCC, a1: C_REG, a6: C_REG, type_: 6, size: 4},
121 {as: AANDCC, a1: C_ANDCON, a6: C_REG, type_: 58, size: 4},
122 {as: AANDCC, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 58, size: 4},
123 {as: AANDCC, a1: C_UCON, a6: C_REG, type_: 59, size: 4},
124 {as: AANDCC, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
125 {as: AANDCC, a1: C_ADDCON, a6: C_REG, type_: 23, size: 8},
126 {as: AANDCC, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 23, size: 8},
127 {as: AANDCC, a1: C_LCON, a6: C_REG, type_: 23, size: 12},
128 {as: AANDCC, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 23, size: 12},
129 {as: AANDISCC, a1: C_ANDCON, a6: C_REG, type_: 59, size: 4},
130 {as: AANDISCC, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
131 {as: AMULLW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
132 {as: AMULLW, a1: C_REG, a6: C_REG, type_: 2, size: 4},
133 {as: AMULLW, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
134 {as: AMULLW, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4},
135 {as: AMULLW, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
136 {as: AMULLW, a1: C_ANDCON, a6: C_REG, type_: 4, size: 4},
137 {as: AMULLW, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12},
138 {as: AMULLW, a1: C_LCON, a6: C_REG, type_: 22, size: 12},
139 {as: ASUBC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 10, size: 4},
140 {as: ASUBC, a1: C_REG, a6: C_REG, type_: 10, size: 4},
141 {as: ASUBC, a1: C_REG, a3: C_ADDCON, a6: C_REG, type_: 27, size: 4},
142 {as: ASUBC, a1: C_REG, a3: C_LCON, a6: C_REG, type_: 28, size: 12},
143 {as: AOR, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, /* logical, literal not cc (or/xor) */
144 {as: AOR, a1: C_REG, a6: C_REG, type_: 6, size: 4},
145 {as: AOR, a1: C_ANDCON, a6: C_REG, type_: 58, size: 4},
146 {as: AOR, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 58, size: 4},
147 {as: AOR, a1: C_UCON, a6: C_REG, type_: 59, size: 4},
148 {as: AOR, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
149 {as: AOR, a1: C_ADDCON, a6: C_REG, type_: 23, size: 8},
150 {as: AOR, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 23, size: 8},
151 {as: AOR, a1: C_LCON, a6: C_REG, type_: 23, size: 12},
152 {as: AOR, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 23, size: 12},
153 {as: AORIS, a1: C_ANDCON, a6: C_REG, type_: 59, size: 4},
154 {as: AORIS, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
155 {as: ADIVW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4}, /* op r1[,r2],r3 */
156 {as: ADIVW, a1: C_REG, a6: C_REG, type_: 2, size: 4},
157 {as: ASUB, a1: C_REG, a2: C_REG, a6: C_REG, type_: 10, size: 4}, /* op r2[,r1],r3 */
158 {as: ASUB, a1: C_REG, a6: C_REG, type_: 10, size: 4},
159 {as: ASLW, a1: C_REG, a6: C_REG, type_: 6, size: 4},
160 {as: ASLW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
161 {as: ASLD, a1: C_REG, a6: C_REG, type_: 6, size: 4},
162 {as: ASLD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
163 {as: ASLD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 25, size: 4},
164 {as: ASLD, a1: C_SCON, a6: C_REG, type_: 25, size: 4},
165 {as: AEXTSWSLI, a1: C_SCON, a6: C_REG, type_: 25, size: 4},
166 {as: AEXTSWSLI, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 25, size: 4},
167 {as: ASLW, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 57, size: 4},
168 {as: ASLW, a1: C_SCON, a6: C_REG, type_: 57, size: 4},
169 {as: ASRAW, a1: C_REG, a6: C_REG, type_: 6, size: 4},
170 {as: ASRAW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
171 {as: ASRAW, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 56, size: 4},
172 {as: ASRAW, a1: C_SCON, a6: C_REG, type_: 56, size: 4},
173 {as: ASRAD, a1: C_REG, a6: C_REG, type_: 6, size: 4},
174 {as: ASRAD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
175 {as: ASRAD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 56, size: 4},
176 {as: ASRAD, a1: C_SCON, a6: C_REG, type_: 56, size: 4},
177 {as: ARLWMI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 62, size: 4},
178 {as: ARLWMI, a1: C_SCON, a2: C_REG, a3: C_SCON, a4: C_SCON, a6: C_REG, type_: 102, size: 4},
179 {as: ARLWMI, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 63, size: 4},
180 {as: ARLWMI, a1: C_REG, a2: C_REG, a3: C_SCON, a4: C_SCON, a6: C_REG, type_: 103, size: 4},
181 {as: ACLRLSLWI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 62, size: 4},
182 {as: ARLDMI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 30, size: 4},
183 {as: ARLDC, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 29, size: 4},
184 {as: ARLDCL, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 29, size: 4},
185 {as: ARLDCL, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
186 {as: ARLDICL, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
187 {as: ARLDICL, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
188 {as: ARLDCL, a1: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
189 {as: AFADD, a1: C_FREG, a6: C_FREG, type_: 2, size: 4},
190 {as: AFADD, a1: C_FREG, a2: C_FREG, a6: C_FREG, type_: 2, size: 4},
191 {as: AFABS, a1: C_FREG, a6: C_FREG, type_: 33, size: 4},
192 {as: AFABS, a6: C_FREG, type_: 33, size: 4},
193 {as: AFMADD, a1: C_FREG, a2: C_FREG, a3: C_FREG, a6: C_FREG, type_: 34, size: 4},
194 {as: AFMUL, a1: C_FREG, a6: C_FREG, type_: 32, size: 4},
195 {as: AFMUL, a1: C_FREG, a2: C_FREG, a6: C_FREG, type_: 32, size: 4},
197 {as: AMOVBU, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
198 {as: AMOVBU, a1: C_SOREG, a6: C_REG, type_: 9, size: 8},
200 {as: AMOVBZU, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
201 {as: AMOVBZU, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
203 {as: AMOVHBR, a1: C_REG, a6: C_ZOREG, type_: 44, size: 4},
204 {as: AMOVHBR, a1: C_ZOREG, a6: C_REG, type_: 45, size: 4},
206 {as: AMOVB, a1: C_ADDR, a6: C_REG, type_: 76, size: 12},
207 {as: AMOVB, a1: C_LOREG, a6: C_REG, type_: 37, size: 12},
208 {as: AMOVB, a1: C_SOREG, a6: C_REG, type_: 9, size: 8},
209 {as: AMOVB, a1: C_REG, a6: C_ADDR, type_: 74, size: 8},
210 {as: AMOVB, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
211 {as: AMOVB, a1: C_REG, a6: C_LOREG, type_: 35, size: 8},
212 {as: AMOVB, a1: C_REG, a6: C_REG, type_: 13, size: 4},
214 {as: AMOVBZ, a1: C_ADDR, a6: C_REG, type_: 75, size: 8},
215 {as: AMOVBZ, a1: C_LOREG, a6: C_REG, type_: 36, size: 8},
216 {as: AMOVBZ, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
217 {as: AMOVBZ, a1: C_REG, a6: C_ADDR, type_: 74, size: 8},
218 {as: AMOVBZ, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
219 {as: AMOVBZ, a1: C_REG, a6: C_LOREG, type_: 35, size: 8},
220 {as: AMOVBZ, a1: C_REG, a6: C_REG, type_: 13, size: 4},
222 {as: AMOVD, a1: C_ADDCON, a6: C_REG, type_: 3, size: 4},
223 {as: AMOVD, a1: C_ANDCON, a6: C_REG, type_: 3, size: 4},
224 {as: AMOVD, a1: C_UCON, a6: C_REG, type_: 3, size: 4},
225 {as: AMOVD, a1: C_LCON, a6: C_REG, type_: 19, size: 8},
226 {as: AMOVD, a1: C_SACON, a6: C_REG, type_: 3, size: 4},
227 {as: AMOVD, a1: C_LACON, a6: C_REG, type_: 26, size: 8},
228 {as: AMOVD, a1: C_ADDR, a6: C_REG, type_: 75, size: 8},
229 {as: AMOVD, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
230 {as: AMOVD, a1: C_LOREG, a6: C_REG, type_: 36, size: 8},
231 {as: AMOVD, a1: C_TLS_LE, a6: C_REG, type_: 79, size: 8},
232 {as: AMOVD, a1: C_TLS_IE, a6: C_REG, type_: 80, size: 12},
233 {as: AMOVD, a1: C_SPR, a6: C_REG, type_: 66, size: 4},
234 {as: AMOVD, a1: C_REG, a6: C_ADDR, type_: 74, size: 8},
235 {as: AMOVD, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
236 {as: AMOVD, a1: C_REG, a6: C_LOREG, type_: 35, size: 8},
237 {as: AMOVD, a1: C_REG, a6: C_SPR, type_: 66, size: 4},
238 {as: AMOVD, a1: C_REG, a6: C_REG, type_: 13, size: 4},
240 {as: AMOVW, a1: C_ADDCON, a6: C_REG, type_: 3, size: 4},
241 {as: AMOVW, a1: C_ANDCON, a6: C_REG, type_: 3, size: 4},
242 {as: AMOVW, a1: C_UCON, a6: C_REG, type_: 3, size: 4},
243 {as: AMOVW, a1: C_LCON, a6: C_REG, type_: 19, size: 8},
244 {as: AMOVW, a1: C_SACON, a6: C_REG, type_: 3, size: 4},
245 {as: AMOVW, a1: C_LACON, a6: C_REG, type_: 26, size: 8},
246 {as: AMOVW, a1: C_ADDR, a6: C_REG, type_: 75, size: 8},
247 {as: AMOVW, a1: C_CREG, a6: C_REG, type_: 68, size: 4},
248 {as: AMOVW, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
249 {as: AMOVW, a1: C_LOREG, a6: C_REG, type_: 36, size: 8},
250 {as: AMOVW, a1: C_SPR, a6: C_REG, type_: 66, size: 4},
251 {as: AMOVW, a1: C_REG, a6: C_ADDR, type_: 74, size: 8},
252 {as: AMOVW, a1: C_REG, a6: C_CREG, type_: 69, size: 4},
253 {as: AMOVW, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
254 {as: AMOVW, a1: C_REG, a6: C_LOREG, type_: 35, size: 8},
255 {as: AMOVW, a1: C_REG, a6: C_SPR, type_: 66, size: 4},
256 {as: AMOVW, a1: C_REG, a6: C_REG, type_: 13, size: 4},
258 {as: AFMOVD, a1: C_ADDCON, a6: C_FREG, type_: 24, size: 8},
259 {as: AFMOVD, a1: C_SOREG, a6: C_FREG, type_: 8, size: 4},
260 {as: AFMOVD, a1: C_LOREG, a6: C_FREG, type_: 36, size: 8},
261 {as: AFMOVD, a1: C_ZCON, a6: C_FREG, type_: 24, size: 4},
262 {as: AFMOVD, a1: C_ADDR, a6: C_FREG, type_: 75, size: 8},
263 {as: AFMOVD, a1: C_FREG, a6: C_FREG, type_: 33, size: 4},
264 {as: AFMOVD, a1: C_FREG, a6: C_SOREG, type_: 7, size: 4},
265 {as: AFMOVD, a1: C_FREG, a6: C_LOREG, type_: 35, size: 8},
266 {as: AFMOVD, a1: C_FREG, a6: C_ADDR, type_: 74, size: 8},
268 {as: AFMOVSX, a1: C_ZOREG, a6: C_FREG, type_: 45, size: 4},
269 {as: AFMOVSX, a1: C_FREG, a6: C_ZOREG, type_: 44, size: 4},
271 {as: AFMOVSZ, a1: C_ZOREG, a6: C_FREG, type_: 45, size: 4},
273 {as: AMOVFL, a1: C_CREG, a6: C_CREG, type_: 67, size: 4},
274 {as: AMOVFL, a1: C_FPSCR, a6: C_CREG, type_: 73, size: 4},
275 {as: AMOVFL, a1: C_FPSCR, a6: C_FREG, type_: 53, size: 4},
276 {as: AMOVFL, a1: C_FREG, a3: C_LCON, a6: C_FPSCR, type_: 64, size: 4},
277 {as: AMOVFL, a1: C_FREG, a6: C_FPSCR, type_: 64, size: 4},
278 {as: AMOVFL, a1: C_LCON, a6: C_FPSCR, type_: 65, size: 4},
279 {as: AMOVFL, a1: C_REG, a6: C_CREG, type_: 69, size: 4},
280 {as: AMOVFL, a1: C_REG, a6: C_LCON, type_: 69, size: 4},
282 {as: ASYSCALL, type_: 5, size: 4},
283 {as: ASYSCALL, a1: C_REG, type_: 77, size: 12},
284 {as: ASYSCALL, a1: C_SCON, type_: 77, size: 12},
285 {as: ABEQ, a6: C_SBRA, type_: 16, size: 4},
286 {as: ABEQ, a1: C_CREG, a6: C_SBRA, type_: 16, size: 4},
287 {as: ABR, a6: C_LBRA, type_: 11, size: 4},
288 {as: ABR, a6: C_LBRAPIC, type_: 11, size: 8},
289 {as: ABC, a1: C_SCON, a2: C_REG, a6: C_SBRA, type_: 16, size: 4},
290 {as: ABC, a1: C_SCON, a2: C_REG, a6: C_LBRA, type_: 17, size: 4},
291 {as: ABR, a6: C_LR, type_: 18, size: 4},
292 {as: ABR, a3: C_SCON, a6: C_LR, type_: 18, size: 4},
293 {as: ABR, a6: C_CTR, type_: 18, size: 4},
294 {as: ABR, a1: C_REG, a6: C_CTR, type_: 18, size: 4},
295 {as: ABR, a6: C_ZOREG, type_: 15, size: 8},
296 {as: ABC, a2: C_REG, a6: C_LR, type_: 18, size: 4},
297 {as: ABC, a2: C_REG, a6: C_CTR, type_: 18, size: 4},
298 {as: ABC, a1: C_SCON, a2: C_REG, a6: C_LR, type_: 18, size: 4},
299 {as: ABC, a1: C_SCON, a2: C_REG, a6: C_CTR, type_: 18, size: 4},
300 {as: ABC, a6: C_ZOREG, type_: 15, size: 8},
301 {as: ASYNC, type_: 46, size: 4},
302 {as: AWORD, a1: C_LCON, type_: 40, size: 4},
303 {as: ADWORD, a1: C_LCON, type_: 31, size: 8},
304 {as: ADWORD, a1: C_DCON, type_: 31, size: 8},
305 {as: ADWORD, a1: C_LACON, type_: 31, size: 8},
306 {as: AADDME, a1: C_REG, a6: C_REG, type_: 47, size: 4},
307 {as: AEXTSB, a1: C_REG, a6: C_REG, type_: 48, size: 4},
308 {as: AEXTSB, a6: C_REG, type_: 48, size: 4},
309 {as: AISEL, a1: C_LCON, a2: C_REG, a3: C_REG, a6: C_REG, type_: 84, size: 4},
310 {as: AISEL, a1: C_ZCON, a2: C_REG, a3: C_REG, a6: C_REG, type_: 84, size: 4},
311 {as: ANEG, a1: C_REG, a6: C_REG, type_: 47, size: 4},
312 {as: ANEG, a6: C_REG, type_: 47, size: 4},
313 {as: AREM, a1: C_REG, a6: C_REG, type_: 50, size: 12},
314 {as: AREM, a1: C_REG, a2: C_REG, a6: C_REG, type_: 50, size: 12},
315 {as: AREMU, a1: C_REG, a6: C_REG, type_: 50, size: 16},
316 {as: AREMU, a1: C_REG, a2: C_REG, a6: C_REG, type_: 50, size: 16},
317 {as: AREMD, a1: C_REG, a6: C_REG, type_: 51, size: 12},
318 {as: AREMD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 51, size: 12},
319 {as: AMTFSB0, a1: C_SCON, type_: 52, size: 4},
320 /* Other ISA 2.05+ instructions */
321 {as: APOPCNTD, a1: C_REG, a6: C_REG, type_: 93, size: 4}, /* population count, x-form */
322 {as: ACMPB, a1: C_REG, a2: C_REG, a6: C_REG, type_: 92, size: 4}, /* compare byte, x-form */
323 {as: ACMPEQB, a1: C_REG, a2: C_REG, a6: C_CREG, type_: 92, size: 4}, /* compare equal byte, x-form, ISA 3.0 */
324 {as: ACMPEQB, a1: C_REG, a6: C_REG, type_: 70, size: 4},
325 {as: AFTDIV, a1: C_FREG, a2: C_FREG, a6: C_SCON, type_: 92, size: 4}, /* floating test for sw divide, x-form */
326 {as: AFTSQRT, a1: C_FREG, a6: C_SCON, type_: 93, size: 4}, /* floating test for sw square root, x-form */
327 {as: ACOPY, a1: C_REG, a6: C_REG, type_: 92, size: 4}, /* copy/paste facility, x-form */
328 {as: ADARN, a1: C_SCON, a6: C_REG, type_: 92, size: 4}, /* deliver random number, x-form */
329 {as: ALDMX, a1: C_SOREG, a6: C_REG, type_: 45, size: 4}, /* load doubleword monitored, x-form */
330 {as: AMADDHD, a1: C_REG, a2: C_REG, a3: C_REG, a6: C_REG, type_: 83, size: 4}, /* multiply-add high/low doubleword, va-form */
331 {as: AADDEX, a1: C_REG, a2: C_REG, a3: C_SCON, a6: C_REG, type_: 94, size: 4}, /* add extended using alternate carry, z23-form */
332 {as: ACRAND, a1: C_CREG, a6: C_CREG, type_: 2, size: 4}, /* logical ops for condition registers xl-form */
334 /* Vector instructions */
337 {as: ALV, a1: C_SOREG, a6: C_VREG, type_: 45, size: 4}, /* vector load, x-form */
340 {as: ASTV, a1: C_VREG, a6: C_SOREG, type_: 44, size: 4}, /* vector store, x-form */
343 {as: AVAND, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector and, vx-form */
344 {as: AVOR, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector or, vx-form */
347 {as: AVADDUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add unsigned modulo, vx-form */
348 {as: AVADDCU, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add & write carry unsigned, vx-form */
349 {as: AVADDUS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add unsigned saturate, vx-form */
350 {as: AVADDSS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add signed saturate, vx-form */
351 {as: AVADDE, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector add extended, va-form */
353 /* Vector subtract */
354 {as: AVSUBUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract unsigned modulo, vx-form */
355 {as: AVSUBCU, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract & write carry unsigned, vx-form */
356 {as: AVSUBUS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract unsigned saturate, vx-form */
357 {as: AVSUBSS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract signed saturate, vx-form */
358 {as: AVSUBE, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector subtract extended, va-form */
360 /* Vector multiply */
361 {as: AVMULESB, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector multiply, vx-form */
362 {as: AVPMSUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector polynomial multiply & sum, vx-form */
363 {as: AVMSUMUDM, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector multiply-sum, va-form */
366 {as: AVR, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector rotate, vx-form */
369 {as: AVS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector shift, vx-form */
370 {as: AVSA, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector shift algebraic, vx-form */
371 {as: AVSOI, a1: C_ANDCON, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector shift by octet immediate, va-form */
374 {as: AVCLZ, a1: C_VREG, a6: C_VREG, type_: 85, size: 4}, /* vector count leading zeros, vx-form */
375 {as: AVPOPCNT, a1: C_VREG, a6: C_VREG, type_: 85, size: 4}, /* vector population count, vx-form */
378 {as: AVCMPEQ, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare equal, vc-form */
379 {as: AVCMPGT, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare greater than, vc-form */
380 {as: AVCMPNEZB, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare not equal, vx-form */
383 {as: AVMRGOW, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector merge odd word, vx-form */
386 {as: AVPERM, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector permute, va-form */
388 /* Vector bit permute */
389 {as: AVBPERMQ, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector bit permute, vx-form */
392 {as: AVSEL, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector select, va-form */
395 {as: AVSPLTB, a1: C_SCON, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector splat, vx-form */
396 {as: AVSPLTB, a1: C_ADDCON, a2: C_VREG, a6: C_VREG, type_: 82, size: 4},
397 {as: AVSPLTISB, a1: C_SCON, a6: C_VREG, type_: 82, size: 4}, /* vector splat immediate, vx-form */
398 {as: AVSPLTISB, a1: C_ADDCON, a6: C_VREG, type_: 82, size: 4},
401 {as: AVCIPH, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES cipher, vx-form */
402 {as: AVNCIPH, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES inverse cipher, vx-form */
403 {as: AVSBOX, a1: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES subbytes, vx-form */
406 {as: AVSHASIGMA, a1: C_ANDCON, a2: C_VREG, a3: C_ANDCON, a6: C_VREG, type_: 82, size: 4}, /* vector SHA sigma, vx-form */
408 /* VSX vector load */
409 {as: ALXVD2X, a1: C_SOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx vector load, xx1-form */
410 {as: ALXV, a1: C_SOREG, a6: C_VSREG, type_: 96, size: 4}, /* vsx vector load, dq-form */
411 {as: ALXVL, a1: C_REG, a2: C_REG, a6: C_VSREG, type_: 98, size: 4}, /* vsx vector load length */
413 /* VSX vector store */
414 {as: ASTXVD2X, a1: C_VSREG, a6: C_SOREG, type_: 86, size: 4}, /* vsx vector store, xx1-form */
415 {as: ASTXV, a1: C_VSREG, a6: C_SOREG, type_: 97, size: 4}, /* vsx vector store, dq-form */
416 {as: ASTXVL, a1: C_VSREG, a2: C_REG, a6: C_REG, type_: 99, size: 4}, /* vsx vector store with length x-form */
418 /* VSX scalar load */
419 {as: ALXSDX, a1: C_SOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx scalar load, xx1-form */
421 /* VSX scalar store */
422 {as: ASTXSDX, a1: C_VSREG, a6: C_SOREG, type_: 86, size: 4}, /* vsx scalar store, xx1-form */
424 /* VSX scalar as integer load */
425 {as: ALXSIWAX, a1: C_SOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx scalar as integer load, xx1-form */
427 /* VSX scalar store as integer */
428 {as: ASTXSIWX, a1: C_VSREG, a6: C_SOREG, type_: 86, size: 4}, /* vsx scalar as integer store, xx1-form */
430 /* VSX move from VSR */
431 {as: AMFVSRD, a1: C_VSREG, a6: C_REG, type_: 88, size: 4}, /* vsx move from vsr, xx1-form */
432 {as: AMFVSRD, a1: C_FREG, a6: C_REG, type_: 88, size: 4},
433 {as: AMFVSRD, a1: C_VREG, a6: C_REG, type_: 88, size: 4},
435 /* VSX move to VSR */
436 {as: AMTVSRD, a1: C_REG, a6: C_VSREG, type_: 88, size: 4}, /* vsx move to vsr, xx1-form */
437 {as: AMTVSRD, a1: C_REG, a2: C_REG, a6: C_VSREG, type_: 88, size: 4},
438 {as: AMTVSRD, a1: C_REG, a6: C_FREG, type_: 88, size: 4},
439 {as: AMTVSRD, a1: C_REG, a6: C_VREG, type_: 88, size: 4},
442 {as: AXXLAND, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx and, xx3-form */
443 {as: AXXLOR, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx or, xx3-form */
446 {as: AXXSEL, a1: C_VSREG, a2: C_VSREG, a3: C_VSREG, a6: C_VSREG, type_: 91, size: 4}, /* vsx select, xx4-form */
449 {as: AXXMRGHW, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx merge, xx3-form */
452 {as: AXXSPLTW, a1: C_VSREG, a3: C_SCON, a6: C_VSREG, type_: 89, size: 4}, /* vsx splat, xx2-form */
453 {as: AXXSPLTIB, a1: C_SCON, a6: C_VSREG, type_: 100, size: 4}, /* vsx splat, xx2-form */
456 {as: AXXPERM, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx permute, xx3-form */
459 {as: AXXSLDWI, a1: C_VSREG, a2: C_VSREG, a3: C_SCON, a6: C_VSREG, type_: 90, size: 4}, /* vsx shift immediate, xx3-form */
461 /* VSX reverse bytes */
462 {as: AXXBRQ, a1: C_VSREG, a6: C_VSREG, type_: 101, size: 4}, /* vsx reverse bytes */
464 /* VSX scalar FP-FP conversion */
465 {as: AXSCVDPSP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar fp-fp conversion, xx2-form */
467 /* VSX vector FP-FP conversion */
468 {as: AXVCVDPSP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector fp-fp conversion, xx2-form */
470 /* VSX scalar FP-integer conversion */
471 {as: AXSCVDPSXDS, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar fp-integer conversion, xx2-form */
473 /* VSX scalar integer-FP conversion */
474 {as: AXSCVSXDDP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar integer-fp conversion, xx2-form */
476 /* VSX vector FP-integer conversion */
477 {as: AXVCVDPSXDS, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector fp-integer conversion, xx2-form */
479 /* VSX vector integer-FP conversion */
480 {as: AXVCVSXDDP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector integer-fp conversion, xx2-form */
482 {as: ACMP, a1: C_REG, a6: C_REG, type_: 70, size: 4},
483 {as: ACMP, a1: C_REG, a2: C_REG, a6: C_REG, type_: 70, size: 4},
484 {as: ACMP, a1: C_REG, a6: C_ADDCON, type_: 71, size: 4},
485 {as: ACMP, a1: C_REG, a2: C_REG, a6: C_ADDCON, type_: 71, size: 4},
486 {as: ACMPU, a1: C_REG, a6: C_REG, type_: 70, size: 4},
487 {as: ACMPU, a1: C_REG, a2: C_REG, a6: C_REG, type_: 70, size: 4},
488 {as: ACMPU, a1: C_REG, a6: C_ANDCON, type_: 71, size: 4},
489 {as: ACMPU, a1: C_REG, a2: C_REG, a6: C_ANDCON, type_: 71, size: 4},
490 {as: AFCMPO, a1: C_FREG, a6: C_FREG, type_: 70, size: 4},
491 {as: AFCMPO, a1: C_FREG, a2: C_REG, a6: C_FREG, type_: 70, size: 4},
492 {as: ATW, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 60, size: 4},
493 {as: ATW, a1: C_LCON, a2: C_REG, a6: C_ADDCON, type_: 61, size: 4},
494 {as: ADCBF, a1: C_ZOREG, type_: 43, size: 4},
495 {as: ADCBF, a1: C_SOREG, type_: 43, size: 4},
496 {as: ADCBF, a1: C_ZOREG, a2: C_REG, a6: C_SCON, type_: 43, size: 4},
497 {as: ADCBF, a1: C_SOREG, a6: C_SCON, type_: 43, size: 4},
498 {as: AECOWX, a1: C_REG, a2: C_REG, a6: C_ZOREG, type_: 44, size: 4},
499 {as: AECIWX, a1: C_ZOREG, a2: C_REG, a6: C_REG, type_: 45, size: 4},
500 {as: AECOWX, a1: C_REG, a6: C_ZOREG, type_: 44, size: 4},
501 {as: AECIWX, a1: C_ZOREG, a6: C_REG, type_: 45, size: 4},
502 {as: ALDAR, a1: C_ZOREG, a6: C_REG, type_: 45, size: 4},
503 {as: ALDAR, a1: C_ZOREG, a3: C_ANDCON, a6: C_REG, type_: 45, size: 4},
504 {as: AEIEIO, type_: 46, size: 4},
505 {as: ATLBIE, a1: C_REG, type_: 49, size: 4},
506 {as: ATLBIE, a1: C_SCON, a6: C_REG, type_: 49, size: 4},
507 {as: ASLBMFEE, a1: C_REG, a6: C_REG, type_: 55, size: 4},
508 {as: ASLBMTE, a1: C_REG, a6: C_REG, type_: 55, size: 4},
509 {as: ASTSW, a1: C_REG, a6: C_ZOREG, type_: 44, size: 4},
510 {as: ASTSW, a1: C_REG, a3: C_LCON, a6: C_ZOREG, type_: 41, size: 4},
511 {as: ALSW, a1: C_ZOREG, a6: C_REG, type_: 45, size: 4},
512 {as: ALSW, a1: C_ZOREG, a3: C_LCON, a6: C_REG, type_: 42, size: 4},
513 {as: obj.AUNDEF, type_: 78, size: 4},
514 {as: obj.APCDATA, a1: C_LCON, a6: C_LCON, type_: 0, size: 0},
515 {as: obj.AFUNCDATA, a1: C_SCON, a6: C_ADDR, type_: 0, size: 0},
516 {as: obj.ANOP, type_: 0, size: 0},
517 {as: obj.ANOP, a1: C_LCON, type_: 0, size: 0}, // NOP operand variations added for #40689
518 {as: obj.ANOP, a1: C_REG, type_: 0, size: 0}, // to preserve previous behavior
519 {as: obj.ANOP, a1: C_FREG, type_: 0, size: 0},
520 {as: obj.ADUFFZERO, a6: C_LBRA, type_: 11, size: 4}, // same as ABR/ABL
521 {as: obj.ADUFFCOPY, a6: C_LBRA, type_: 11, size: 4}, // same as ABR/ABL
522 {as: obj.APCALIGN, a1: C_LCON, type_: 0, size: 0}, // align code
524 {as: obj.AXXX, type_: 0, size: 4},
527 var oprange [ALAST & obj.AMask][]Optab
529 var xcmp [C_NCLASS][C_NCLASS]bool
531 // padding bytes to add to align code as requested
532 func addpad(pc, a int64, ctxt *obj.Link, cursym *obj.LSym) int {
533 // For 16 and 32 byte alignment, there is a tradeoff
534 // between aligning the code and adding too many NOPs.
541 // Align to 16 bytes if possible but add at
550 // Align to 32 bytes if possible but add at
560 // When 32 byte alignment is requested on Linux,
561 // promote the function's alignment to 32. On AIX
562 // the function alignment is not changed which might
563 // result in 16 byte alignment but that is still fine.
564 // TODO: alignment on AIX
565 if ctxt.Headtype != objabi.Haix && cursym.Func().Align < 32 {
566 cursym.Func().Align = 32
569 ctxt.Diag("Unexpected alignment: %d for PCALIGN directive\n", a)
// Get the implied register of an operand which doesn't specify one. These show up
// in handwritten asm like "MOVD R5, foosymbol" where a base register is not supplied,
// or "MOVD R5, foo+10(SP)" where a pseudo-register is used. The other common case is when
// generating constants in register like "MOVD $constant, Rx".
578 func (c *ctxt9) getimpliedreg(a *obj.Addr, p *obj.Prog) int {
580 case C_ADDCON, C_ANDCON, C_UCON, C_LCON, C_SCON, C_ZCON:
582 case C_SACON, C_LACON:
584 case C_LOREG, C_SOREG, C_ZOREG:
586 case obj.NAME_EXTERN, obj.NAME_STATIC:
588 case obj.NAME_AUTO, obj.NAME_PARAM:
594 c.ctxt.Diag("failed to determine implied reg for class %v (%v)", DRconv(oclass(a)), p)
// span9 lays out and encodes one function: it assigns a PC to every Prog,
// expands conditional branches whose targets are out of short-branch range,
// and finally emits the machine-code words into the symbol's data.
// NOTE(review): this listing is elided (gaps in the embedded line numbers);
// several declarations, loop bodies, and closing braces are not shown.
598 func span9(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
599 p := cursym.Func().Text
600 if p == nil || p.Link == nil { // handle external functions and ELF section symbols
604 if oprange[AANDN&obj.AMask] == nil {
605 ctxt.Diag("ppc64 ops not initialized, call ppc64.buildop first")
608 c := ctxt9{ctxt: ctxt, newprog: newprog, cursym: cursym, autosize: int32(p.To.Offset)}
// First pass: size each instruction and assign provisional PCs.
615 for p = p.Link; p != nil; p = p.Link {
620 if p.As == obj.APCALIGN {
621 a := c.vregoff(&p.From)
622 m = addpad(pc, a, ctxt, cursym)
624 if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA {
625 ctxt.Diag("zero-width instruction\n%v", p)
636 * if any procedure is large enough to
637 * generate a large SBRA branch, then
638 * generate extra passes putting branches
639 * around jmps to fix. this is rare.
// Branch-relaxation passes: repeat until no conditional branch is out of
// the signed 16-bit displacement range (with a small safety margin).
648 for p = c.cursym.Func().Text.Link; p != nil; p = p.Link {
652 // very large conditional branches
653 if (o.type_ == 16 || o.type_ == 17) && p.To.Target() != nil {
654 otxt = p.To.Target().Pc - pc
655 if otxt < -(1<<15)+10 || otxt >= (1<<15)-10 {
// Rewrite the far conditional branch as a short branch around an
// unconditional jump to the original target.
660 q.To.Type = obj.TYPE_BRANCH
661 q.To.SetTarget(p.To.Target())
667 q.To.Type = obj.TYPE_BRANCH
668 q.To.SetTarget(q.Link.Link)
678 if p.As == obj.APCALIGN {
679 a := c.vregoff(&p.From)
680 m = addpad(pc, a, ctxt, cursym)
682 if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA {
683 ctxt.Diag("zero-width instruction\n%v", p)
// Round the function size up to the function alignment boundary.
695 if r := pc & funcAlignMask; r != 0 {
702 * lay out the code, emitting code and data relocations.
705 c.cursym.Grow(c.cursym.Size)
// Final pass: encode each Prog into out[] and copy words into the symbol.
710 for p := c.cursym.Func().Text.Link; p != nil; p = p.Link {
713 if int(o.size) > 4*len(out) {
714 log.Fatalf("out array in span9 is too small, need at least %d for %v", o.size/4, p)
716 // asmout is not set up to add large amounts of padding
717 if o.type_ == 0 && p.As == obj.APCALIGN {
718 pad := LOP_RRR(OP_OR, REGZERO, REGZERO, REGZERO)
719 aln := c.vregoff(&p.From)
720 v := addpad(p.Pc, aln, c.ctxt, c.cursym)
722 // Same padding instruction for all
723 for i = 0; i < int32(v/4); i++ {
724 c.ctxt.Arch.ByteOrder.PutUint32(bp, pad)
729 c.asmout(p, o, out[:])
730 for i = 0; i < int32(o.size/4); i++ {
731 c.ctxt.Arch.ByteOrder.PutUint32(bp, out[i])
// isint32 reports whether v can be represented exactly as a signed
// 32-bit integer (i.e. truncating to int32 and widening back loses nothing).
func isint32(v int64) bool {
	truncated := int32(v)
	return int64(truncated) == v
}
// isuint32 reports whether v can be represented exactly as an unsigned
// 32-bit integer.
func isuint32(v uint64) bool {
	truncated := uint32(v)
	return uint64(truncated) == v
}
// aclass classifies operand a into one of the C_* operand classes used to
// look up optab entries, recording any constant or offset in c.instoffset.
// NOTE(review): this listing is elided (gaps in the embedded line numbers);
// the enclosing switch on a.Type, most returns, and braces are not shown.
746 func (c *ctxt9) aclass(a *obj.Addr) int {
// Register operands: classify by which architectural register file Reg
// falls into (GPR, FPR, vector, VSX, CR, SPR, DCR, FPSCR).
752 if REG_R0 <= a.Reg && a.Reg <= REG_R31 {
755 if REG_F0 <= a.Reg && a.Reg <= REG_F31 {
758 if REG_V0 <= a.Reg && a.Reg <= REG_V31 {
761 if REG_VS0 <= a.Reg && a.Reg <= REG_VS63 {
764 if REG_CR0 <= a.Reg && a.Reg <= REG_CR7 || a.Reg == REG_CR {
767 if REG_SPR0 <= a.Reg && a.Reg <= REG_SPR0+1023 {
782 if REG_DCR0 <= a.Reg && a.Reg <= REG_DCR0+1023 {
785 if a.Reg == REG_FPSCR {
// Memory operands: classify by symbol kind and offset magnitude.
792 case obj.NAME_GOTREF, obj.NAME_TOCREF:
795 case obj.NAME_EXTERN,
797 c.instoffset = a.Offset
800 } else if a.Sym.Type == objabi.STLSBSS {
801 // For PIC builds, use 12 byte got initial-exec TLS accesses.
802 if c.ctxt.Flag_shared {
805 // Otherwise, use 8 byte local-exec TLS accesses.
// Frame-relative operands: offset is biased by the frame size (and the
// fixed frame area for params); BIG bounds the short-offset encodings.
812 c.instoffset = int64(c.autosize) + a.Offset
813 if c.instoffset >= -BIG && c.instoffset < BIG {
819 c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.FixedFrameSize()
820 if c.instoffset >= -BIG && c.instoffset < BIG {
826 c.instoffset = a.Offset
827 if c.instoffset == 0 {
830 if c.instoffset >= -BIG && c.instoffset < BIG {
838 case obj.TYPE_TEXTSIZE:
841 case obj.TYPE_FCONST:
842 // The only cases where FCONST will occur are with float64 +/- 0.
843 // All other float constants are generated in memory.
844 f64 := a.Val.(float64)
846 if math.Signbit(f64) {
851 log.Fatalf("Unexpected nonzero FCONST operand %v", a)
// Integer constants: pick the narrowest C_*CON class that can encode
// the value (zero, signed/unsigned 16-bit, shifted 16-bit, 32-bit, ...).
857 c.instoffset = a.Offset
859 if -BIG <= c.instoffset && c.instoffset <= BIG {
862 if isint32(c.instoffset) {
868 case obj.NAME_EXTERN,
874 c.instoffset = a.Offset
878 c.instoffset = int64(c.autosize) + a.Offset
879 if c.instoffset >= -BIG && c.instoffset < BIG {
885 c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.FixedFrameSize()
886 if c.instoffset >= -BIG && c.instoffset < BIG {
895 if c.instoffset >= 0 {
896 if c.instoffset == 0 {
899 if c.instoffset <= 0x7fff {
902 if c.instoffset <= 0xffff {
905 if c.instoffset&0xffff == 0 && isuint32(uint64(c.instoffset)) { /* && (instoffset & (1<<31)) == 0) */
908 if isint32(c.instoffset) || isuint32(uint64(c.instoffset)) {
914 if c.instoffset >= -0x8000 {
917 if c.instoffset&0xffff == 0 && isint32(c.instoffset) {
920 if isint32(c.instoffset) {
925 case obj.TYPE_BRANCH:
926 if a.Sym != nil && c.ctxt.Flag_dynlink {
935 func prasm(p *obj.Prog) {
936 fmt.Printf("%v\n", p)
// oplook finds the Optab entry matching p's opcode and operand classes.
// Operand classes are cached in Addr.Class biased by +1 (0 = unclassified)
// so aclass runs at most once per operand.
// NOTE(review): this listing is elided (gaps in the embedded line numbers);
// the cache checks, the a2 defaulting, and the match-loop header are not shown.
939 func (c *ctxt9) oplook(p *obj.Prog) *Optab {
// Classify p.From (cached).
944 a1 = int(p.From.Class)
946 a1 = c.aclass(&p.From) + 1
947 p.From.Class = int8(a1)
// Classify up to three RestArgs (cached), defaulting to C_NONE.
951 argsv := [3]int{C_NONE + 1, C_NONE + 1, C_NONE + 1}
952 for i, ap := range p.RestArgs {
953 argsv[i] = int(ap.Addr.Class)
955 argsv[i] = c.aclass(&ap.Addr) + 1
956 ap.Addr.Class = int8(argsv[i])
// Classify p.To (cached).
964 a6 := int(p.To.Class)
966 a6 = c.aclass(&p.To) + 1
967 p.To.Class = int8(a6)
// p.Reg (the second operand) is a bare register; classify by register file.
973 if REG_R0 <= p.Reg && p.Reg <= REG_R31 {
975 } else if REG_V0 <= p.Reg && p.Reg <= REG_V31 {
977 } else if REG_VS0 <= p.Reg && p.Reg <= REG_VS63 {
979 } else if REG_F0 <= p.Reg && p.Reg <= REG_F31 {
984 // c.ctxt.Logf("oplook %v %d %d %d %d\n", p, a1, a2, a3, a4, a5, a6)
// Scan the pre-built range for this opcode; the cN tables encode which
// operand classes are acceptable substitutes (see cmp).
985 ops := oprange[p.As&obj.AMask]
993 if int(op.a2) == a2 && c1[op.a1] && c3[op.a3] && c4[op.a4] && c5[op.a5] && c6[op.a6] {
// Cache the 1-based optab index on the Prog for reuse.
994 p.Optab = uint16(cap(optab) - cap(ops) + i + 1)
999 c.ctxt.Diag("illegal combination %v %v %v %v %v %v %v", p.As, DRconv(a1), DRconv(a2), DRconv(a3), DRconv(a4), DRconv(a5), DRconv(a6))
// cmp reports whether an operand of class b is acceptable where the optab
// entry expects class a (i.e. b is a compatible "subclass" of a).
// NOTE(review): this listing is elided; the enclosing switch on a and the
// early equality/return paths are not shown.
1007 func cmp(a int, b int) bool {
// Wider constant classes accept all narrower constant classes.
1013 if b == C_ZCON || b == C_SCON || b == C_UCON || b == C_ADDCON || b == C_ANDCON {
1018 if b == C_ZCON || b == C_SCON {
1023 if b == C_ZCON || b == C_SCON {
1028 if b == C_LR || b == C_XER || b == C_CTR {
1058 if b == C_SOREG || b == C_ZOREG {
// C_REG case: $0 may stand in for a register operand when R0 reads as zero.
1064 return r0iszero != 0 /*TypeKind(100016)*/
// Len reports the number of Optab entries being sorted (sort.Interface).
// NOTE(review): the return statement is elided in this listing.
1076 func (x ocmp) Len() int {
1080 func (x ocmp) Swap(i, j int) {
1081 x[i], x[j] = x[j], x[i]
1084 // Used when sorting the optab. Sorting is
1085 // done in a way so that the best choice of
1086 // opcode/operand combination is considered first.
// Less orders entries first by opcode, then by encoded size, then by each
// operand-class field in turn, so the tightest match sorts first.
// NOTE(review): this listing is elided; the p1/p2 declarations and the
// n != 0 returns between comparisons are not shown.
1087 func (x ocmp) Less(i, j int) bool {
1090 n := int(p1.as) - int(p2.as)
1095 // Consider those that generate fewer
1096 // instructions first.
1097 n = int(p1.size) - int(p2.size)
1101 // operand order should match
1102 // better choices first
1103 n = int(p1.a1) - int(p2.a1)
1107 n = int(p1.a2) - int(p2.a2)
1111 n = int(p1.a3) - int(p2.a3)
1115 n = int(p1.a4) - int(p2.a4)
1119 n = int(p1.a5) - int(p2.a5)
1123 n = int(p1.a6) - int(p2.a6)
1130 // Add an entry to the opcode table for
1131 // a new opcode b0 with the same operand combinations
1133 func opset(a, b0 obj.As) {
1134 oprange[a&obj.AMask] = oprange[b0]
1137 // Build the opcode table
// buildop populates oprange: optab is sorted (see ocmp.Less), each run of
// entries with the same opcode becomes that opcode's oprange slice, and the
// switch maps every derived opcode onto its template's slice via opset.
// NOTE(review): this listing is heavily elided (gaps in the embedded line
// numbers); most case bodies, braces, and several statements are not shown.
1138 func buildop(ctxt *obj.Link) {
1139 if oprange[AANDN&obj.AMask] != nil {
1140 // Already initialized; stop now.
1141 // This happens in the cmd/asm tests,
1142 // each of which re-initializes the arch.
// Build the class-compatibility matrix (see cmp), then sort optab and
// slice it into per-opcode ranges.
1148 for i := 0; i < C_NCLASS; i++ {
1149 for n = 0; n < C_NCLASS; n++ {
1155 for n = 0; optab[n].as != obj.AXXX; n++ {
1157 sort.Sort(ocmp(optab[:n]))
1158 for i := 0; i < n; i++ {
1162 for optab[i].as == r {
1165 oprange[r0] = optab[start:i]
1170 ctxt.Diag("unknown op in build: %v", r)
1171 log.Fatalf("instruction missing from switch in asm9.go:buildop: %v", r)
// Map derived opcodes onto their template entry (r0 is pre-masked).
1173 case ADCBF: /* unary indexed: op (b+a); op (b) */
1182 case AECOWX: /* indexed store: op s,(b+a); op s,(b) */
1188 case AREM: /* macro */
1200 case ADIVW: /* op Rb[,Ra],Rd */
1205 opset(AMULHWUCC, r0)
1207 opset(AMULLWVCC, r0)
1215 opset(ADIVWUVCC, r0)
1232 opset(AMULHDUCC, r0)
1234 opset(AMULLDVCC, r0)
1241 opset(ADIVDEUCC, r0)
1246 opset(ADIVDUVCC, r0)
1258 case APOPCNTD: /* popcntd, popcntw, popcntb, cnttzw, cnttzd */
1262 opset(ACNTTZWCC, r0)
1264 opset(ACNTTZDCC, r0)
1266 case ACOPY: /* copy, paste. */
1269 case AMADDHD: /* maddhd, maddhdu, maddld */
1273 case AMOVBZ: /* lbz, stz, rlwm(r/r), lhz, lha, stz, and x variants */
1277 case AMOVBZU: /* lbz[x]u, stb[x]u, lhz[x]u, lha[x]u, sth[u]x, ld[x]u, std[u]x */
1286 case ALV: /* lvebx, lvehx, lvewx, lvx, lvxl, lvsl, lvsr */
1295 case ASTV: /* stvebx, stvehx, stvewx, stvx, stvxl */
1302 case AVAND: /* vand, vandc, vnand */
1307 case AVMRGOW: /* vmrgew, vmrgow */
1310 case AVOR: /* vor, vorc, vxor, vnor, veqv */
1317 case AVADDUM: /* vaddubm, vadduhm, vadduwm, vaddudm, vadduqm */
1324 case AVADDCU: /* vaddcuq, vaddcuw */
1328 case AVADDUS: /* vaddubs, vadduhs, vadduws */
1333 case AVADDSS: /* vaddsbs, vaddshs, vaddsws */
1338 case AVADDE: /* vaddeuqm, vaddecuq */
1339 opset(AVADDEUQM, r0)
1340 opset(AVADDECUQ, r0)
1342 case AVSUBUM: /* vsububm, vsubuhm, vsubuwm, vsubudm, vsubuqm */
1349 case AVSUBCU: /* vsubcuq, vsubcuw */
1353 case AVSUBUS: /* vsububs, vsubuhs, vsubuws */
1358 case AVSUBSS: /* vsubsbs, vsubshs, vsubsws */
1363 case AVSUBE: /* vsubeuqm, vsubecuq */
1364 opset(AVSUBEUQM, r0)
1365 opset(AVSUBECUQ, r0)
1367 case AVMULESB: /* vmulesb, vmulosb, vmuleub, vmuloub, vmulosh, vmulouh, vmulesw, vmulosw, vmuleuw, vmulouw, vmuluwm */
1380 case AVPMSUM: /* vpmsumb, vpmsumh, vpmsumw, vpmsumd */
1386 case AVR: /* vrlb, vrlh, vrlw, vrld */
1392 case AVS: /* vs[l,r], vs[l,r]o, vs[l,r]b, vs[l,r]h, vs[l,r]w, vs[l,r]d */
1406 case AVSA: /* vsrab, vsrah, vsraw, vsrad */
1412 case AVSOI: /* vsldoi */
1415 case AVCLZ: /* vclzb, vclzh, vclzw, vclzd */
1421 case AVPOPCNT: /* vpopcntb, vpopcnth, vpopcntw, vpopcntd */
1422 opset(AVPOPCNTB, r0)
1423 opset(AVPOPCNTH, r0)
1424 opset(AVPOPCNTW, r0)
1425 opset(AVPOPCNTD, r0)
1427 case AVCMPEQ: /* vcmpequb[.], vcmpequh[.], vcmpequw[.], vcmpequd[.] */
1428 opset(AVCMPEQUB, r0)
1429 opset(AVCMPEQUBCC, r0)
1430 opset(AVCMPEQUH, r0)
1431 opset(AVCMPEQUHCC, r0)
1432 opset(AVCMPEQUW, r0)
1433 opset(AVCMPEQUWCC, r0)
1434 opset(AVCMPEQUD, r0)
1435 opset(AVCMPEQUDCC, r0)
1437 case AVCMPGT: /* vcmpgt[u,s]b[.], vcmpgt[u,s]h[.], vcmpgt[u,s]w[.], vcmpgt[u,s]d[.] */
1438 opset(AVCMPGTUB, r0)
1439 opset(AVCMPGTUBCC, r0)
1440 opset(AVCMPGTUH, r0)
1441 opset(AVCMPGTUHCC, r0)
1442 opset(AVCMPGTUW, r0)
1443 opset(AVCMPGTUWCC, r0)
1444 opset(AVCMPGTUD, r0)
1445 opset(AVCMPGTUDCC, r0)
1446 opset(AVCMPGTSB, r0)
1447 opset(AVCMPGTSBCC, r0)
1448 opset(AVCMPGTSH, r0)
1449 opset(AVCMPGTSHCC, r0)
1450 opset(AVCMPGTSW, r0)
1451 opset(AVCMPGTSWCC, r0)
1452 opset(AVCMPGTSD, r0)
1453 opset(AVCMPGTSDCC, r0)
1455 case AVCMPNEZB: /* vcmpnezb[.] */
1456 opset(AVCMPNEZBCC, r0)
1458 opset(AVCMPNEBCC, r0)
1460 opset(AVCMPNEHCC, r0)
1462 opset(AVCMPNEWCC, r0)
1464 case AVPERM: /* vperm */
1465 opset(AVPERMXOR, r0)
1468 case AVBPERMQ: /* vbpermq, vbpermd */
1471 case AVSEL: /* vsel */
1474 case AVSPLTB: /* vspltb, vsplth, vspltw */
1478 case AVSPLTISB: /* vspltisb, vspltish, vspltisw */
1479 opset(AVSPLTISH, r0)
1480 opset(AVSPLTISW, r0)
1482 case AVCIPH: /* vcipher, vcipherlast */
1484 opset(AVCIPHERLAST, r0)
1486 case AVNCIPH: /* vncipher, vncipherlast */
1487 opset(AVNCIPHER, r0)
1488 opset(AVNCIPHERLAST, r0)
1490 case AVSBOX: /* vsbox */
1493 case AVSHASIGMA: /* vshasigmaw, vshasigmad */
1494 opset(AVSHASIGMAW, r0)
1495 opset(AVSHASIGMAD, r0)
1497 case ALXVD2X: /* lxvd2x, lxvdsx, lxvw4x, lxvh8x, lxvb16x */
1503 case ALXV: /* lxv */
1506 case ALXVL: /* lxvl, lxvll, lxvx */
1510 case ASTXVD2X: /* stxvd2x, stxvdsx, stxvw4x, stxvh8x, stxvb16x */
1513 opset(ASTXVB16X, r0)
1515 case ASTXV: /* stxv */
1518 case ASTXVL: /* stxvl, stxvll, stvx */
1522 case ALXSDX: /* lxsdx */
1525 case ASTXSDX: /* stxsdx */
1528 case ALXSIWAX: /* lxsiwax, lxsiwzx */
1531 case ASTXSIWX: /* stxsiwx */
1534 case AMFVSRD: /* mfvsrd, mfvsrwz (and extended mnemonics), mfvsrld */
1540 case AMTVSRD: /* mtvsrd, mtvsrwa, mtvsrwz (and extended mnemonics), mtvsrdd, mtvsrws */
1548 case AXXLAND: /* xxland, xxlandc, xxleqv, xxlnand */
1553 case AXXLOR: /* xxlorc, xxlnor, xxlor, xxlxor */
1559 case AXXSEL: /* xxsel */
1562 case AXXMRGHW: /* xxmrghw, xxmrglw */
1565 case AXXSPLTW: /* xxspltw */
1568 case AXXSPLTIB: /* xxspltib */
1569 opset(AXXSPLTIB, r0)
1571 case AXXPERM: /* xxpermdi */
1574 case AXXSLDWI: /* xxsldwi */
1575 opset(AXXPERMDI, r0)
1578 case AXXBRQ: /* xxbrq, xxbrd, xxbrw, xxbrh */
1583 case AXSCVDPSP: /* xscvdpsp, xscvspdp, xscvdpspn, xscvspdpn */
1584 opset(AXSCVSPDP, r0)
1585 opset(AXSCVDPSPN, r0)
1586 opset(AXSCVSPDPN, r0)
1588 case AXVCVDPSP: /* xvcvdpsp, xvcvspdp */
1589 opset(AXVCVSPDP, r0)
1591 case AXSCVDPSXDS: /* xscvdpsxds, xscvdpsxws, xscvdpuxds, xscvdpuxws */
1592 opset(AXSCVDPSXWS, r0)
1593 opset(AXSCVDPUXDS, r0)
1594 opset(AXSCVDPUXWS, r0)
1596 case AXSCVSXDDP: /* xscvsxddp, xscvuxddp, xscvsxdsp, xscvuxdsp */
1597 opset(AXSCVUXDDP, r0)
1598 opset(AXSCVSXDSP, r0)
1599 opset(AXSCVUXDSP, r0)
1601 case AXVCVDPSXDS: /* xvcvdpsxds, xvcvdpsxws, xvcvdpuxds, xvcvdpuxws, xvcvspsxds, xvcvspsxws, xvcvspuxds, xvcvspuxws */
1602 opset(AXVCVDPSXDS, r0)
1603 opset(AXVCVDPSXWS, r0)
1604 opset(AXVCVDPUXDS, r0)
1605 opset(AXVCVDPUXWS, r0)
1606 opset(AXVCVSPSXDS, r0)
1607 opset(AXVCVSPSXWS, r0)
1608 opset(AXVCVSPUXDS, r0)
1609 opset(AXVCVSPUXWS, r0)
1611 case AXVCVSXDDP: /* xvcvsxddp, xvcvsxwdp, xvcvuxddp, xvcvuxwdp, xvcvsxdsp, xvcvsxwsp, xvcvuxdsp, xvcvuxwsp */
1612 opset(AXVCVSXWDP, r0)
1613 opset(AXVCVUXDDP, r0)
1614 opset(AXVCVUXWDP, r0)
1615 opset(AXVCVSXDSP, r0)
1616 opset(AXVCVSXWSP, r0)
1617 opset(AXVCVUXDSP, r0)
1618 opset(AXVCVUXWSP, r0)
1620 case AAND: /* logical op Rb,Rs,Ra; no literal */
1634 case AADDME: /* op Ra, Rd */
1638 opset(AADDMEVCC, r0)
1642 opset(AADDZEVCC, r0)
1646 opset(ASUBMEVCC, r0)
1650 opset(ASUBZEVCC, r0)
1670 case AEXTSB: /* op Rs, Ra */
1676 opset(ACNTLZWCC, r0)
1680 opset(ACNTLZDCC, r0)
1682 case AFABS: /* fop [s,]d */
1694 opset(AFCTIWZCC, r0)
1698 opset(AFCTIDZCC, r0)
1702 opset(AFCFIDUCC, r0)
1704 opset(AFCFIDSCC, r0)
1716 opset(AFRSQRTECC, r0)
1720 opset(AFSQRTSCC, r0)
1727 opset(AFCPSGNCC, r0)
1740 opset(AFMADDSCC, r0)
1744 opset(AFMSUBSCC, r0)
1746 opset(AFNMADDCC, r0)
1748 opset(AFNMADDSCC, r0)
1750 opset(AFNMSUBCC, r0)
1752 opset(AFNMSUBSCC, r0)
1768 opset(AMTFSB0CC, r0)
1770 opset(AMTFSB1CC, r0)
1772 case ANEG: /* op [Ra,] Rd */
1778 case AOR: /* or/xor Rb,Rs,Ra; ori/xori $uimm,Rs,R */
1781 case AORIS: /* oris/xoris $uimm,Rs,Ra */
1796 case ASRAW: /* sraw Rb,Rs,Ra; srawi sh,Rs,Ra */
1800 opset(AEXTSWSLICC, r0)
1802 case ASRAD: /* sraw Rb,Rs,Ra; srawi sh,Rs,Ra */
1805 case ASUB: /* SUB Ra,Rb,Rd => subf Rd,ra,rb */
1833 opset(ARLDIMICC, r0)
1844 opset(ARLDICLCC, r0)
1846 opset(ARLDICRCC, r0)
1849 opset(ACLRLSLDI, r0)
1862 case ASYSCALL: /* just the op; flow of control */
1901 case AMOVW: /* load/store/move word with sign extension; move 32-bit literals */
1902 opset(AMOVWZ, r0) /* Same as above, but zero extended */
1906 AANDCC, /* and. Rb,Rs,Ra; andi. $uimm,Rs,Ra */
1911 AMOVD, /* load/store/move 64-bit values, including 32-bit literals with/without sign-extension */
1912 AMOVB, /* macro: move byte with sign extension */
1913 AMOVBU, /* macro: move byte with sign extension & update */
1915 /* op $s[,r2],r3; op r1[,r2],r3; no cc/v */
1916 ASUBC, /* op r1,$s,r3; op r1[,r2],r3 */
// OPVXX1 packs an opcode word from primary opcode o (bits 31-26),
// extended opcode xo at bit 1, and oe field at bit 11.
func OPVXX1(o uint32, xo uint32, oe uint32) uint32 {
	w := o << 26
	w |= xo << 1
	w |= oe << 11
	return w
}
// OPVXX2 packs an opcode word from primary opcode o (bits 31-26),
// extended opcode xo at bit 2, and oe field at bit 11.
func OPVXX2(o uint32, xo uint32, oe uint32) uint32 {
	w := o << 26
	w |= xo << 2
	w |= oe << 11
	return w
}
// OPVXX2VA packs an opcode word from primary opcode o (bits 31-26),
// extended opcode xo at bit 2, and oe field at bit 16.
func OPVXX2VA(o uint32, xo uint32, oe uint32) uint32 {
	w := o << 26
	w |= xo << 2
	w |= oe << 16
	return w
}
// OPVXX3 packs an opcode word from primary opcode o (bits 31-26),
// extended opcode xo at bit 3, and oe field at bit 11.
func OPVXX3(o uint32, xo uint32, oe uint32) uint32 {
	w := o << 26
	w |= xo << 3
	w |= oe << 11
	return w
}
// OPVXX4 packs an opcode word from primary opcode o (bits 31-26),
// extended opcode xo at bit 4, and oe field at bit 11.
func OPVXX4(o uint32, xo uint32, oe uint32) uint32 {
	w := o << 26
	w |= xo << 4
	w |= oe << 11
	return w
}
// OPDQ packs an opcode word from primary opcode o (bits 31-26),
// extended opcode xo (unshifted), and oe field at bit 4.
func OPDQ(o uint32, xo uint32, oe uint32) uint32 {
	w := o << 26
	w |= xo
	w |= oe << 4
	return w
}
// OPVX packs an opcode word: primary opcode o (bits 31-26), extended
// opcode xo (unshifted), oe at bit 11, and the record bit rc in bit 0.
func OPVX(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
	w := o<<26 | xo
	w |= oe << 11
	w |= rc & 1
	return w
}
// OPVC packs an opcode word: primary opcode o (bits 31-26), extended
// opcode xo (unshifted), oe at bit 11, and the record bit rc at bit 10.
func OPVC(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
	w := o<<26 | xo
	w |= oe << 11
	w |= (rc & 1) << 10
	return w
}
// OPVCC packs an opcode word: primary opcode o (bits 31-26), extended
// opcode xo at bit 1, oe at bit 10, and the record bit rc in bit 0.
func OPVCC(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
	w := o<<26 | xo<<1
	w |= oe << 10
	w |= rc & 1
	return w
}
// OPCC builds an OPVCC-style opcode word with the oe field fixed to zero:
// primary opcode o at bit 26, extended opcode xo at bit 1, rc in bit 0.
// (Equivalent to OPVCC(o, xo, 0, rc), inlined here.)
func OPCC(o uint32, xo uint32, rc uint32) uint32 {
	w := o<<26 | xo<<1
	w |= rc & 1
	return w
}
// OPMD generates an MD-form opcode word: primary opcode o (bits 31-26),
// extended opcode xo at bit 2, and the record bit rc in bit 0.
func OPMD(o, xo, rc uint32) uint32 {
	w := o << 26
	w |= xo << 2
	w |= rc & 1
	return w
}
// AOP_RRR packs three 5-bit register fields into op: destination d at
// bit 21, a/s at bit 16, b/imm at bit 11 (order: dest, a/s, b/imm for both
// arithmetic and logical operations).
func AOP_RRR(op uint32, d uint32, a uint32, b uint32) uint32 {
	w := op
	w |= (d & 31) << 21
	w |= (a & 31) << 16
	w |= (b & 31) << 11
	return w
}
// AOP_RR packs a VX-form two-register word (r/none/r): d at bit 21 and
// a at bit 11; the middle field is left zero.
func AOP_RR(op uint32, d uint32, a uint32) uint32 {
	w := op
	w |= (d & 31) << 21
	w |= (a & 31) << 11
	return w
}
// AOP_RRRR packs a VA-form word with four 5-bit register fields:
// d at bit 21, a at bit 16, b at bit 11, c at bit 6.
func AOP_RRRR(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
	w := op
	w |= (d & 31) << 21
	w |= (a & 31) << 16
	w |= (b & 31) << 11
	w |= (c & 31) << 6
	return w
}
// AOP_IRR packs d at bit 21, a at bit 16, and the low 16 bits of simm
// into the immediate field of op.
func AOP_IRR(op uint32, d uint32, a uint32, simm uint32) uint32 {
	w := op
	w |= (d & 31) << 21
	w |= (a & 31) << 16
	w |= simm & 0xFFFF
	return w
}
// AOP_VIRR packs a VX-form two-register + UIM word: d at bit 21, the
// 16-bit immediate at bit 16, and register a at bit 11.
func AOP_VIRR(op uint32, d uint32, a uint32, simm uint32) uint32 {
	w := op
	w |= (d & 31) << 21
	w |= (simm & 0xFFFF) << 16
	w |= (a & 31) << 11
	return w
}
// AOP_IIRR packs a VX-form two-register + ST + SIX word: d at bit 21,
// a at bit 16, the single sbit at bit 15, and a 4-bit immediate at bit 11.
func AOP_IIRR(op uint32, d uint32, a uint32, sbit uint32, simm uint32) uint32 {
	w := op
	w |= (d & 31) << 21
	w |= (a & 31) << 16
	w |= (sbit & 1) << 15
	w |= (simm & 0xF) << 11
	return w
}
// AOP_IRRR packs a VA-form three-register + SHB word: d at bit 21, a at
// bit 16, b at bit 11, and a 4-bit immediate at bit 6.
func AOP_IRRR(op uint32, d uint32, a uint32, b uint32, simm uint32) uint32 {
	w := op
	w |= (d & 31) << 21
	w |= (a & 31) << 16
	w |= (b & 31) << 11
	w |= (simm & 0xF) << 6
	return w
}
// AOP_IR packs a VX-form one-register + SIM word: d at bit 21 and a
// 5-bit immediate at bit 16.
func AOP_IR(op uint32, d uint32, simm uint32) uint32 {
	w := op
	w |= (d & 31) << 21
	w |= (simm & 31) << 16
	return w
}
2025 /* XX1-form 3-register operands, 1 VSR operand */
2026 func AOP_XX1(op uint32, d uint32, a uint32, b uint32) uint32 {
2027 /* For the XX-form encodings, we need the VSX register number to be exactly */
2028 /* between 0-63, so we can properly set the rightmost bits. */
// NOTE(review): the elided line (2029) presumably rebases d to the VSR
// number r in 0-63; r's low 5 bits go at bit 21 and its 6th bit becomes
// the SX bit at bit 0 — confirm against the full source.
2030 return op | (r&31)<<21 | (a&31)<<16 | (b&31)<<11 | (r&32)>>5
2033 /* XX2-form 3-register operands, 2 VSR operands */
2034 func AOP_XX2(op uint32, d uint32, a uint32, b uint32) uint32 {
// NOTE(review): elided lines presumably compute xt and xb as 0-63 VSR
// numbers from d and b; each field's 6th bit is relocated to the low
// opcode bits (XX/TX) — confirm against the full source.
2037 return op | (xt&31)<<21 | (a&3)<<16 | (xb&31)<<11 | (xb&32)>>4 | (xt&32)>>5
2040 /* XX3-form 3 VSR operands */
2041 func AOP_XX3(op uint32, d uint32, a uint32, b uint32) uint32 {
// NOTE(review): elided lines presumably compute xt, xa, xb as 0-63 VSR
// numbers from d, a, b; the 6th bit of each is placed in the low opcode
// bits (AX/BX/TX) — confirm against the full source.
2045 return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
2048 /* XX3-form 3 VSR operands + immediate */
2049 func AOP_XX3I(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
// NOTE(review): elided lines presumably compute xt, xa, xb as 0-63 VSR
// numbers; c supplies a 2-bit immediate at bit 8 — confirm against the
// full source.
2053 return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (c&3)<<8 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
2056 /* XX4-form, 4 VSR operands */
2057 func AOP_XX4(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
// NOTE(review): elided lines presumably compute xt, xa, xb, xc as 0-63
// VSR numbers from d, a, b, c; each 6th bit is relocated to the low
// opcode bits — confirm against the full source.
2062 return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (xc&31)<<6 | (xc&32)>>2 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
2065 /* DQ-form, VSR register, register + offset operands */
2066 func AOP_DQ(op uint32, d uint32, a uint32, b uint32) uint32 {
2067 /* For the DQ-form encodings, we need the VSX register number to be exactly */
2068 /* between 0-63, so we can properly set the SX bit. */
2070 /* The EA for this instruction form is (RA) + DQ << 4, where DQ is a 12-bit signed integer. */
2071 /* In order to match the output of the GNU objdump (and make the usage in Go asm easier), the */
2072 /* instruction is called using the sign extended value (i.e. a valid offset would be -32752 or 32752, */
2073 /* not -2047 or 2047), so 'b' needs to be adjusted to the expected 12-bit DQ value. Bear in mind that */
2074 /* bits 0 to 3 in 'dq' need to be zero, otherwise this will generate an illegal instruction. */
2075 /* If in doubt how this instruction form is encoded, refer to ISA 3.0b, pages 492 and 507. */
// NOTE(review): elided lines presumably compute r (the 0-63 VSR number
// from d) and dq (b scaled down to the 12-bit DQ field, i.e. b>>4) —
// confirm against the full source.
2077 return op | (r&31)<<21 | (a&31)<<16 | (dq&4095)<<4 | (r&32)>>2
// AOP_Z23I packs a Z23-form word with three 5-bit register fields
// (d at 21, a at 16, b at 11) plus a 2-bit CY field at bit 9.
func AOP_Z23I(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
	w := op
	w |= (d & 31) << 21
	w |= (a & 31) << 16
	w |= (b & 31) << 11
	w |= (c & 3) << 9
	return w
}
// AOP_RRRI packs an X-form word with three 5-bit register fields
// (d at 21, a at 16, b at 11) plus a 1-bit EH field in bit 0.
func AOP_RRRI(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
	w := op
	w |= (d & 31) << 21
	w |= (a & 31) << 16
	w |= (b & 31) << 11
	w |= c & 1
	return w
}
// LOP_RRR packs a logical-op word: note the swapped field order versus
// AOP_RRR — source s goes at bit 21, destination a at bit 16, b at bit 11.
func LOP_RRR(op uint32, a uint32, s uint32, b uint32) uint32 {
	w := op
	w |= (s & 31) << 21
	w |= (a & 31) << 16
	w |= (b & 31) << 11
	return w
}
// LOP_IRR packs a logical-immediate word: s at bit 21, a at bit 16, and
// the low 16 bits of uimm in the immediate field.
func LOP_IRR(op uint32, a uint32, s uint32, uimm uint32) uint32 {
	w := op
	w |= (s & 31) << 21
	w |= (a & 31) << 16
	w |= uimm & 0xFFFF
	return w
}
// OP_BR packs an I-form branch: the word-aligned 26-bit displacement li
// (mask 0x03FFFFFC) and the absolute-address bit aa at bit 1.
func OP_BR(op uint32, li uint32, aa uint32) uint32 {
	w := op
	w |= li & 0x03FFFFFC
	w |= aa << 1
	return w
}
// OP_BC packs a B-form conditional branch: bo at bit 21, bi at bit 16,
// the word-aligned 16-bit displacement bd (mask 0xFFFC), and aa at bit 1.
func OP_BC(op uint32, bo uint32, bi uint32, bd uint32, aa uint32) uint32 {
	w := op
	w |= (bo & 0x1F) << 21
	w |= (bi & 0x1F) << 16
	w |= bd & 0xFFFC
	w |= aa << 1
	return w
}
// OP_BCR packs a branch-to-register word: bo at bit 21 and bi at bit 16.
func OP_BCR(op uint32, bo uint32, bi uint32) uint32 {
	w := op
	w |= (bo & 0x1F) << 21
	w |= (bi & 0x1F) << 16
	return w
}
// OP_RLW packs an M-form rotate word: s at bit 21, a at bit 16, shift sh
// at bit 11, mask begin mb at bit 6, mask end me at bit 1.
func OP_RLW(op uint32, a uint32, s uint32, sh uint32, mb uint32, me uint32) uint32 {
	w := op
	w |= (s & 31) << 21
	w |= (a & 31) << 16
	w |= (sh & 31) << 11
	w |= (mb & 31) << 6
	w |= (me & 31) << 1
	return w
}
2114 func AOP_RLDIC(op uint32, a uint32, s uint32, sh uint32, m uint32) uint32 {
2115 return op | (s&31)<<21 | (a&31)<<16 | (sh&31)<<11 | ((sh&32)>>5)<<1 | (m&31)<<6 | ((m&32)>>5)<<5
// AOP_EXTSWSLI packs the extswsli word: a at bit 21, s at bit 16, and the
// split 6-bit shift sh (low 5 bits at bit 11, 6th bit relocated to bit 1).
func AOP_EXTSWSLI(op uint32, a uint32, s uint32, sh uint32) uint32 {
	w := op
	w |= (a & 31) << 21
	w |= (s & 31) << 16
	w |= (sh & 31) << 11
	w |= ((sh & 32) >> 5) << 1
	return w
}
// AOP_ISEL packs an isel word: target t at bit 21, a at bit 16, b at
// bit 11, and the 5-bit condition field bc at bit 6.
func AOP_ISEL(op uint32, t uint32, a uint32, b uint32, bc uint32) uint32 {
	w := op
	w |= (t & 31) << 21
	w |= (a & 31) << 16
	w |= (b & 31) << 11
	w |= (bc & 0x1F) << 6
	return w
}
// Pre-built opcode words for instructions asmout emits directly.
// Each value is primary<<26 | extended<<1 | oe<<10 | rc (the OPVCC layout),
// except OP_EXTSWSLI, whose extended opcode sits at bit 2.
2127 /* each rhs is OPVCC(_, _, _, _) */
2128 OP_ADD = 31<<26 | 266<<1 | 0<<10 | 0
2129 OP_ADDI = 14<<26 | 0<<1 | 0<<10 | 0
2130 OP_ADDIS = 15<<26 | 0<<1 | 0<<10 | 0
2131 OP_ANDI = 28<<26 | 0<<1 | 0<<10 | 0
2132 OP_EXTSB = 31<<26 | 954<<1 | 0<<10 | 0
2133 OP_EXTSH = 31<<26 | 922<<1 | 0<<10 | 0
2134 OP_EXTSW = 31<<26 | 986<<1 | 0<<10 | 0
2135 OP_ISEL = 31<<26 | 15<<1 | 0<<10 | 0
2136 OP_MCRF = 19<<26 | 0<<1 | 0<<10 | 0
2137 OP_MCRFS = 63<<26 | 64<<1 | 0<<10 | 0
2138 OP_MCRXR = 31<<26 | 512<<1 | 0<<10 | 0
2139 OP_MFCR = 31<<26 | 19<<1 | 0<<10 | 0
2140 OP_MFFS = 63<<26 | 583<<1 | 0<<10 | 0
2141 OP_MFSPR = 31<<26 | 339<<1 | 0<<10 | 0
2142 OP_MFSR = 31<<26 | 595<<1 | 0<<10 | 0
2143 OP_MFSRIN = 31<<26 | 659<<1 | 0<<10 | 0
2144 OP_MTCRF = 31<<26 | 144<<1 | 0<<10 | 0
2145 OP_MTFSF = 63<<26 | 711<<1 | 0<<10 | 0
2146 OP_MTFSFI = 63<<26 | 134<<1 | 0<<10 | 0
2147 OP_MTSPR = 31<<26 | 467<<1 | 0<<10 | 0
2148 OP_MTSR = 31<<26 | 210<<1 | 0<<10 | 0
2149 OP_MTSRIN = 31<<26 | 242<<1 | 0<<10 | 0
2150 OP_MULLW = 31<<26 | 235<<1 | 0<<10 | 0
2151 OP_MULLD = 31<<26 | 233<<1 | 0<<10 | 0
2152 OP_OR = 31<<26 | 444<<1 | 0<<10 | 0
2153 OP_ORI = 24<<26 | 0<<1 | 0<<10 | 0
2154 OP_ORIS = 25<<26 | 0<<1 | 0<<10 | 0
2155 OP_RLWINM = 21<<26 | 0<<1 | 0<<10 | 0
2156 OP_RLWNM = 23<<26 | 0<<1 | 0<<10 | 0
2157 OP_SUBF = 31<<26 | 40<<1 | 0<<10 | 0
2158 OP_RLDIC = 30<<26 | 4<<1 | 0<<10 | 0
2159 OP_RLDICR = 30<<26 | 2<<1 | 0<<10 | 0
2160 OP_RLDICL = 30<<26 | 0<<1 | 0<<10 | 0
2161 OP_RLDCL = 30<<26 | 8<<1 | 0<<10 | 0
2162 OP_EXTSWSLI = 31<<26 | 445<<2
2165 func oclass(a *obj.Addr) int {
2166 return int(a.Class) - 1
2174 // This function determines when a non-indexed load or store is D or
2175 // DS form for use in finding the size of the offset field in the instruction.
2176 // The size is needed when setting the offset value in the instruction
2177 // and when generating relocation for that field.
2178 // DS form instructions include: ld, ldu, lwa, std, stdu. All other
2179 // loads and stores with an offset field are D form. This function should
2180 // only be called with the same opcodes as are handled by opstore and opload.
// NOTE(review): this listing is elided — the switch header on insn, the
// return statements (presumably DS_FORM / D_FORM constants), and closing
// braces are not shown; confirm against the full source.
2181 func (c *ctxt9) opform(insn uint32) int {
2184 c.ctxt.Diag("bad insn in loadform: %x", insn)
// DS-form opcodes (ld/ldu/lwa/std/stdu): 14-bit, word-aligned offsets.
2185 case OPVCC(58, 0, 0, 0), // ld
2186 OPVCC(58, 0, 0, 1), // ldu
2187 OPVCC(58, 0, 0, 0) | 1<<1, // lwa
2188 OPVCC(62, 0, 0, 0), // std
2189 OPVCC(62, 0, 0, 1): //stdu
// D-form opcodes: full 16-bit offsets.
2191 case OP_ADDI, // add
2192 OPVCC(32, 0, 0, 0), // lwz
2193 OPVCC(33, 0, 0, 0), // lwzu
2194 OPVCC(34, 0, 0, 0), // lbz
2195 OPVCC(35, 0, 0, 0), // lbzu
2196 OPVCC(40, 0, 0, 0), // lhz
2197 OPVCC(41, 0, 0, 0), // lhzu
2198 OPVCC(42, 0, 0, 0), // lha
2199 OPVCC(43, 0, 0, 0), // lhau
2200 OPVCC(46, 0, 0, 0), // lmw
2201 OPVCC(48, 0, 0, 0), // lfs
2202 OPVCC(49, 0, 0, 0), // lfsu
2203 OPVCC(50, 0, 0, 0), // lfd
2204 OPVCC(51, 0, 0, 0), // lfdu
2205 OPVCC(36, 0, 0, 0), // stw
2206 OPVCC(37, 0, 0, 0), // stwu
2207 OPVCC(38, 0, 0, 0), // stb
2208 OPVCC(39, 0, 0, 0), // stbu
2209 OPVCC(44, 0, 0, 0), // sth
2210 OPVCC(45, 0, 0, 0), // sthu
2211 OPVCC(47, 0, 0, 0), // stmw
2212 OPVCC(52, 0, 0, 0), // stfs
2213 OPVCC(53, 0, 0, 0), // stfsu
2214 OPVCC(54, 0, 0, 0), // stfd
2215 OPVCC(55, 0, 0, 0): // stfdu
2221 // Encode instructions and create relocation for accessing s+d according to the
2222 // instruction op with source or destination (as appropriate) register reg.
// Emits an addis(REGTMP)/op pair; the actual symbol address is filled in
// by the relocation recorded on the symbol.
// NOTE(review): this listing is elided — base selection, the relocation's
// Siz/Sym/Add setup, returns, and braces are not shown.
2223 func (c *ctxt9) symbolAccess(s *obj.LSym, d int64, reg int16, op uint32) (o1, o2 uint32) {
2224 if c.ctxt.Headtype == objabi.Haix {
2225 // Every symbol access must be made via a TOC anchor.
2226 c.ctxt.Diag("symbolAccess called for %s", s.Name)
// The form (D vs DS) decides which relocation variant is needed, since
// DS-form offsets have fewer usable bits.
2229 form := c.opform(op)
2230 if c.ctxt.Flag_shared {
2235 o1 = AOP_IRR(OP_ADDIS, REGTMP, base, 0)
2236 o2 = AOP_IRR(op, uint32(reg), REGTMP, 0)
2237 rel := obj.Addrel(c.cursym)
2238 rel.Off = int32(c.pc)
// Shared builds use TOC-relative relocations; otherwise plain address
// relocations. The _DS variants are selected for DS-form instructions
// (presumably when form == DS_FORM — the guards are elided here).
2242 if c.ctxt.Flag_shared {
2245 rel.Type = objabi.R_ADDRPOWER_TOCREL
2247 rel.Type = objabi.R_ADDRPOWER_TOCREL_DS
2253 rel.Type = objabi.R_ADDRPOWER
2255 rel.Type = objabi.R_ADDRPOWER_DS
// getmask computes the (MB, ME) rotate-mask bit pair in m for the 32-bit
// mask v, returning false when v is not a single contiguous run of set
// bits (possibly wrapping around, i.e. MB > ME).
// NOTE(review): this listing is elided — assignments into m, loop bodies,
// and return statements are not shown.
2264 func getmask(m []byte, v uint32) bool {
// Wrapping mask (set at both ends but not all-ones): handled specially.
2267 if v != ^uint32(0) && v&(1<<31) != 0 && v&1 != 0 { /* MB > ME */
// Scan from bit 31 downward (PPC bit numbering: bit 0 is the MSB) for
// the start, end, and any illegal second run of set bits.
2278 for i := 0; i < 32; i++ {
2279 if v&(1<<uint(31-i)) != 0 {
2284 if i >= 32 || v&(1<<uint(31-i)) == 0 {
2290 if v&(1<<uint(31-i)) != 0 {
// maskgen fills m with the (MB, ME) pair for the 32-bit mask v, diagnosing
// values that getmask cannot encode.
// NOTE(review): the guard line calling getmask is elided in this listing.
2301 func (c *ctxt9) maskgen(p *obj.Prog, m []byte, v uint32) {
2303 c.ctxt.Diag("cannot generate mask #%x\n%v", v, p)
2308 * 64-bit masks (rldic etc)
// getmask64 is the 64-bit analogue of getmask: it computes the (MB, ME)
// pair in m for mask v, returning false for non-contiguous masks.
// NOTE(review): this listing is elided — assignments into m, loop bodies,
// and return statements are not shown.
2310 func getmask64(m []byte, v uint64) bool {
// Scan from bit 63 downward (PPC numbering: bit 0 is the MSB).
2313 for i := 0; i < 64; i++ {
2314 if v&(uint64(1)<<uint(63-i)) != 0 {
2319 if i >= 64 || v&(uint64(1)<<uint(63-i)) == 0 {
2325 if v&(uint64(1)<<uint(63-i)) != 0 {
2336 func (c *ctxt9) maskgen64(p *obj.Prog, m []byte, v uint64) {
2337 if !getmask64(m, v) {
2338 c.ctxt.Diag("cannot generate mask #%x\n%v", v, p)
// loadu32 returns one instruction that loads the upper 16 bits of d into
// register r: oris (zero-extending) when d fits in an unsigned 32-bit
// value, addis (sign-extending) otherwise.
// NOTE(review): the elided line (2343) presumably computes v as the high
// half of d (d>>16) — confirm against the full source.
2342 func loadu32(r int, d int64) uint32 {
2344 if isuint32(uint64(d)) {
2345 return LOP_IRR(OP_ORIS, uint32(r), REGZERO, uint32(v))
2347 return AOP_IRR(OP_ADDIS, uint32(r), REGZERO, uint32(v))
// high16adjusted returns the high 16 bits of d, incremented by one when
// bit 15 of d is set, so that adding the (sign-extended) low 16 bits
// afterwards reconstructs d exactly.
// NOTE(review): the conditional guard line (presumably testing d&0x8000)
// is elided in this listing.
2350 func high16adjusted(d int32) uint16 {
2352 return uint16((d >> 16) + 1)
2354 return uint16(d >> 16)
2357 func (c *ctxt9) asmout(p *obj.Prog, o *Optab, out []uint32) {
2364 //print("%v => case %d\n", p, o->type);
2367 c.ctxt.Diag("unknown type %d", o.type_)
2370 case 0: /* pseudo ops */
2373 case 2: /* int/cr/fp op Rb,[Ra],Rd */
2379 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2381 case 3: /* mov $soreg/addcon/andcon/ucon, r ==> addis/oris/addi/ori $i,reg',r */
2382 d := c.vregoff(&p.From)
2385 r := int(p.From.Reg)
2387 r = c.getimpliedreg(&p.From, p)
2389 if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 && (r != 0 || v != 0) {
2390 c.ctxt.Diag("literal operation on R0\n%v", p)
2395 log.Fatalf("invalid handling of %v", p)
2397 // For UCON operands the value is right shifted 16, using ADDIS if the
2398 // value should be signed, ORIS if unsigned.
2400 if r == REGZERO && isuint32(uint64(d)) {
2401 o1 = LOP_IRR(OP_ORIS, uint32(p.To.Reg), REGZERO, uint32(v))
2406 } else if int64(int16(d)) != d {
2407 // Operand is 16 bit value with sign bit set
2408 if o.a1 == C_ANDCON {
2409 // Needs unsigned 16 bit so use ORI
2410 if r == 0 || r == REGZERO {
2411 o1 = LOP_IRR(uint32(OP_ORI), uint32(p.To.Reg), uint32(0), uint32(v))
2414 // With ADDCON, needs signed 16 bit value, fall through to use ADDI
2415 } else if o.a1 != C_ADDCON {
2416 log.Fatalf("invalid handling of %v", p)
2420 o1 = AOP_IRR(uint32(a), uint32(p.To.Reg), uint32(r), uint32(v))
2422 case 4: /* add/mul $scon,[r1],r2 */
2423 v := c.regoff(&p.From)
2429 if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 {
2430 c.ctxt.Diag("literal operation on R0\n%v", p)
2432 if int32(int16(v)) != v {
2433 log.Fatalf("mishandled instruction %v", p)
2435 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2437 case 5: /* syscall */
2440 case 6: /* logical op Rb,[Rs,]Ra; no literal */
2446 // AROTL and AROTLW are extended mnemonics, which map to RLDCL and RLWNM.
2449 o1 = AOP_RLDIC(OP_RLDCL, uint32(p.To.Reg), uint32(r), uint32(p.From.Reg), uint32(0))
2451 o1 = OP_RLW(OP_RLWNM, uint32(p.To.Reg), uint32(r), uint32(p.From.Reg), 0, 31)
2453 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2456 case 7: /* mov r, soreg ==> stw o(r) */
2460 r = c.getimpliedreg(&p.To, p)
2462 v := c.regoff(&p.To)
2463 if p.To.Type == obj.TYPE_MEM && p.To.Index != 0 {
2465 c.ctxt.Diag("illegal indexed instruction\n%v", p)
2467 o1 = AOP_RRR(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(r))
2469 if int32(int16(v)) != v {
2470 log.Fatalf("mishandled instruction %v", p)
2472 // Offsets in DS form stores must be a multiple of 4
2473 inst := c.opstore(p.As)
2474 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
2475 log.Fatalf("invalid offset for DS form load/store %v", p)
2477 o1 = AOP_IRR(inst, uint32(p.From.Reg), uint32(r), uint32(v))
2480 case 8: /* mov soreg, r ==> lbz/lhz/lwz o(r) */
2481 r := int(p.From.Reg)
2484 r = c.getimpliedreg(&p.From, p)
2486 v := c.regoff(&p.From)
2487 if p.From.Type == obj.TYPE_MEM && p.From.Index != 0 {
2489 c.ctxt.Diag("illegal indexed instruction\n%v", p)
2491 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(r))
2493 if int32(int16(v)) != v {
2494 log.Fatalf("mishandled instruction %v", p)
2496 // Offsets in DS form loads must be a multiple of 4
2497 inst := c.opload(p.As)
2498 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
2499 log.Fatalf("invalid offset for DS form load/store %v", p)
2501 o1 = AOP_IRR(inst, uint32(p.To.Reg), uint32(r), uint32(v))
2504 case 9: /* movb soreg, r ==> lbz o(r),r2; extsb r2,r2 */
2505 r := int(p.From.Reg)
2508 r = c.getimpliedreg(&p.From, p)
2510 v := c.regoff(&p.From)
2511 if p.From.Type == obj.TYPE_MEM && p.From.Index != 0 {
2513 c.ctxt.Diag("illegal indexed instruction\n%v", p)
2515 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(r))
2517 o1 = AOP_IRR(c.opload(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2519 o2 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
2521 case 10: /* sub Ra,[Rb],Rd => subf Rd,Ra,Rb */
2527 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(r))
2529 case 11: /* br/bl lbra */
2532 if p.To.Target() != nil {
2533 v = int32(p.To.Target().Pc - p.Pc)
2535 c.ctxt.Diag("odd branch target address\n%v", p)
2539 if v < -(1<<25) || v >= 1<<24 {
2540 c.ctxt.Diag("branch too far\n%v", p)
2544 o1 = OP_BR(c.opirr(p.As), uint32(v), 0)
2545 if p.To.Sym != nil {
2546 rel := obj.Addrel(c.cursym)
2547 rel.Off = int32(c.pc)
2550 v += int32(p.To.Offset)
2552 c.ctxt.Diag("odd branch target address\n%v", p)
2557 rel.Type = objabi.R_CALLPOWER
2559 o2 = 0x60000000 // nop, sometimes overwritten by ld r2, 24(r1) when dynamic linking
2561 case 13: /* mov[bhwd]{z,} r,r */
2562 // This needs to handle "MOV* $0, Rx". This shows up because $0 also
2563 // matches C_REG if r0iszero. This happens because C_REG sorts before C_ANDCON
2564 // TODO: fix the above behavior and cleanup this exception.
2565 if p.From.Type == obj.TYPE_CONST {
2566 o1 = LOP_IRR(OP_ADDI, REGZERO, uint32(p.To.Reg), 0)
2569 if p.To.Type == obj.TYPE_CONST {
2570 c.ctxt.Diag("cannot move into constant 0\n%v", p)
2575 o1 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2577 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.From.Reg), 0, 24, 31)
2579 o1 = LOP_RRR(OP_EXTSH, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2581 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.From.Reg), 0, 16, 31)
2583 o1 = LOP_RRR(OP_EXTSW, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2585 o1 = OP_RLW(OP_RLDIC, uint32(p.To.Reg), uint32(p.From.Reg), 0, 0, 0) | 1<<5 /* MB=32 */
2587 o1 = LOP_RRR(OP_OR, uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.From.Reg))
2589 c.ctxt.Diag("internal: bad register move/truncation\n%v", p)
2592 case 14: /* rldc[lr] Rb,Rs,$mask,Ra -- left, right give different masks */
2598 d := c.vregoff(p.GetFrom3())
2602 // These opcodes expect a mask operand that has to be converted into the
2603 // appropriate operand. The way these were defined, not all valid masks are possible.
2604 // Left here for compatibility in case they were used or generated.
2605 case ARLDCL, ARLDCLCC:
2607 c.maskgen64(p, mask[:], uint64(d))
2609 a = int(mask[0]) /* MB */
2611 c.ctxt.Diag("invalid mask for rotate: %x (end != bit 63)\n%v", uint64(d), p)
2613 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2614 o1 |= (uint32(a) & 31) << 6
2616 o1 |= 1 << 5 /* mb[5] is top bit */
2619 case ARLDCR, ARLDCRCC:
2621 c.maskgen64(p, mask[:], uint64(d))
2623 a = int(mask[1]) /* ME */
2625 c.ctxt.Diag("invalid mask for rotate: %x %x (start != 0)\n%v", uint64(d), mask[0], p)
2627 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2628 o1 |= (uint32(a) & 31) << 6
2630 o1 |= 1 << 5 /* mb[5] is top bit */
2633 // These opcodes use a shift count like the ppc64 asm, no mask conversion done
2634 case ARLDICR, ARLDICRCC:
2636 sh := c.regoff(&p.From)
2637 if me < 0 || me > 63 || sh > 63 {
2638 c.ctxt.Diag("Invalid me or sh for RLDICR: %x %x\n%v", int(d), sh, p)
2640 o1 = AOP_RLDIC(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(sh), uint32(me))
2642 case ARLDICL, ARLDICLCC, ARLDIC, ARLDICCC:
2644 sh := c.regoff(&p.From)
2645 if mb < 0 || mb > 63 || sh > 63 {
2646 c.ctxt.Diag("Invalid mb or sh for RLDIC, RLDICL: %x %x\n%v", mb, sh, p)
2648 o1 = AOP_RLDIC(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(sh), uint32(mb))
2651 // This is an extended mnemonic defined in the ISA section C.8.1
2652 // clrlsldi ra,rs,b,n --> rldic ra,rs,n,b-n
2653 // It maps onto RLDIC so is directly generated here based on the operands from
2656 b := c.regoff(&p.From)
2657 if n > b || b > 63 {
2658 c.ctxt.Diag("Invalid n or b for CLRLSLDI: %x %x\n%v", n, b, p)
2660 o1 = AOP_RLDIC(OP_RLDIC, uint32(p.To.Reg), uint32(r), uint32(n), uint32(b)-uint32(n))
2663 c.ctxt.Diag("unexpected op in rldc case\n%v", p)
2667 case 17, /* bc bo,bi,lbra (same for now) */
2668 16: /* bc bo,bi,sbra */
2673 if p.From.Type == obj.TYPE_CONST {
2674 a = int(c.regoff(&p.From))
2675 } else if p.From.Type == obj.TYPE_REG {
2677 c.ctxt.Diag("unexpected register setting for branch with CR: %d\n", r)
2679 // BI values for the CR
2698 c.ctxt.Diag("unrecognized register: expecting CR\n")
2702 if p.To.Target() != nil {
2703 v = int32(p.To.Target().Pc - p.Pc)
2706 c.ctxt.Diag("odd branch target address\n%v", p)
2710 if v < -(1<<16) || v >= 1<<15 {
2711 c.ctxt.Diag("branch too far\n%v", p)
2713 o1 = OP_BC(c.opirr(p.As), uint32(a), uint32(r), uint32(v), 0)
2715 case 15: /* br/bl (r) => mov r,lr; br/bl (lr) */
2717 if p.As == ABC || p.As == ABCL {
2718 v = c.regoff(&p.To) & 31
2720 v = 20 /* unconditional */
2722 o1 = AOP_RRR(OP_MTSPR, uint32(p.To.Reg), 0, 0) | (REG_LR&0x1f)<<16 | ((REG_LR>>5)&0x1f)<<11
2723 o2 = OPVCC(19, 16, 0, 0)
2724 if p.As == ABL || p.As == ABCL {
2727 o2 = OP_BCR(o2, uint32(v), uint32(p.To.Index))
2729 case 18: /* br/bl (lr/ctr); bc/bcl bo,bi,(lr/ctr) */
2732 if p.As == ABC || p.As == ABCL {
2733 v = c.regoff(&p.From) & 31
2735 v = 20 /* unconditional */
2741 switch oclass(&p.To) {
2743 o1 = OPVCC(19, 528, 0, 0)
2746 o1 = OPVCC(19, 16, 0, 0)
2749 c.ctxt.Diag("bad optab entry (18): %d\n%v", p.To.Class, p)
2753 // Insert optional branch hint for bclr[l]/bcctr[l]
2754 if p.From3Type() != obj.TYPE_NONE {
2755 bh = uint32(p.GetFrom3().Offset)
2756 if bh == 2 || bh > 3 {
2757 log.Fatalf("BH must be 0,1,3 for %v", p)
2762 if p.As == ABL || p.As == ABCL {
2765 o1 = OP_BCR(o1, uint32(v), uint32(r))
2767 case 19: /* mov $lcon,r ==> cau+or */
2768 d := c.vregoff(&p.From)
2769 o1 = loadu32(int(p.To.Reg), d)
2770 o2 = LOP_IRR(OP_ORI, uint32(p.To.Reg), uint32(p.To.Reg), uint32(int32(d)))
2772 case 20: /* add $ucon,,r | addis $addcon,r,r */
2773 v := c.regoff(&p.From)
2779 if p.As == AADD && (r0iszero == 0 /*TypeKind(100016)*/ && p.Reg == 0 || r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0) {
2780 c.ctxt.Diag("literal operation on R0\n%v", p)
2783 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2785 o1 = AOP_IRR(c.opirr(AADDIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
2788 case 22: /* add $lcon/$andcon,r1,r2 ==> oris+ori+add/ori+add */
2789 if p.To.Reg == REGTMP || p.Reg == REGTMP {
2790 c.ctxt.Diag("can't synthesize large constant\n%v", p)
2792 d := c.vregoff(&p.From)
2797 if p.From.Sym != nil {
2798 c.ctxt.Diag("%v is not supported", p)
2800 // If operand is ANDCON, generate 2 instructions using
2801 // ORI for unsigned value; with LCON 3 instructions.
2803 o1 = LOP_IRR(OP_ORI, REGTMP, REGZERO, uint32(int32(d)))
2804 o2 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2806 o1 = loadu32(REGTMP, d)
2807 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(int32(d)))
2808 o3 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2811 case 23: /* and $lcon/$addcon,r1,r2 ==> oris+ori+and/addi+and */
2812 if p.To.Reg == REGTMP || p.Reg == REGTMP {
2813 c.ctxt.Diag("can't synthesize large constant\n%v", p)
2815 d := c.vregoff(&p.From)
2821 // With ADDCON operand, generate 2 instructions using ADDI for signed value,
2822 // with LCON operand generate 3 instructions.
2824 o1 = LOP_IRR(OP_ADDI, REGZERO, REGTMP, uint32(int32(d)))
2825 o2 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2827 o1 = loadu32(REGTMP, d)
2828 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(int32(d)))
2829 o3 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2831 if p.From.Sym != nil {
2832 c.ctxt.Diag("%v is not supported", p)
2835 case 24: /* lfd fA,float64(0) -> xxlxor xsA,xsA,xsA + fneg for -0 */
2836 o1 = AOP_XX3I(c.oprrr(AXXLXOR), uint32(p.To.Reg), uint32(p.To.Reg), uint32(p.To.Reg), uint32(0))
2837 // This is needed for -0.
2839 o2 = AOP_RRR(c.oprrr(AFNEG), uint32(p.To.Reg), 0, uint32(p.To.Reg))
2843 /* sld[.] $sh,rS,rA -> rldicr[.] $sh,rS,mask(0,63-sh),rA; srd[.] -> rldicl */
2844 v := c.regoff(&p.From)
2872 c.ctxt.Diag("unexpected op in sldi case\n%v", p)
2877 if p.As == AEXTSWSLI || p.As == AEXTSWSLICC {
2878 o1 = AOP_EXTSWSLI(OP_EXTSWSLI, uint32(r), uint32(p.To.Reg), uint32(v))
2881 o1 = AOP_RLDIC(op, uint32(p.To.Reg), uint32(r), uint32(v), uint32(a))
2883 if p.As == ASLDCC || p.As == ASRDCC || p.As == AEXTSWSLICC {
2884 o1 |= 1 // Set the condition code bit
2887 case 26: /* mov $lsext/auto/oreg,,r2 ==> addis+addi */
2888 v := c.vregoff(&p.From)
2889 r := int(p.From.Reg)
2891 switch p.From.Name {
2892 case obj.NAME_EXTERN, obj.NAME_STATIC:
2893 // Load a 32 bit constant, or relocation depending on if a symbol is attached
2894 o1, o2 = c.symbolAccess(p.From.Sym, v, p.To.Reg, OP_ADDI)
2897 r = c.getimpliedreg(&p.From, p)
2899 // Add a 32 bit offset to a register.
2900 o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(int32(v))))
2901 o2 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), REGTMP, uint32(v))
2904 case 27: /* subc ra,$simm,rd => subfic rd,ra,$simm */
2905 v := c.regoff(p.GetFrom3())
2907 r := int(p.From.Reg)
2908 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2910 case 28: /* subc r1,$lcon,r2 ==> cau+or+subfc */
2911 if p.To.Reg == REGTMP || p.From.Reg == REGTMP {
2912 c.ctxt.Diag("can't synthesize large constant\n%v", p)
2914 v := c.regoff(p.GetFrom3())
2915 o1 = AOP_IRR(OP_ADDIS, REGTMP, REGZERO, uint32(v)>>16)
2916 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(v))
2917 o3 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), REGTMP)
2918 if p.From.Sym != nil {
2919 c.ctxt.Diag("%v is not supported", p)
2922 case 29: /* rldic[lr]? $sh,s,$mask,a -- left, right, plain give different masks */
2923 v := c.regoff(&p.From)
2925 d := c.vregoff(p.GetFrom3())
2927 c.maskgen64(p, mask[:], uint64(d))
2930 case ARLDC, ARLDCCC:
2931 a = int(mask[0]) /* MB */
2932 if int32(mask[1]) != (63 - v) {
2933 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[1], v, p)
2936 case ARLDCL, ARLDCLCC:
2937 a = int(mask[0]) /* MB */
2939 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[1], v, p)
2942 case ARLDCR, ARLDCRCC:
2943 a = int(mask[1]) /* ME */
2945 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[0], v, p)
2949 c.ctxt.Diag("unexpected op in rldic case\n%v", p)
2953 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
2954 o1 |= (uint32(a) & 31) << 6
2959 o1 |= 1 << 5 /* mb[5] is top bit */
2962 case 30: /* rldimi $sh,s,$mask,a */
2963 v := c.regoff(&p.From)
2965 d := c.vregoff(p.GetFrom3())
2967 // Original opcodes had mask operands which had to be converted to a shift count as expected by
2970 case ARLDMI, ARLDMICC:
2972 c.maskgen64(p, mask[:], uint64(d))
2973 if int32(mask[1]) != (63 - v) {
2974 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[1], v, p)
2976 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
2977 o1 |= (uint32(mask[0]) & 31) << 6
2981 if mask[0]&0x20 != 0 {
2982 o1 |= 1 << 5 /* mb[5] is top bit */
2985 // Opcodes with shift count operands.
2986 case ARLDIMI, ARLDIMICC:
2987 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
2988 o1 |= (uint32(d) & 31) << 6
2997 case 31: /* dword */
2998 d := c.vregoff(&p.From)
3000 if c.ctxt.Arch.ByteOrder == binary.BigEndian {
3001 o1 = uint32(d >> 32)
3005 o2 = uint32(d >> 32)
3008 if p.From.Sym != nil {
3009 rel := obj.Addrel(c.cursym)
3010 rel.Off = int32(c.pc)
3012 rel.Sym = p.From.Sym
3013 rel.Add = p.From.Offset
3014 rel.Type = objabi.R_ADDR
3019 case 32: /* fmul frc,fra,frd */
3025 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0) | (uint32(p.From.Reg)&31)<<6
3027 case 33: /* fabs [frb,]frd; fmr. frb,frd */
3028 r := int(p.From.Reg)
3030 if oclass(&p.From) == C_NONE {
3033 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), 0, uint32(r))
3035 case 34: /* FMADDx fra,frb,frc,frt (t=a*c±b) */
3036 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg)) | (uint32(p.GetFrom3().Reg)&31)<<6
3038 case 35: /* mov r,lext/lauto/loreg ==> cau $(v>>16),sb,r'; store o(r') */
3039 v := c.regoff(&p.To)
3043 r = c.getimpliedreg(&p.To, p)
3045 // Offsets in DS form stores must be a multiple of 4
3046 inst := c.opstore(p.As)
3047 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3048 log.Fatalf("invalid offset for DS form load/store %v", p)
3050 o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v)))
3051 o2 = AOP_IRR(inst, uint32(p.From.Reg), REGTMP, uint32(v))
3053 case 36: /* mov bz/h/hz lext/lauto/lreg,r ==> lbz/lha/lhz etc */
3054 v := c.regoff(&p.From)
3056 r := int(p.From.Reg)
3058 r = c.getimpliedreg(&p.From, p)
3060 o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v)))
3061 o2 = AOP_IRR(c.opload(p.As), uint32(p.To.Reg), REGTMP, uint32(v))
3063 case 37: /* movb lext/lauto/lreg,r ==> lbz o(reg),r; extsb r */
3064 v := c.regoff(&p.From)
3066 r := int(p.From.Reg)
3068 r = c.getimpliedreg(&p.From, p)
3070 o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v)))
3071 o2 = AOP_IRR(c.opload(p.As), uint32(p.To.Reg), REGTMP, uint32(v))
3072 o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3075 o1 = uint32(c.regoff(&p.From))
3077 case 41: /* stswi */
3078 o1 = AOP_RRR(c.opirr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), 0) | (uint32(c.regoff(p.GetFrom3()))&0x7F)<<11
3081 o1 = AOP_RRR(c.opirr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), 0) | (uint32(c.regoff(p.GetFrom3()))&0x7F)<<11
3083 case 43: /* data cache instructions: op (Ra+[Rb]), [th|l] */
3084 /* TH field for dcbt/dcbtst: */
3085 /* 0 = Block access - program will soon access EA. */
3086 /* 8-15 = Stream access - sequence of access (data stream). See section 4.3.2 of the ISA for details. */
3087 /* 16 = Block access - program will soon make a transient access to EA. */
3088 /* 17 = Block access - program will not access EA for a long time. */
3090 /* L field for dcbf: */
3091 /* 0 = invalidates the block containing EA in all processors. */
3092 /* 1 = same as 0, but with limited scope (i.e. block in the current processor will not be reused soon). */
3093 /* 3 = same as 1, but with even more limited scope (i.e. block in the current processor primary cache will not be reused soon). */
3094 if p.To.Type == obj.TYPE_NONE {
3095 o1 = AOP_RRR(c.oprrr(p.As), 0, uint32(p.From.Index), uint32(p.From.Reg))
3097 th := c.regoff(&p.To)
3098 o1 = AOP_RRR(c.oprrr(p.As), uint32(th), uint32(p.From.Index), uint32(p.From.Reg))
3101 case 44: /* indexed store */
3102 o1 = AOP_RRR(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(p.To.Reg))
3104 case 45: /* indexed load */
3106 /* The assembler accepts a 4-operand l*arx instruction. The fourth operand is an Exclusive Access Hint (EH) */
3107 /* The EH field can be used as a lock acquire/release hint as follows: */
3108 /* 0 = Atomic Update (fetch-and-operate or similar algorithm) */
3109 /* 1 = Exclusive Access (lock acquire and release) */
3110 case ALBAR, ALHAR, ALWAR, ALDAR:
3111 if p.From3Type() != obj.TYPE_NONE {
3112 eh := int(c.regoff(p.GetFrom3()))
3114 c.ctxt.Diag("illegal EH field\n%v", p)
3116 o1 = AOP_RRRI(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg), uint32(eh))
3118 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3121 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3123 case 46: /* plain op */
3126 case 47: /* op Ra, Rd; also op [Ra,] Rd */
3127 r := int(p.From.Reg)
3132 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0)
3134 case 48: /* op Rs, Ra */
3135 r := int(p.From.Reg)
3140 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0)
3142 case 49: /* op Rb; op $n, Rb */
3143 if p.From.Type != obj.TYPE_REG { /* tlbie $L, rB */
3144 v := c.regoff(&p.From) & 1
3145 o1 = AOP_RRR(c.oprrr(p.As), 0, 0, uint32(p.To.Reg)) | uint32(v)<<21
3147 o1 = AOP_RRR(c.oprrr(p.As), 0, 0, uint32(p.From.Reg))
3150 case 50: /* rem[u] r1[,r2],r3 */
3157 t := v & (1<<10 | 1) /* OE|Rc */
3158 o1 = AOP_RRR(v&^t, REGTMP, uint32(r), uint32(p.From.Reg))
3159 o2 = AOP_RRR(OP_MULLW, REGTMP, REGTMP, uint32(p.From.Reg))
3160 o3 = AOP_RRR(OP_SUBF|t, uint32(p.To.Reg), REGTMP, uint32(r))
3164 /* Clear top 32 bits */
3165 o3 = OP_RLW(OP_RLDIC, REGTMP, REGTMP, 0, 0, 0) | 1<<5
3168 case 51: /* remd[u] r1[,r2],r3 */
3175 t := v & (1<<10 | 1) /* OE|Rc */
3176 o1 = AOP_RRR(v&^t, REGTMP, uint32(r), uint32(p.From.Reg))
3177 o2 = AOP_RRR(OP_MULLD, REGTMP, REGTMP, uint32(p.From.Reg))
3178 o3 = AOP_RRR(OP_SUBF|t, uint32(p.To.Reg), REGTMP, uint32(r))
3179 /* cases 50,51: removed; can be reused. */
3181 /* cases 50,51: removed; can be reused. */
3183 case 52: /* mtfsbNx cr(n) */
3184 v := c.regoff(&p.From) & 31
3186 o1 = AOP_RRR(c.oprrr(p.As), uint32(v), 0, 0)
3188 case 53: /* mffsX ,fr1 */
3189 o1 = AOP_RRR(OP_MFFS, uint32(p.To.Reg), 0, 0)
3191 case 55: /* op Rb, Rd */
3192 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), 0, uint32(p.From.Reg))
3194 case 56: /* sra $sh,[s,]a; srd $sh,[s,]a */
3195 v := c.regoff(&p.From)
3201 o1 = AOP_RRR(c.opirr(p.As), uint32(r), uint32(p.To.Reg), uint32(v)&31)
3202 if (p.As == ASRAD || p.As == ASRADCC) && (v&0x20 != 0) {
3203 o1 |= 1 << 1 /* sh[5]: high bit of the 6-bit shift count for SRAD[CC] */
3206 case 57: /* slw $sh,[s,]a -> rlwinm ... */
3207 v := c.regoff(&p.From)
3215 * Let user (gs) shoot himself in the foot.
3216 * qc has already complained.
3219 ctxt->diag("illegal shift %ld\n%v", v, p);
3229 mask[0], mask[1] = 0, 31
3231 mask[0], mask[1] = uint8(v), 31
3234 mask[0], mask[1] = 0, uint8(31-v)
3236 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(r), uint32(v), uint32(mask[0]), uint32(mask[1]))
3237 if p.As == ASLWCC || p.As == ASRWCC {
3238 o1 |= 1 // set the condition code
3241 case 58: /* logical $andcon,[s],a */
3242 v := c.regoff(&p.From)
3248 o1 = LOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
3250 case 59: /* or/xor/and $ucon,,r | oris/xoris/andis $addcon,r,r */
3251 v := c.regoff(&p.From)
3259 o1 = LOP_IRR(c.opirr(AORIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16) /* oris, xoris, andis. */
3261 o1 = LOP_IRR(c.opirr(AXORIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
3263 o1 = LOP_IRR(c.opirr(AANDISCC), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
3265 o1 = LOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
3268 case 60: /* tw to,a,b */
3269 r := int(c.regoff(&p.From) & 31)
3271 o1 = AOP_RRR(c.oprrr(p.As), uint32(r), uint32(p.Reg), uint32(p.To.Reg))
3273 case 61: /* tw to,a,$simm */
3274 r := int(c.regoff(&p.From) & 31)
3276 v := c.regoff(&p.To)
3277 o1 = AOP_IRR(c.opirr(p.As), uint32(r), uint32(p.Reg), uint32(v))
3279 case 62: /* rlwmi $sh,s,$mask,a */
3280 v := c.regoff(&p.From)
3283 n := c.regoff(p.GetFrom3())
3284 // This is an extended mnemonic described in the ISA C.8.2
3285 // clrlslwi ra,rs,b,n -> rlwinm ra,rs,n,b-n,31-n
3286 // It maps onto rlwinm which is directly generated here.
3287 if n > v || v >= 32 {
3288 c.ctxt.Diag("Invalid n or b for CLRLSLWI: %x %x\n%v", v, n, p)
3291 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.Reg), uint32(n), uint32(v-n), uint32(31-n))
3294 c.maskgen(p, mask[:], uint32(c.regoff(p.GetFrom3())))
3295 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), uint32(v))
3296 o1 |= (uint32(mask[0])&31)<<6 | (uint32(mask[1])&31)<<1
3299 case 63: /* rlwmi b,s,$mask,a */
3301 c.maskgen(p, mask[:], uint32(c.regoff(p.GetFrom3())))
3302 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.Reg), uint32(p.To.Reg), uint32(p.From.Reg))
3303 o1 |= (uint32(mask[0])&31)<<6 | (uint32(mask[1])&31)<<1
3305 case 64: /* mtfsf fr[, $m] {,fpcsr} */
3307 if p.From3Type() != obj.TYPE_NONE {
3308 v = c.regoff(p.GetFrom3()) & 255
3312 o1 = OP_MTFSF | uint32(v)<<17 | uint32(p.From.Reg)<<11
3314 case 65: /* MOVFL $imm,FPSCR(n) => mtfsfi crfd,imm */
3316 c.ctxt.Diag("must specify FPSCR(n)\n%v", p)
3318 o1 = OP_MTFSFI | (uint32(p.To.Reg)&15)<<23 | (uint32(c.regoff(&p.From))&31)<<12
3320 case 66: /* mov spr,r1; mov r1,spr, also dcr */
3323 if REG_R0 <= p.From.Reg && p.From.Reg <= REG_R31 {
3326 if REG_DCR0 <= v && v <= REG_DCR0+1023 {
3327 o1 = OPVCC(31, 451, 0, 0) /* mtdcr */
3329 o1 = OPVCC(31, 467, 0, 0) /* mtspr */
3333 v = int32(p.From.Reg)
3334 if REG_DCR0 <= v && v <= REG_DCR0+1023 {
3335 o1 = OPVCC(31, 323, 0, 0) /* mfdcr */
3337 o1 = OPVCC(31, 339, 0, 0) /* mfspr */
3341 o1 = AOP_RRR(o1, uint32(r), 0, 0) | (uint32(v)&0x1f)<<16 | ((uint32(v)>>5)&0x1f)<<11
3343 case 67: /* mcrf crfD,crfS */
3344 if p.From.Type != obj.TYPE_REG || p.From.Reg < REG_CR0 || REG_CR7 < p.From.Reg || p.To.Type != obj.TYPE_REG || p.To.Reg < REG_CR0 || REG_CR7 < p.To.Reg {
3345 c.ctxt.Diag("illegal CR field number\n%v", p)
3347 o1 = AOP_RRR(OP_MCRF, ((uint32(p.To.Reg) & 7) << 2), ((uint32(p.From.Reg) & 7) << 2), 0)
3349 case 68: /* mfcr rD; mfocrf CRM,rD */
3350 if p.From.Type == obj.TYPE_REG && REG_CR0 <= p.From.Reg && p.From.Reg <= REG_CR7 {
3351 v := int32(1 << uint(7-(p.To.Reg&7))) /* CR(n) */
3352 o1 = AOP_RRR(OP_MFCR, uint32(p.To.Reg), 0, 0) | 1<<20 | uint32(v)<<12 /* new form, mfocrf */
3354 o1 = AOP_RRR(OP_MFCR, uint32(p.To.Reg), 0, 0) /* old form, whole register */
3357 case 69: /* mtcrf CRM,rS */
3359 if p.From3Type() != obj.TYPE_NONE {
3361 c.ctxt.Diag("can't use both mask and CR(n)\n%v", p)
3363 v = c.regoff(p.GetFrom3()) & 0xff
3368 v = 1 << uint(7-(p.To.Reg&7)) /* CR(n) */
3372 o1 = AOP_RRR(OP_MTCRF, uint32(p.From.Reg), 0, 0) | uint32(v)<<12
3374 case 70: /* [f]cmp r,r,cr*/
3379 r = (int(p.Reg) & 7) << 2
3381 o1 = AOP_RRR(c.oprrr(p.As), uint32(r), uint32(p.From.Reg), uint32(p.To.Reg))
3383 case 71: /* cmp[l] r,i,cr*/
3388 r = (int(p.Reg) & 7) << 2
3390 o1 = AOP_RRR(c.opirr(p.As), uint32(r), uint32(p.From.Reg), 0) | uint32(c.regoff(&p.To))&0xffff
3392 case 72: /* slbmte (Rb+Rs -> slb[Rb]) -> Rs, Rb */
3393 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), 0, uint32(p.To.Reg))
3395 case 73: /* mcrfs crfD,crfS */
3396 if p.From.Type != obj.TYPE_REG || p.From.Reg != REG_FPSCR || p.To.Type != obj.TYPE_REG || p.To.Reg < REG_CR0 || REG_CR7 < p.To.Reg {
3397 c.ctxt.Diag("illegal FPSCR/CR field number\n%v", p)
3399 o1 = AOP_RRR(OP_MCRFS, ((uint32(p.To.Reg) & 7) << 2), ((0 & 7) << 2), 0)
3401 case 77: /* syscall $scon, syscall Rx */
3402 if p.From.Type == obj.TYPE_CONST {
3403 if p.From.Offset > BIG || p.From.Offset < -BIG {
3404 c.ctxt.Diag("illegal syscall, sysnum too large: %v", p)
3406 o1 = AOP_IRR(OP_ADDI, REGZERO, REGZERO, uint32(p.From.Offset))
3407 } else if p.From.Type == obj.TYPE_REG {
3408 o1 = LOP_RRR(OP_OR, REGZERO, uint32(p.From.Reg), uint32(p.From.Reg))
3410 c.ctxt.Diag("illegal syscall: %v", p)
3411 o1 = 0x7fe00008 // trap always
3415 o3 = AOP_RRR(c.oprrr(AXOR), REGZERO, REGZERO, REGZERO) // XOR R0, R0
3417 case 78: /* undef */
3418 o1 = 0 /* "An instruction consisting entirely of binary 0s is guaranteed
3419 always to be an illegal instruction." */
3421 /* relocation operations */
3423 v := c.vregoff(&p.To)
3424 // Offsets in DS form stores must be a multiple of 4
3425 inst := c.opstore(p.As)
3426 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3427 log.Fatalf("invalid offset for DS form load/store %v", p)
3429 o1, o2 = c.symbolAccess(p.To.Sym, v, p.From.Reg, inst)
3431 case 75: // 32 bit offset symbol loads (got/toc/addr)
3434 // Offsets in DS form loads must be a multiple of 4
3435 inst := c.opload(p.As)
3436 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3437 log.Fatalf("invalid offset for DS form load/store %v", p)
3439 switch p.From.Name {
3440 case obj.NAME_GOTREF, obj.NAME_TOCREF:
3442 c.ctxt.Diag("invalid offset for GOT/TOC access %v", p)
3444 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0)
3445 o2 = AOP_IRR(inst, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3446 rel := obj.Addrel(c.cursym)
3447 rel.Off = int32(c.pc)
3449 rel.Sym = p.From.Sym
3450 switch p.From.Name {
3451 case obj.NAME_GOTREF:
3452 rel.Type = objabi.R_ADDRPOWER_GOT
3453 case obj.NAME_TOCREF:
3454 rel.Type = objabi.R_ADDRPOWER_TOCREL_DS
3457 o1, o2 = c.symbolAccess(p.From.Sym, v, p.To.Reg, inst)
3461 v := c.vregoff(&p.From)
3462 // Offsets in DS form loads must be a multiple of 4
3463 inst := c.opload(p.As)
3464 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3465 log.Fatalf("invalid offset for DS form load/store %v", p)
3467 o1, o2 = c.symbolAccess(p.From.Sym, v, p.To.Reg, inst)
3468 o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3471 if p.From.Offset != 0 {
3472 c.ctxt.Diag("invalid offset against tls var %v", p)
3474 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R13, 0)
3475 o2 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3476 rel := obj.Addrel(c.cursym)
3477 rel.Off = int32(c.pc)
3479 rel.Sym = p.From.Sym
3480 rel.Type = objabi.R_POWER_TLS_LE
3483 if p.From.Offset != 0 {
3484 c.ctxt.Diag("invalid offset against tls var %v", p)
3486 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0)
3487 o2 = AOP_IRR(c.opload(AMOVD), uint32(p.To.Reg), uint32(p.To.Reg), 0)
3488 o3 = AOP_RRR(OP_ADD, uint32(p.To.Reg), uint32(p.To.Reg), REG_R13)
3489 rel := obj.Addrel(c.cursym)
3490 rel.Off = int32(c.pc)
3492 rel.Sym = p.From.Sym
3493 rel.Type = objabi.R_POWER_TLS_IE
3494 rel = obj.Addrel(c.cursym)
3495 rel.Off = int32(c.pc) + 8
3497 rel.Sym = p.From.Sym
3498 rel.Type = objabi.R_POWER_TLS
3500 case 82: /* vector instructions, VX-form and VC-form */
3501 if p.From.Type == obj.TYPE_REG {
3502 /* reg reg none OR reg reg reg */
3503 /* 3-register operand order: VRA, VRB, VRT */
3504 /* 2-register operand order: VRA, VRT */
3505 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3506 } else if p.From3Type() == obj.TYPE_CONST {
3507 /* imm imm reg reg */
3508 /* operand order: SIX, VRA, ST, VRT */
3509 six := int(c.regoff(&p.From))
3510 st := int(c.regoff(p.GetFrom3()))
3511 o1 = AOP_IIRR(c.opiirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(st), uint32(six))
3512 } else if p.From3Type() == obj.TYPE_NONE && p.Reg != 0 {
3514 /* operand order: UIM, VRB, VRT */
3515 uim := int(c.regoff(&p.From))
3516 o1 = AOP_VIRR(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(uim))
3519 /* operand order: SIM, VRT */
3520 sim := int(c.regoff(&p.From))
3521 o1 = AOP_IR(c.opirr(p.As), uint32(p.To.Reg), uint32(sim))
3524 case 83: /* vector instructions, VA-form */
3525 if p.From.Type == obj.TYPE_REG {
3526 /* reg reg reg reg */
3527 /* 4-register operand order: VRA, VRB, VRC, VRT */
3528 o1 = AOP_RRRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg))
3529 } else if p.From.Type == obj.TYPE_CONST {
3530 /* imm reg reg reg */
3531 /* operand order: SHB, VRA, VRB, VRT */
3532 shb := int(c.regoff(&p.From))
3533 o1 = AOP_IRRR(c.opirrr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg), uint32(shb))
3536 case 84: // ISEL BC,RA,RB,RT -> isel rt,ra,rb,bc
3537 bc := c.vregoff(&p.From)
3539 // rt = To.Reg, ra = p.Reg, rb = p.From3.Reg
3540 o1 = AOP_ISEL(OP_ISEL, uint32(p.To.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg), uint32(bc))
3542 case 85: /* vector instructions, VX-form */
3544 /* 2-register operand order: VRB, VRT */
3545 o1 = AOP_RR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg))
3547 case 86: /* VSX indexed store, XX1-form */
3549 /* 3-register operand order: XT, (RB)(RA*1) */
3550 o1 = AOP_XX1(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(p.To.Reg))
3552 case 87: /* VSX indexed load, XX1-form */
3554 /* 3-register operand order: (RB)(RA*1), XT */
3555 o1 = AOP_XX1(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3557 case 88: /* VSX instructions, XX1-form */
3558 /* reg reg none OR reg reg reg */
3559 /* 3-register operand order: RA, RB, XT */
3560 /* 2-register operand order: XS, RA or RA, XT */
3561 xt := int32(p.To.Reg)
3562 xs := int32(p.From.Reg)
3563 /* We need to treat the special case of extended mnemonics that may have a FREG/VREG as an argument */
3564 if REG_V0 <= xt && xt <= REG_V31 {
3565 /* Convert V0-V31 to VS32-VS63 */
3567 o1 = AOP_XX1(c.oprrr(p.As), uint32(xt), uint32(p.From.Reg), uint32(p.Reg))
3568 } else if REG_F0 <= xt && xt <= REG_F31 {
3569 /* Convert F0-F31 to VS0-VS31 */
3571 o1 = AOP_XX1(c.oprrr(p.As), uint32(xt), uint32(p.From.Reg), uint32(p.Reg))
3572 } else if REG_VS0 <= xt && xt <= REG_VS63 {
3573 o1 = AOP_XX1(c.oprrr(p.As), uint32(xt), uint32(p.From.Reg), uint32(p.Reg))
3574 } else if REG_V0 <= xs && xs <= REG_V31 {
3575 /* Likewise for XS */
3577 o1 = AOP_XX1(c.oprrr(p.As), uint32(xs), uint32(p.To.Reg), uint32(p.Reg))
3578 } else if REG_F0 <= xs && xs <= REG_F31 {
3580 o1 = AOP_XX1(c.oprrr(p.As), uint32(xs), uint32(p.To.Reg), uint32(p.Reg))
3581 } else if REG_VS0 <= xs && xs <= REG_VS63 {
3582 o1 = AOP_XX1(c.oprrr(p.As), uint32(xs), uint32(p.To.Reg), uint32(p.Reg))
3585 case 89: /* VSX instructions, XX2-form */
3586 /* reg none reg OR reg imm reg */
3587 /* 2-register operand order: XB, XT or XB, UIM, XT*/
3588 uim := int(c.regoff(p.GetFrom3()))
3589 o1 = AOP_XX2(c.oprrr(p.As), uint32(p.To.Reg), uint32(uim), uint32(p.From.Reg))
3591 case 90: /* VSX instructions, XX3-form */
3592 if p.From3Type() == obj.TYPE_NONE {
3594 /* 3-register operand order: XA, XB, XT */
3595 o1 = AOP_XX3(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3596 } else if p.From3Type() == obj.TYPE_CONST {
3597 /* reg reg reg imm */
3598 /* operand order: XA, XB, DM, XT */
3599 dm := int(c.regoff(p.GetFrom3()))
3600 o1 = AOP_XX3I(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(dm))
3603 case 91: /* VSX instructions, XX4-form */
3604 /* reg reg reg reg */
3605 /* 3-register operand order: XA, XB, XC, XT */
3606 o1 = AOP_XX4(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg))
3608 case 92: /* X-form instructions, 3-operands */
3609 if p.To.Type == obj.TYPE_CONST {
3611 xf := int32(p.From.Reg)
3612 if REG_F0 <= xf && xf <= REG_F31 {
3613 /* operand order: FRA, FRB, BF */
3614 bf := int(c.regoff(&p.To)) << 2
3615 o1 = AOP_RRR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg), uint32(p.Reg))
3617 /* operand order: RA, RB, L */
3618 l := int(c.regoff(&p.To))
3619 o1 = AOP_RRR(c.opirr(p.As), uint32(l), uint32(p.From.Reg), uint32(p.Reg))
3621 } else if p.From3Type() == obj.TYPE_CONST {
3623 /* operand order: RB, L, RA */
3624 l := int(c.regoff(p.GetFrom3()))
3625 o1 = AOP_RRR(c.opirr(p.As), uint32(l), uint32(p.To.Reg), uint32(p.From.Reg))
3626 } else if p.To.Type == obj.TYPE_REG {
3627 cr := int32(p.To.Reg)
3628 if REG_CR0 <= cr && cr <= REG_CR7 {
3630 /* operand order: RA, RB, BF */
3631 bf := (int(p.To.Reg) & 7) << 2
3632 o1 = AOP_RRR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg), uint32(p.Reg))
3633 } else if p.From.Type == obj.TYPE_CONST {
3635 /* operand order: L, RT */
3636 l := int(c.regoff(&p.From))
3637 o1 = AOP_RRR(c.opirr(p.As), uint32(p.To.Reg), uint32(l), uint32(p.Reg))
3640 case ACOPY, APASTECC:
3641 o1 = AOP_RRR(c.opirr(p.As), uint32(1), uint32(p.From.Reg), uint32(p.To.Reg))
3644 /* operand order: RS, RB, RA */
3645 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3650 case 93: /* X-form instructions, 2-operands */
3651 if p.To.Type == obj.TYPE_CONST {
3653 /* operand order: FRB, BF */
3654 bf := int(c.regoff(&p.To)) << 2
3655 o1 = AOP_RR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg))
3656 } else if p.Reg == 0 {
3657 /* popcnt* r,r, X-form */
3658 /* operand order: RS, RA */
3659 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3662 case 94: /* Z23-form instructions, 4-operands */
3663 /* reg reg reg imm */
3664 /* operand order: RA, RB, CY, RT */
3665 cy := int(c.regoff(p.GetFrom3()))
3666 o1 = AOP_Z23I(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(cy))
3668 case 96: /* VSX load, DQ-form */
3670 /* operand order: (RA)(DQ), XT */
3671 dq := int16(c.regoff(&p.From))
3673 c.ctxt.Diag("invalid offset for DQ form load/store %v", dq)
3675 o1 = AOP_DQ(c.opload(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(dq))
3677 case 97: /* VSX store, DQ-form */
3679 /* operand order: XT, (RA)(DQ) */
3680 dq := int16(c.regoff(&p.To))
3682 c.ctxt.Diag("invalid offset for DQ form load/store %v", dq)
3684 o1 = AOP_DQ(c.opstore(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(dq))
3685 case 98: /* VSX indexed load or load with length (also left-justified), x-form */
3686 /* vsreg, reg, reg */
3687 o1 = AOP_XX1(c.opload(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3688 case 99: /* VSX store with length (also left-justified) x-form */
3689 /* reg, reg, vsreg */
3690 o1 = AOP_XX1(c.opstore(p.As), uint32(p.From.Reg), uint32(p.Reg), uint32(p.To.Reg))
3691 case 100: /* VSX X-form XXSPLTIB */
3692 if p.From.Type == obj.TYPE_CONST {
3694 uim := int(c.regoff(&p.From))
3696 /* Use AOP_XX1 form with 0 for one of the registers. */
3697 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.To.Reg), uint32(0), uint32(uim))
3699 c.ctxt.Diag("invalid ops for %v", p.As)
3702 o1 = AOP_XX2(c.oprrr(p.As), uint32(p.To.Reg), uint32(0), uint32(p.From.Reg))
3704 case 102: /* RLWMI $sh,rs,$mb,$me,rt (M-form opcode)*/
3705 mb := uint32(c.regoff(&p.RestArgs[0].Addr))
3706 me := uint32(c.regoff(&p.RestArgs[1].Addr))
3707 sh := uint32(c.regoff(&p.From))
3708 o1 = OP_RLW(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), sh, mb, me)
3710 case 103: /* RLWMI rb,rs,$mb,$me,rt (M-form opcode)*/
3711 mb := uint32(c.regoff(&p.RestArgs[0].Addr))
3712 me := uint32(c.regoff(&p.RestArgs[1].Addr))
3713 o1 = OP_RLW(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.From.Reg), mb, me)
3723 func (c *ctxt9) vregoff(a *obj.Addr) int64 {
// regoff returns the resolved offset/constant value of operand a,
// truncated to 32 bits. It is the int32 convenience wrapper around
// vregoff, used for immediates and displacements that fit instruction
// fields (callers mask/range-check the result as needed).
3731 func (c *ctxt9) regoff(a *obj.Addr) int32 {
3732 	return int32(c.vregoff(a))
3735 func (c *ctxt9) oprrr(a obj.As) uint32 {
3738 return OPVCC(31, 266, 0, 0)
3740 return OPVCC(31, 266, 0, 1)
3742 return OPVCC(31, 266, 1, 0)
3744 return OPVCC(31, 266, 1, 1)
3746 return OPVCC(31, 10, 0, 0)
3748 return OPVCC(31, 10, 0, 1)
3750 return OPVCC(31, 10, 1, 0)
3752 return OPVCC(31, 10, 1, 1)
3754 return OPVCC(31, 138, 0, 0)
3756 return OPVCC(31, 138, 0, 1)
3758 return OPVCC(31, 138, 1, 0)
3760 return OPVCC(31, 138, 1, 1)
3762 return OPVCC(31, 234, 0, 0)
3764 return OPVCC(31, 234, 0, 1)
3766 return OPVCC(31, 234, 1, 0)
3768 return OPVCC(31, 234, 1, 1)
3770 return OPVCC(31, 202, 0, 0)
3772 return OPVCC(31, 202, 0, 1)
3774 return OPVCC(31, 202, 1, 0)
3776 return OPVCC(31, 202, 1, 1)
3778 return OPVCC(31, 170, 0, 0) /* addex - v3.0b */
3781 return OPVCC(31, 28, 0, 0)
3783 return OPVCC(31, 28, 0, 1)
3785 return OPVCC(31, 60, 0, 0)
3787 return OPVCC(31, 60, 0, 1)
3790 return OPVCC(31, 0, 0, 0) | 1<<21 /* L=1 */
3792 return OPVCC(31, 32, 0, 0) | 1<<21
3794 return OPVCC(31, 0, 0, 0) /* L=0 */
3796 return OPVCC(31, 32, 0, 0)
3798 return OPVCC(31, 508, 0, 0) /* cmpb - v2.05 */
3800 return OPVCC(31, 224, 0, 0) /* cmpeqb - v3.00 */
3803 return OPVCC(31, 26, 0, 0)
3805 return OPVCC(31, 26, 0, 1)
3807 return OPVCC(31, 58, 0, 0)
3809 return OPVCC(31, 58, 0, 1)
3812 return OPVCC(19, 257, 0, 0)
3814 return OPVCC(19, 129, 0, 0)
3816 return OPVCC(19, 289, 0, 0)
3818 return OPVCC(19, 225, 0, 0)
3820 return OPVCC(19, 33, 0, 0)
3822 return OPVCC(19, 449, 0, 0)
3824 return OPVCC(19, 417, 0, 0)
3826 return OPVCC(19, 193, 0, 0)
3829 return OPVCC(31, 86, 0, 0)
3831 return OPVCC(31, 470, 0, 0)
3833 return OPVCC(31, 54, 0, 0)
3835 return OPVCC(31, 278, 0, 0)
3837 return OPVCC(31, 246, 0, 0)
3839 return OPVCC(31, 1014, 0, 0)
3842 return OPVCC(31, 265, 0, 0) /* modud - v3.0 */
3844 return OPVCC(31, 267, 0, 0) /* moduw - v3.0 */
3846 return OPVCC(31, 777, 0, 0) /* modsd - v3.0 */
3848 return OPVCC(31, 779, 0, 0) /* modsw - v3.0 */
3851 return OPVCC(31, 491, 0, 0)
3854 return OPVCC(31, 491, 0, 1)
3857 return OPVCC(31, 491, 1, 0)
3860 return OPVCC(31, 491, 1, 1)
3863 return OPVCC(31, 459, 0, 0)
3866 return OPVCC(31, 459, 0, 1)
3869 return OPVCC(31, 459, 1, 0)
3872 return OPVCC(31, 459, 1, 1)
3875 return OPVCC(31, 489, 0, 0)
3878 return OPVCC(31, 489, 0, 1)
3881 return OPVCC(31, 425, 0, 0)
3884 return OPVCC(31, 425, 0, 1)
3887 return OPVCC(31, 393, 0, 0)
3890 return OPVCC(31, 393, 0, 1)
3893 return OPVCC(31, 489, 1, 0)
3896 return OPVCC(31, 489, 1, 1)
3898 case ADIVDU, AREMDU:
3899 return OPVCC(31, 457, 0, 0)
3902 return OPVCC(31, 457, 0, 1)
3905 return OPVCC(31, 457, 1, 0)
3908 return OPVCC(31, 457, 1, 1)
3911 return OPVCC(31, 854, 0, 0)
3914 return OPVCC(31, 284, 0, 0)
3916 return OPVCC(31, 284, 0, 1)
3919 return OPVCC(31, 954, 0, 0)
3921 return OPVCC(31, 954, 0, 1)
3923 return OPVCC(31, 922, 0, 0)
3925 return OPVCC(31, 922, 0, 1)
3927 return OPVCC(31, 986, 0, 0)
3929 return OPVCC(31, 986, 0, 1)
3932 return OPVCC(63, 264, 0, 0)
3934 return OPVCC(63, 264, 0, 1)
3936 return OPVCC(63, 21, 0, 0)
3938 return OPVCC(63, 21, 0, 1)
3940 return OPVCC(59, 21, 0, 0)
3942 return OPVCC(59, 21, 0, 1)
3944 return OPVCC(63, 32, 0, 0)
3946 return OPVCC(63, 0, 0, 0)
3948 return OPVCC(63, 846, 0, 0)
3950 return OPVCC(63, 846, 0, 1)
3952 return OPVCC(63, 974, 0, 0)
3954 return OPVCC(63, 974, 0, 1)
3956 return OPVCC(59, 846, 0, 0)
3958 return OPVCC(59, 846, 0, 1)
3960 return OPVCC(63, 14, 0, 0)
3962 return OPVCC(63, 14, 0, 1)
3964 return OPVCC(63, 15, 0, 0)
3966 return OPVCC(63, 15, 0, 1)
3968 return OPVCC(63, 814, 0, 0)
3970 return OPVCC(63, 814, 0, 1)
3972 return OPVCC(63, 815, 0, 0)
3974 return OPVCC(63, 815, 0, 1)
3976 return OPVCC(63, 18, 0, 0)
3978 return OPVCC(63, 18, 0, 1)
3980 return OPVCC(59, 18, 0, 0)
3982 return OPVCC(59, 18, 0, 1)
3984 return OPVCC(63, 29, 0, 0)
3986 return OPVCC(63, 29, 0, 1)
3988 return OPVCC(59, 29, 0, 0)
3990 return OPVCC(59, 29, 0, 1)
3992 case AFMOVS, AFMOVD:
3993 return OPVCC(63, 72, 0, 0) /* load */
3995 return OPVCC(63, 72, 0, 1)
3997 return OPVCC(63, 28, 0, 0)
3999 return OPVCC(63, 28, 0, 1)
4001 return OPVCC(59, 28, 0, 0)
4003 return OPVCC(59, 28, 0, 1)
4005 return OPVCC(63, 25, 0, 0)
4007 return OPVCC(63, 25, 0, 1)
4009 return OPVCC(59, 25, 0, 0)
4011 return OPVCC(59, 25, 0, 1)
4013 return OPVCC(63, 136, 0, 0)
4015 return OPVCC(63, 136, 0, 1)
4017 return OPVCC(63, 40, 0, 0)
4019 return OPVCC(63, 40, 0, 1)
4021 return OPVCC(63, 31, 0, 0)
4023 return OPVCC(63, 31, 0, 1)
4025 return OPVCC(59, 31, 0, 0)
4027 return OPVCC(59, 31, 0, 1)
4029 return OPVCC(63, 30, 0, 0)
4031 return OPVCC(63, 30, 0, 1)
4033 return OPVCC(59, 30, 0, 0)
4035 return OPVCC(59, 30, 0, 1)
4037 return OPVCC(63, 8, 0, 0)
4039 return OPVCC(63, 8, 0, 1)
4041 return OPVCC(59, 24, 0, 0)
4043 return OPVCC(59, 24, 0, 1)
4045 return OPVCC(63, 488, 0, 0)
4047 return OPVCC(63, 488, 0, 1)
4049 return OPVCC(63, 456, 0, 0)
4051 return OPVCC(63, 456, 0, 1)
4053 return OPVCC(63, 424, 0, 0)
4055 return OPVCC(63, 424, 0, 1)
4057 return OPVCC(63, 392, 0, 0)
4059 return OPVCC(63, 392, 0, 1)
4061 return OPVCC(63, 12, 0, 0)
4063 return OPVCC(63, 12, 0, 1)
4065 return OPVCC(63, 26, 0, 0)
4067 return OPVCC(63, 26, 0, 1)
4069 return OPVCC(63, 23, 0, 0)
4071 return OPVCC(63, 23, 0, 1)
4073 return OPVCC(63, 22, 0, 0)
4075 return OPVCC(63, 22, 0, 1)
4077 return OPVCC(59, 22, 0, 0)
4079 return OPVCC(59, 22, 0, 1)
4081 return OPVCC(63, 20, 0, 0)
4083 return OPVCC(63, 20, 0, 1)
4085 return OPVCC(59, 20, 0, 0)
4087 return OPVCC(59, 20, 0, 1)
4090 return OPVCC(31, 982, 0, 0)
4092 return OPVCC(19, 150, 0, 0)
4095 return OPVCC(63, 70, 0, 0)
4097 return OPVCC(63, 70, 0, 1)
4099 return OPVCC(63, 38, 0, 0)
4101 return OPVCC(63, 38, 0, 1)
4104 return OPVCC(31, 75, 0, 0)
4106 return OPVCC(31, 75, 0, 1)
4108 return OPVCC(31, 11, 0, 0)
4110 return OPVCC(31, 11, 0, 1)
4112 return OPVCC(31, 235, 0, 0)
4114 return OPVCC(31, 235, 0, 1)
4116 return OPVCC(31, 235, 1, 0)
4118 return OPVCC(31, 235, 1, 1)
4121 return OPVCC(31, 73, 0, 0)
4123 return OPVCC(31, 73, 0, 1)
4125 return OPVCC(31, 9, 0, 0)
4127 return OPVCC(31, 9, 0, 1)
4129 return OPVCC(31, 233, 0, 0)
4131 return OPVCC(31, 233, 0, 1)
4133 return OPVCC(31, 233, 1, 0)
4135 return OPVCC(31, 233, 1, 1)
4138 return OPVCC(31, 476, 0, 0)
4140 return OPVCC(31, 476, 0, 1)
4142 return OPVCC(31, 104, 0, 0)
4144 return OPVCC(31, 104, 0, 1)
4146 return OPVCC(31, 104, 1, 0)
4148 return OPVCC(31, 104, 1, 1)
4150 return OPVCC(31, 124, 0, 0)
4152 return OPVCC(31, 124, 0, 1)
4154 return OPVCC(31, 444, 0, 0)
4156 return OPVCC(31, 444, 0, 1)
4158 return OPVCC(31, 412, 0, 0)
4160 return OPVCC(31, 412, 0, 1)
4163 return OPVCC(31, 506, 0, 0) /* popcntd - v2.06 */
4165 return OPVCC(31, 378, 0, 0) /* popcntw - v2.06 */
4167 return OPVCC(31, 122, 0, 0) /* popcntb - v2.02 */
4169 return OPVCC(31, 538, 0, 0) /* cnttzw - v3.00 */
4171 return OPVCC(31, 538, 0, 1) /* cnttzw. - v3.00 */
4173 return OPVCC(31, 570, 0, 0) /* cnttzd - v3.00 */
4175 return OPVCC(31, 570, 0, 1) /* cnttzd. - v3.00 */
4178 return OPVCC(19, 50, 0, 0)
4180 return OPVCC(19, 51, 0, 0)
4182 return OPVCC(19, 18, 0, 0)
4184 return OPVCC(19, 274, 0, 0)
4187 return OPVCC(20, 0, 0, 0)
4189 return OPVCC(20, 0, 0, 1)
4191 return OPVCC(23, 0, 0, 0)
4193 return OPVCC(23, 0, 0, 1)
4196 return OPVCC(30, 8, 0, 0)
4198 return OPVCC(30, 0, 0, 1)
4201 return OPVCC(30, 9, 0, 0)
4203 return OPVCC(30, 9, 0, 1)
4206 return OPVCC(30, 0, 0, 0)
4208 return OPVCC(30, 0, 0, 1)
4210 return OPMD(30, 1, 0) // rldicr
4212 return OPMD(30, 1, 1) // rldicr.
4215 return OPMD(30, 2, 0) // rldic
4217 return OPMD(30, 2, 1) // rldic.
4220 return OPVCC(17, 1, 0, 0)
4223 return OPVCC(31, 24, 0, 0)
4225 return OPVCC(31, 24, 0, 1)
4227 return OPVCC(31, 27, 0, 0)
4229 return OPVCC(31, 27, 0, 1)
4232 return OPVCC(31, 792, 0, 0)
4234 return OPVCC(31, 792, 0, 1)
4236 return OPVCC(31, 794, 0, 0)
4238 return OPVCC(31, 794, 0, 1)
4241 return OPVCC(31, 445, 0, 0)
4243 return OPVCC(31, 445, 0, 1)
4246 return OPVCC(31, 536, 0, 0)
4248 return OPVCC(31, 536, 0, 1)
4250 return OPVCC(31, 539, 0, 0)
4252 return OPVCC(31, 539, 0, 1)
4255 return OPVCC(31, 40, 0, 0)
4257 return OPVCC(31, 40, 0, 1)
4259 return OPVCC(31, 40, 1, 0)
4261 return OPVCC(31, 40, 1, 1)
4263 return OPVCC(31, 8, 0, 0)
4265 return OPVCC(31, 8, 0, 1)
4267 return OPVCC(31, 8, 1, 0)
4269 return OPVCC(31, 8, 1, 1)
4271 return OPVCC(31, 136, 0, 0)
4273 return OPVCC(31, 136, 0, 1)
4275 return OPVCC(31, 136, 1, 0)
4277 return OPVCC(31, 136, 1, 1)
4279 return OPVCC(31, 232, 0, 0)
4281 return OPVCC(31, 232, 0, 1)
4283 return OPVCC(31, 232, 1, 0)
4285 return OPVCC(31, 232, 1, 1)
4287 return OPVCC(31, 200, 0, 0)
4289 return OPVCC(31, 200, 0, 1)
4291 return OPVCC(31, 200, 1, 0)
4293 return OPVCC(31, 200, 1, 1)
4296 return OPVCC(31, 598, 0, 0)
4298 return OPVCC(31, 598, 0, 0) | 1<<21
4301 return OPVCC(31, 598, 0, 0) | 2<<21
4304 return OPVCC(31, 306, 0, 0)
4306 return OPVCC(31, 274, 0, 0)
4308 return OPVCC(31, 566, 0, 0)
4310 return OPVCC(31, 498, 0, 0)
4312 return OPVCC(31, 434, 0, 0)
4314 return OPVCC(31, 915, 0, 0)
4316 return OPVCC(31, 851, 0, 0)
4318 return OPVCC(31, 402, 0, 0)
4321 return OPVCC(31, 4, 0, 0)
4323 return OPVCC(31, 68, 0, 0)
4325 /* Vector (VMX/Altivec) instructions */
4326 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
4327 /* are enabled starting at POWER6 (ISA 2.05). */
4329 return OPVX(4, 1028, 0, 0) /* vand - v2.03 */
4331 return OPVX(4, 1092, 0, 0) /* vandc - v2.03 */
4333 return OPVX(4, 1412, 0, 0) /* vnand - v2.07 */
4336 return OPVX(4, 1156, 0, 0) /* vor - v2.03 */
4338 return OPVX(4, 1348, 0, 0) /* vorc - v2.07 */
4340 return OPVX(4, 1284, 0, 0) /* vnor - v2.03 */
4342 return OPVX(4, 1220, 0, 0) /* vxor - v2.03 */
4344 return OPVX(4, 1668, 0, 0) /* veqv - v2.07 */
4347 return OPVX(4, 0, 0, 0) /* vaddubm - v2.03 */
4349 return OPVX(4, 64, 0, 0) /* vadduhm - v2.03 */
4351 return OPVX(4, 128, 0, 0) /* vadduwm - v2.03 */
4353 return OPVX(4, 192, 0, 0) /* vaddudm - v2.07 */
4355 return OPVX(4, 256, 0, 0) /* vadduqm - v2.07 */
4358 return OPVX(4, 320, 0, 0) /* vaddcuq - v2.07 */
4360 return OPVX(4, 384, 0, 0) /* vaddcuw - v2.03 */
4363 return OPVX(4, 512, 0, 0) /* vaddubs - v2.03 */
4365 return OPVX(4, 576, 0, 0) /* vadduhs - v2.03 */
4367 return OPVX(4, 640, 0, 0) /* vadduws - v2.03 */
4370 return OPVX(4, 768, 0, 0) /* vaddsbs - v2.03 */
4372 return OPVX(4, 832, 0, 0) /* vaddshs - v2.03 */
4374 return OPVX(4, 896, 0, 0) /* vaddsws - v2.03 */
4377 return OPVX(4, 60, 0, 0) /* vaddeuqm - v2.07 */
4379 return OPVX(4, 61, 0, 0) /* vaddecuq - v2.07 */
4382 return OPVX(4, 776, 0, 0) /* vmulesb - v2.03 */
4384 return OPVX(4, 264, 0, 0) /* vmulosb - v2.03 */
4386 return OPVX(4, 520, 0, 0) /* vmuleub - v2.03 */
4388 return OPVX(4, 8, 0, 0) /* vmuloub - v2.03 */
4390 return OPVX(4, 840, 0, 0) /* vmulesh - v2.03 */
4392 return OPVX(4, 328, 0, 0) /* vmulosh - v2.03 */
4394 return OPVX(4, 584, 0, 0) /* vmuleuh - v2.03 */
4396 return OPVX(4, 72, 0, 0) /* vmulouh - v2.03 */
4398 return OPVX(4, 904, 0, 0) /* vmulesw - v2.07 */
4400 return OPVX(4, 392, 0, 0) /* vmulosw - v2.07 */
4402 return OPVX(4, 648, 0, 0) /* vmuleuw - v2.07 */
4404 return OPVX(4, 136, 0, 0) /* vmulouw - v2.07 */
4406 return OPVX(4, 137, 0, 0) /* vmuluwm - v2.07 */
4409 return OPVX(4, 1032, 0, 0) /* vpmsumb - v2.07 */
4411 return OPVX(4, 1096, 0, 0) /* vpmsumh - v2.07 */
4413 return OPVX(4, 1160, 0, 0) /* vpmsumw - v2.07 */
4415 return OPVX(4, 1224, 0, 0) /* vpmsumd - v2.07 */
4418 return OPVX(4, 35, 0, 0) /* vmsumudm - v3.00b */
4421 return OPVX(4, 1024, 0, 0) /* vsububm - v2.03 */
4423 return OPVX(4, 1088, 0, 0) /* vsubuhm - v2.03 */
4425 return OPVX(4, 1152, 0, 0) /* vsubuwm - v2.03 */
4427 return OPVX(4, 1216, 0, 0) /* vsubudm - v2.07 */
4429 return OPVX(4, 1280, 0, 0) /* vsubuqm - v2.07 */
4432 return OPVX(4, 1344, 0, 0) /* vsubcuq - v2.07 */
4434 return OPVX(4, 1408, 0, 0) /* vsubcuw - v2.03 */
4437 return OPVX(4, 1536, 0, 0) /* vsububs - v2.03 */
4439 return OPVX(4, 1600, 0, 0) /* vsubuhs - v2.03 */
4441 return OPVX(4, 1664, 0, 0) /* vsubuws - v2.03 */
4444 return OPVX(4, 1792, 0, 0) /* vsubsbs - v2.03 */
4446 return OPVX(4, 1856, 0, 0) /* vsubshs - v2.03 */
4448 return OPVX(4, 1920, 0, 0) /* vsubsws - v2.03 */
4451 return OPVX(4, 62, 0, 0) /* vsubeuqm - v2.07 */
4453 return OPVX(4, 63, 0, 0) /* vsubecuq - v2.07 */
4456 return OPVX(4, 4, 0, 0) /* vrlb - v2.03 */
4458 return OPVX(4, 68, 0, 0) /* vrlh - v2.03 */
4460 return OPVX(4, 132, 0, 0) /* vrlw - v2.03 */
4462 return OPVX(4, 196, 0, 0) /* vrld - v2.07 */
4465 return OPVX(4, 1676, 0, 0) /* vmrgow - v2.07 */
4467 return OPVX(4, 1932, 0, 0) /* vmrgew - v2.07 */
4470 return OPVX(4, 260, 0, 0) /* vslh - v2.03 */
4472 return OPVX(4, 324, 0, 0) /* vslh - v2.03 */
4474 return OPVX(4, 388, 0, 0) /* vslw - v2.03 */
4476 return OPVX(4, 452, 0, 0) /* vsl - v2.03 */
4478 return OPVX(4, 1036, 0, 0) /* vsl - v2.03 */
4480 return OPVX(4, 516, 0, 0) /* vsrb - v2.03 */
4482 return OPVX(4, 580, 0, 0) /* vsrh - v2.03 */
4484 return OPVX(4, 644, 0, 0) /* vsrw - v2.03 */
4486 return OPVX(4, 708, 0, 0) /* vsr - v2.03 */
4488 return OPVX(4, 1100, 0, 0) /* vsro - v2.03 */
4490 return OPVX(4, 1476, 0, 0) /* vsld - v2.07 */
4492 return OPVX(4, 1732, 0, 0) /* vsrd - v2.07 */
4495 return OPVX(4, 772, 0, 0) /* vsrab - v2.03 */
4497 return OPVX(4, 836, 0, 0) /* vsrah - v2.03 */
4499 return OPVX(4, 900, 0, 0) /* vsraw - v2.03 */
4501 return OPVX(4, 964, 0, 0) /* vsrad - v2.07 */
4504 return OPVC(4, 1356, 0, 0) /* vbpermq - v2.07 */
4506 return OPVC(4, 1484, 0, 0) /* vbpermd - v3.00 */
4509 return OPVX(4, 1794, 0, 0) /* vclzb - v2.07 */
4511 return OPVX(4, 1858, 0, 0) /* vclzh - v2.07 */
4513 return OPVX(4, 1922, 0, 0) /* vclzw - v2.07 */
4515 return OPVX(4, 1986, 0, 0) /* vclzd - v2.07 */
4518 return OPVX(4, 1795, 0, 0) /* vpopcntb - v2.07 */
4520 return OPVX(4, 1859, 0, 0) /* vpopcnth - v2.07 */
4522 return OPVX(4, 1923, 0, 0) /* vpopcntw - v2.07 */
4524 return OPVX(4, 1987, 0, 0) /* vpopcntd - v2.07 */
4527 return OPVC(4, 6, 0, 0) /* vcmpequb - v2.03 */
4529 return OPVC(4, 6, 0, 1) /* vcmpequb. - v2.03 */
4531 return OPVC(4, 70, 0, 0) /* vcmpequh - v2.03 */
4533 return OPVC(4, 70, 0, 1) /* vcmpequh. - v2.03 */
4535 return OPVC(4, 134, 0, 0) /* vcmpequw - v2.03 */
4537 return OPVC(4, 134, 0, 1) /* vcmpequw. - v2.03 */
4539 return OPVC(4, 199, 0, 0) /* vcmpequd - v2.07 */
4541 return OPVC(4, 199, 0, 1) /* vcmpequd. - v2.07 */
4544 return OPVC(4, 518, 0, 0) /* vcmpgtub - v2.03 */
4546 return OPVC(4, 518, 0, 1) /* vcmpgtub. - v2.03 */
4548 return OPVC(4, 582, 0, 0) /* vcmpgtuh - v2.03 */
4550 return OPVC(4, 582, 0, 1) /* vcmpgtuh. - v2.03 */
4552 return OPVC(4, 646, 0, 0) /* vcmpgtuw - v2.03 */
4554 return OPVC(4, 646, 0, 1) /* vcmpgtuw. - v2.03 */
4556 return OPVC(4, 711, 0, 0) /* vcmpgtud - v2.07 */
4558 return OPVC(4, 711, 0, 1) /* vcmpgtud. v2.07 */
4560 return OPVC(4, 774, 0, 0) /* vcmpgtsb - v2.03 */
4562 return OPVC(4, 774, 0, 1) /* vcmpgtsb. - v2.03 */
4564 return OPVC(4, 838, 0, 0) /* vcmpgtsh - v2.03 */
4566 return OPVC(4, 838, 0, 1) /* vcmpgtsh. - v2.03 */
4568 return OPVC(4, 902, 0, 0) /* vcmpgtsw - v2.03 */
4570 return OPVC(4, 902, 0, 1) /* vcmpgtsw. - v2.03 */
4572 return OPVC(4, 967, 0, 0) /* vcmpgtsd - v2.07 */
4574 return OPVC(4, 967, 0, 1) /* vcmpgtsd. - v2.07 */
4577 return OPVC(4, 263, 0, 0) /* vcmpnezb - v3.00 */
4579 return OPVC(4, 263, 0, 1) /* vcmpnezb. - v3.00 */
4581 return OPVC(4, 7, 0, 0) /* vcmpneb - v3.00 */
4583 return OPVC(4, 7, 0, 1) /* vcmpneb. - v3.00 */
4585 return OPVC(4, 71, 0, 0) /* vcmpneh - v3.00 */
4587 return OPVC(4, 71, 0, 1) /* vcmpneh. - v3.00 */
4589 return OPVC(4, 135, 0, 0) /* vcmpnew - v3.00 */
4591 return OPVC(4, 135, 0, 1) /* vcmpnew. - v3.00 */
4594 return OPVX(4, 43, 0, 0) /* vperm - v2.03 */
4596 return OPVX(4, 45, 0, 0) /* vpermxor - v2.03 */
4598 return OPVX(4, 59, 0, 0) /* vpermr - v3.0 */
4601 return OPVX(4, 42, 0, 0) /* vsel - v2.03 */
4604 return OPVX(4, 1288, 0, 0) /* vcipher - v2.07 */
4606 return OPVX(4, 1289, 0, 0) /* vcipherlast - v2.07 */
4608 return OPVX(4, 1352, 0, 0) /* vncipher - v2.07 */
4610 return OPVX(4, 1353, 0, 0) /* vncipherlast - v2.07 */
4612 return OPVX(4, 1480, 0, 0) /* vsbox - v2.07 */
4613 /* End of vector instructions */
4615 /* Vector scalar (VSX) instructions */
4616 /* ISA 2.06 enables these for POWER7. */
4617 case AMFVSRD, AMFVRD, AMFFPRD:
4618 return OPVXX1(31, 51, 0) /* mfvsrd - v2.07 */
4620 return OPVXX1(31, 115, 0) /* mfvsrwz - v2.07 */
4622 return OPVXX1(31, 307, 0) /* mfvsrld - v3.00 */
4624 case AMTVSRD, AMTFPRD, AMTVRD:
4625 return OPVXX1(31, 179, 0) /* mtvsrd - v2.07 */
4627 return OPVXX1(31, 211, 0) /* mtvsrwa - v2.07 */
4629 return OPVXX1(31, 243, 0) /* mtvsrwz - v2.07 */
4631 return OPVXX1(31, 435, 0) /* mtvsrdd - v3.00 */
4633 return OPVXX1(31, 403, 0) /* mtvsrws - v3.00 */
4636 return OPVXX3(60, 130, 0) /* xxland - v2.06 */
4638 return OPVXX3(60, 138, 0) /* xxlandc - v2.06 */
4640 return OPVXX3(60, 186, 0) /* xxleqv - v2.07 */
4642 return OPVXX3(60, 178, 0) /* xxlnand - v2.07 */
4645 return OPVXX3(60, 170, 0) /* xxlorc - v2.07 */
4647 return OPVXX3(60, 162, 0) /* xxlnor - v2.06 */
4648 case AXXLOR, AXXLORQ:
4649 return OPVXX3(60, 146, 0) /* xxlor - v2.06 */
4651 return OPVXX3(60, 154, 0) /* xxlxor - v2.06 */
4654 return OPVXX4(60, 3, 0) /* xxsel - v2.06 */
4657 return OPVXX3(60, 18, 0) /* xxmrghw - v2.06 */
4659 return OPVXX3(60, 50, 0) /* xxmrglw - v2.06 */
4662 return OPVXX2(60, 164, 0) /* xxspltw - v2.06 */
4665 return OPVCC(60, 360, 0, 0) /* xxspltib - v3.0 */
4668 return OPVXX3(60, 26, 0) /* xxperm - v2.06 */
4670 return OPVXX3(60, 10, 0) /* xxpermdi - v2.06 */
4673 return OPVXX3(60, 2, 0) /* xxsldwi - v2.06 */
4676 return OPVXX2VA(60, 475, 31) /* xxbrq - v3.0 */
4678 return OPVXX2VA(60, 475, 23) /* xxbrd - v3.0 */
4680 return OPVXX2VA(60, 475, 15) /* xxbrw - v3.0 */
4682 return OPVXX2VA(60, 475, 7) /* xxbrh - v3.0 */
4685 return OPVXX2(60, 265, 0) /* xscvdpsp - v2.06 */
4687 return OPVXX2(60, 329, 0) /* xscvspdp - v2.06 */
4689 return OPVXX2(60, 267, 0) /* xscvdpspn - v2.07 */
4691 return OPVXX2(60, 331, 0) /* xscvspdpn - v2.07 */
4694 return OPVXX2(60, 393, 0) /* xvcvdpsp - v2.06 */
4696 return OPVXX2(60, 457, 0) /* xvcvspdp - v2.06 */
4699 return OPVXX2(60, 344, 0) /* xscvdpsxds - v2.06 */
4701 return OPVXX2(60, 88, 0) /* xscvdpsxws - v2.06 */
4703 return OPVXX2(60, 328, 0) /* xscvdpuxds - v2.06 */
4705 return OPVXX2(60, 72, 0) /* xscvdpuxws - v2.06 */
4708 return OPVXX2(60, 376, 0) /* xscvsxddp - v2.06 */
4710 return OPVXX2(60, 360, 0) /* xscvuxddp - v2.06 */
4712 return OPVXX2(60, 312, 0) /* xscvsxdsp - v2.06 */
4714 return OPVXX2(60, 296, 0) /* xscvuxdsp - v2.06 */
4717 return OPVXX2(60, 472, 0) /* xvcvdpsxds - v2.06 */
4719 return OPVXX2(60, 216, 0) /* xvcvdpsxws - v2.06 */
4721 return OPVXX2(60, 456, 0) /* xvcvdpuxds - v2.06 */
4723 return OPVXX2(60, 200, 0) /* xvcvdpuxws - v2.06 */
4725 return OPVXX2(60, 408, 0) /* xvcvspsxds - v2.07 */
4727 return OPVXX2(60, 152, 0) /* xvcvspsxws - v2.07 */
4729 return OPVXX2(60, 392, 0) /* xvcvspuxds - v2.07 */
4731 return OPVXX2(60, 136, 0) /* xvcvspuxws - v2.07 */
4734 return OPVXX2(60, 504, 0) /* xvcvsxddp - v2.06 */
4736 return OPVXX2(60, 248, 0) /* xvcvsxwdp - v2.06 */
4738 return OPVXX2(60, 488, 0) /* xvcvuxddp - v2.06 */
4740 return OPVXX2(60, 232, 0) /* xvcvuxwdp - v2.06 */
4742 return OPVXX2(60, 440, 0) /* xvcvsxdsp - v2.06 */
4744 return OPVXX2(60, 184, 0) /* xvcvsxwsp - v2.06 */
4746 return OPVXX2(60, 424, 0) /* xvcvuxdsp - v2.06 */
4748 return OPVXX2(60, 168, 0) /* xvcvuxwsp - v2.06 */
4749 /* End of VSX instructions */
4752 return OPVX(4, 48, 0, 0) /* maddhd - v3.00 */
4754 return OPVX(4, 49, 0, 0) /* maddhdu - v3.00 */
4756 return OPVX(4, 51, 0, 0) /* maddld - v3.00 */
4759 return OPVCC(31, 316, 0, 0)
4761 return OPVCC(31, 316, 0, 1)
4764 c.ctxt.Diag("bad r/r, r/r/r or r/r/r/r opcode %v", a)
4768 func (c *ctxt9) opirrr(a obj.As) uint32 {
4770 /* Vector (VMX/Altivec) instructions */
4771 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
4772 /* are enabled starting at POWER6 (ISA 2.05). */
4774 return OPVX(4, 44, 0, 0) /* vsldoi - v2.03 */
4777 c.ctxt.Diag("bad i/r/r/r opcode %v", a)
4781 func (c *ctxt9) opiirr(a obj.As) uint32 {
4783 /* Vector (VMX/Altivec) instructions */
4784 /* ISA 2.07 enables these for POWER8 and beyond. */
4786 return OPVX(4, 1666, 0, 0) /* vshasigmaw - v2.07 */
4788 return OPVX(4, 1730, 0, 0) /* vshasigmad - v2.07 */
4791 c.ctxt.Diag("bad i/i/r/r opcode %v", a)
4795 func (c *ctxt9) opirr(a obj.As) uint32 {
4798 return OPVCC(14, 0, 0, 0)
4800 return OPVCC(12, 0, 0, 0)
4802 return OPVCC(13, 0, 0, 0)
4804 return OPVCC(15, 0, 0, 0) /* ADDIS */
4807 return OPVCC(28, 0, 0, 0)
4809 return OPVCC(29, 0, 0, 0) /* ANDIS. */
4812 return OPVCC(18, 0, 0, 0)
4814 return OPVCC(18, 0, 0, 0) | 1
4816 return OPVCC(18, 0, 0, 0) | 1
4818 return OPVCC(18, 0, 0, 0) | 1
4820 return OPVCC(16, 0, 0, 0)
4822 return OPVCC(16, 0, 0, 0) | 1
4825 return AOP_RRR(16<<26, 12, 2, 0)
4827 return AOP_RRR(16<<26, 4, 0, 0)
4829 return AOP_RRR(16<<26, 12, 1, 0)
4831 return AOP_RRR(16<<26, 4, 1, 0)
4833 return AOP_RRR(16<<26, 12, 0, 0)
4835 return AOP_RRR(16<<26, 4, 2, 0)
4837 return AOP_RRR(16<<26, 4, 3, 0) // apparently unordered-clear
4839 return AOP_RRR(16<<26, 12, 3, 0) // apparently unordered-set
4842 return OPVCC(11, 0, 0, 0) | 1<<21 /* L=1 */
4844 return OPVCC(10, 0, 0, 0) | 1<<21
4846 return OPVCC(11, 0, 0, 0) /* L=0 */
4848 return OPVCC(10, 0, 0, 0)
4850 return OPVCC(31, 224, 0, 0) /* cmpeqb - v3.00 */
4853 return OPVCC(31, 597, 0, 0)
4856 return OPVCC(31, 774, 0, 0) /* copy - v3.00 */
4858 return OPVCC(31, 902, 0, 1) /* paste. - v3.00 */
4860 return OPVCC(31, 755, 0, 0) /* darn - v3.00 */
4862 case AMULLW, AMULLD:
4863 return OPVCC(7, 0, 0, 0) /* mulli works with MULLW or MULLD */
4866 return OPVCC(24, 0, 0, 0)
4868 return OPVCC(25, 0, 0, 0) /* ORIS */
4871 return OPVCC(20, 0, 0, 0) /* rlwimi */
4873 return OPVCC(20, 0, 0, 1)
4875 return OPMD(30, 3, 0) /* rldimi */
4877 return OPMD(30, 3, 1) /* rldimi. */
4879 return OPMD(30, 3, 0) /* rldimi */
4881 return OPMD(30, 3, 1) /* rldimi. */
4883 return OPVCC(21, 0, 0, 0) /* rlwinm */
4885 return OPVCC(21, 0, 0, 1)
4888 return OPMD(30, 0, 0) /* rldicl */
4890 return OPMD(30, 0, 1) /* rldicl. */
4892 return OPMD(30, 1, 0) /* rldicr */
4894 return OPMD(30, 1, 1) /* rldicr. */
4896 return OPMD(30, 2, 0) /* rldic */
4898 return OPMD(30, 2, 1) /* rldic. */
4901 return OPVCC(31, 824, 0, 0)
4903 return OPVCC(31, 824, 0, 1)
4905 return OPVCC(31, (413 << 1), 0, 0)
4907 return OPVCC(31, (413 << 1), 0, 1)
4909 return OPVCC(31, 445, 0, 0)
4911 return OPVCC(31, 445, 0, 1)
4914 return OPVCC(31, 725, 0, 0)
4917 return OPVCC(8, 0, 0, 0)
4920 return OPVCC(3, 0, 0, 0)
4922 return OPVCC(2, 0, 0, 0)
4924 /* Vector (VMX/Altivec) instructions */
4925 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
4926 /* are enabled starting at POWER6 (ISA 2.05). */
4928 return OPVX(4, 524, 0, 0) /* vspltb - v2.03 */
4930 return OPVX(4, 588, 0, 0) /* vsplth - v2.03 */
4932 return OPVX(4, 652, 0, 0) /* vspltw - v2.03 */
4935 return OPVX(4, 780, 0, 0) /* vspltisb - v2.03 */
4937 return OPVX(4, 844, 0, 0) /* vspltish - v2.03 */
4939 return OPVX(4, 908, 0, 0) /* vspltisw - v2.03 */
4940 /* End of vector instructions */
4943 return OPVCC(63, 128, 0, 0) /* ftdiv - v2.06 */
4945 return OPVCC(63, 160, 0, 0) /* ftsqrt - v2.06 */
4948 return OPVCC(26, 0, 0, 0) /* XORIL */
4950 return OPVCC(27, 0, 0, 0) /* XORIS */
4953 c.ctxt.Diag("bad opcode i/r or i/r/r %v", a)
4960 func (c *ctxt9) opload(a obj.As) uint32 {
4963 return OPVCC(58, 0, 0, 0) /* ld */
4965 return OPVCC(58, 0, 0, 1) /* ldu */
4967 return OPVCC(32, 0, 0, 0) /* lwz */
4969 return OPVCC(33, 0, 0, 0) /* lwzu */
4971 return OPVCC(58, 0, 0, 0) | 1<<1 /* lwa */
4973 return OPDQ(61, 1, 0) /* lxv - ISA v3.0 */
4975 return OPVXX1(31, 269, 0) /* lxvl - ISA v3.0 */
4977 return OPVXX1(31, 301, 0) /* lxvll - ISA v3.0 */
4979 return OPVXX1(31, 268, 0) /* lxvx - ISA v3.0 */
4983 return OPVCC(34, 0, 0, 0)
4986 case AMOVBU, AMOVBZU:
4987 return OPVCC(35, 0, 0, 0)
4989 return OPVCC(50, 0, 0, 0)
4991 return OPVCC(51, 0, 0, 0)
4993 return OPVCC(48, 0, 0, 0)
4995 return OPVCC(49, 0, 0, 0)
4997 return OPVCC(42, 0, 0, 0)
4999 return OPVCC(43, 0, 0, 0)
5001 return OPVCC(40, 0, 0, 0)
5003 return OPVCC(41, 0, 0, 0)
5005 return OPVCC(46, 0, 0, 0) /* lmw */
5008 c.ctxt.Diag("bad load opcode %v", a)
5013 * indexed load a(b),d
5015 func (c *ctxt9) oploadx(a obj.As) uint32 {
5018 return OPVCC(31, 23, 0, 0) /* lwzx */
5020 return OPVCC(31, 55, 0, 0) /* lwzux */
5022 return OPVCC(31, 341, 0, 0) /* lwax */
5024 return OPVCC(31, 373, 0, 0) /* lwaux */
5027 return OPVCC(31, 87, 0, 0) /* lbzx */
5029 case AMOVBU, AMOVBZU:
5030 return OPVCC(31, 119, 0, 0) /* lbzux */
5032 return OPVCC(31, 599, 0, 0) /* lfdx */
5034 return OPVCC(31, 631, 0, 0) /* lfdux */
5036 return OPVCC(31, 535, 0, 0) /* lfsx */
5038 return OPVCC(31, 567, 0, 0) /* lfsux */
5040 return OPVCC(31, 855, 0, 0) /* lfiwax - power6, isa 2.05 */
5042 return OPVCC(31, 887, 0, 0) /* lfiwzx - power7, isa 2.06 */
5044 return OPVCC(31, 343, 0, 0) /* lhax */
5046 return OPVCC(31, 375, 0, 0) /* lhaux */
5048 return OPVCC(31, 790, 0, 0) /* lhbrx */
5050 return OPVCC(31, 534, 0, 0) /* lwbrx */
5052 return OPVCC(31, 532, 0, 0) /* ldbrx */
5054 return OPVCC(31, 279, 0, 0) /* lhzx */
5056 return OPVCC(31, 311, 0, 0) /* lhzux */
5058 return OPVCC(31, 310, 0, 0) /* eciwx */
5060 return OPVCC(31, 52, 0, 0) /* lbarx */
5062 return OPVCC(31, 116, 0, 0) /* lharx */
5064 return OPVCC(31, 20, 0, 0) /* lwarx */
5066 return OPVCC(31, 84, 0, 0) /* ldarx */
5068 return OPVCC(31, 533, 0, 0) /* lswx */
5070 return OPVCC(31, 21, 0, 0) /* ldx */
5072 return OPVCC(31, 53, 0, 0) /* ldux */
5074 return OPVCC(31, 309, 0, 0) /* ldmx */
5076 /* Vector (VMX/Altivec) instructions */
5078 return OPVCC(31, 7, 0, 0) /* lvebx - v2.03 */
5080 return OPVCC(31, 39, 0, 0) /* lvehx - v2.03 */
5082 return OPVCC(31, 71, 0, 0) /* lvewx - v2.03 */
5084 return OPVCC(31, 103, 0, 0) /* lvx - v2.03 */
5086 return OPVCC(31, 359, 0, 0) /* lvxl - v2.03 */
5088 return OPVCC(31, 6, 0, 0) /* lvsl - v2.03 */
5090 return OPVCC(31, 38, 0, 0) /* lvsr - v2.03 */
5091 /* End of vector instructions */
5093 /* Vector scalar (VSX) instructions */
5095 return OPVXX1(31, 268, 0) /* lxvx - ISA v3.0 */
5097 return OPVXX1(31, 844, 0) /* lxvd2x - v2.06 */
5099 return OPVXX1(31, 780, 0) /* lxvw4x - v2.06 */
5101 return OPVXX1(31, 812, 0) /* lxvh8x - v3.00 */
5103 return OPVXX1(31, 876, 0) /* lxvb16x - v3.00 */
5105 return OPVXX1(31, 332, 0) /* lxvdsx - v2.06 */
5107 return OPVXX1(31, 588, 0) /* lxsdx - v2.06 */
5109 return OPVXX1(31, 76, 0) /* lxsiwax - v2.07 */
5111 return OPVXX1(31, 12, 0) /* lxsiwzx - v2.07 */
5114 c.ctxt.Diag("bad loadx opcode %v", a)
5121 func (c *ctxt9) opstore(a obj.As) uint32 {
5124 return OPVCC(38, 0, 0, 0) /* stb */
5126 case AMOVBU, AMOVBZU:
5127 return OPVCC(39, 0, 0, 0) /* stbu */
5129 return OPVCC(54, 0, 0, 0) /* stfd */
5131 return OPVCC(55, 0, 0, 0) /* stfdu */
5133 return OPVCC(52, 0, 0, 0) /* stfs */
5135 return OPVCC(53, 0, 0, 0) /* stfsu */
5138 return OPVCC(44, 0, 0, 0) /* sth */
5140 case AMOVHZU, AMOVHU:
5141 return OPVCC(45, 0, 0, 0) /* sthu */
5143 return OPVCC(47, 0, 0, 0) /* stmw */
5145 return OPVCC(31, 725, 0, 0) /* stswi */
5148 return OPVCC(36, 0, 0, 0) /* stw */
5150 case AMOVWZU, AMOVWU:
5151 return OPVCC(37, 0, 0, 0) /* stwu */
5153 return OPVCC(62, 0, 0, 0) /* std */
5155 return OPVCC(62, 0, 0, 1) /* stdu */
5157 return OPDQ(61, 5, 0) /* stxv ISA 3.0 */
5159 return OPVXX1(31, 397, 0) /* stxvl ISA 3.0 */
5161 return OPVXX1(31, 429, 0) /* stxvll ISA 3.0 */
5163 return OPVXX1(31, 396, 0) /* stxvx - ISA v3.0 */
5167 c.ctxt.Diag("unknown store opcode %v", a)
5172 * indexed store s,a(b)
5174 func (c *ctxt9) opstorex(a obj.As) uint32 {
5177 return OPVCC(31, 215, 0, 0) /* stbx */
5179 case AMOVBU, AMOVBZU:
5180 return OPVCC(31, 247, 0, 0) /* stbux */
5182 return OPVCC(31, 727, 0, 0) /* stfdx */
5184 return OPVCC(31, 759, 0, 0) /* stfdux */
5186 return OPVCC(31, 663, 0, 0) /* stfsx */
5188 return OPVCC(31, 695, 0, 0) /* stfsux */
5190 return OPVCC(31, 983, 0, 0) /* stfiwx */
5193 return OPVCC(31, 407, 0, 0) /* sthx */
5195 return OPVCC(31, 918, 0, 0) /* sthbrx */
5197 case AMOVHZU, AMOVHU:
5198 return OPVCC(31, 439, 0, 0) /* sthux */
5201 return OPVCC(31, 151, 0, 0) /* stwx */
5203 case AMOVWZU, AMOVWU:
5204 return OPVCC(31, 183, 0, 0) /* stwux */
5206 return OPVCC(31, 661, 0, 0) /* stswx */
5208 return OPVCC(31, 662, 0, 0) /* stwbrx */
5210 return OPVCC(31, 660, 0, 0) /* stdbrx */
5212 return OPVCC(31, 694, 0, 1) /* stbcx. */
5214 return OPVCC(31, 726, 0, 1) /* sthcx. */
5216 return OPVCC(31, 150, 0, 1) /* stwcx. */
5218 return OPVCC(31, 214, 0, 1) /* stwdx. */
5220 return OPVCC(31, 438, 0, 0) /* ecowx */
5222 return OPVCC(31, 149, 0, 0) /* stdx */
5224 return OPVCC(31, 181, 0, 0) /* stdux */
5226 /* Vector (VMX/Altivec) instructions */
5228 return OPVCC(31, 135, 0, 0) /* stvebx - v2.03 */
5230 return OPVCC(31, 167, 0, 0) /* stvehx - v2.03 */
5232 return OPVCC(31, 199, 0, 0) /* stvewx - v2.03 */
5234 return OPVCC(31, 231, 0, 0) /* stvx - v2.03 */
5236 return OPVCC(31, 487, 0, 0) /* stvxl - v2.03 */
5237 /* End of vector instructions */
5239 /* Vector scalar (VSX) instructions */
5241 return OPVXX1(31, 396, 0) /* stxvx - v3.0 */
5243 return OPVXX1(31, 972, 0) /* stxvd2x - v2.06 */
5245 return OPVXX1(31, 908, 0) /* stxvw4x - v2.06 */
5247 return OPVXX1(31, 940, 0) /* stxvh8x - v3.0 */
5249 return OPVXX1(31, 1004, 0) /* stxvb16x - v3.0 */
5252 return OPVXX1(31, 716, 0) /* stxsdx - v2.06 */
5255 return OPVXX1(31, 140, 0) /* stxsiwx - v2.07 */
5257 /* End of vector scalar instructions */
5261 c.ctxt.Diag("unknown storex opcode %v", a)