1 // cmd/9l/optab.c, cmd/9l/asmout.c from Vita Nuova.
3 // Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
4 // Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
5 // Portions Copyright © 1997-1999 Vita Nuova Limited
6 // Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com)
7 // Portions Copyright © 2004,2006 Bruce Ellis
8 // Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
9 // Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others
10 // Portions Copyright © 2009 The Go Authors. All rights reserved.
12 // Permission is hereby granted, free of charge, to any person obtaining a copy
13 // of this software and associated documentation files (the "Software"), to deal
14 // in the Software without restriction, including without limitation the rights
15 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
16 // copies of the Software, and to permit persons to whom the Software is
17 // furnished to do so, subject to the following conditions:
19 // The above copyright notice and this permission notice shall be included in
20 // all copies or substantial portions of the Software.
22 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
23 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
24 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
25 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
26 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
27 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
42 // ctxt9 holds state while assembling a single function.
43 // Each function gets a fresh ctxt9.
44 // This allows for multiple functions to be safely concurrently assembled.
54 // Instruction layout.
58 funcAlignMask = funcAlign - 1
a1 uint8 // operand class of p.From argument (obj.Addr). p is of type obj.Prog.
a2 uint8 // operand class of p.Reg argument (int16 Register)
a3 uint8 // operand class of p.RestArgs[0] (obj.AddrPos)
a4 uint8 // operand class of p.RestArgs[1]
a5 uint8 // operand class of p.RestArgs[2]
a6 uint8 // operand class of p.To (obj.Addr)
type_ int8 // cases in asmout below. E.g., 44 = st r,(ra+rb); 45 = ld (ra+rb), r
size int8 // Text space in bytes to lay operation
77 // optab contains an array to be sliced of accepted operand combinations for an
78 // instruction. Unused arguments and fields are not explicitly enumerated, and
79 // should not be listed for clarity. Unused arguments and values should always
80 // assume the default value for the given type.
82 // optab does not list every valid ppc64 opcode, it enumerates representative
83 // operand combinations for a class of instruction. The variable oprange indexes
84 // all valid ppc64 opcodes.
// oprange is initialized to point to a slice within optab which contains the valid
// operand combinations for a given instruction. This is initialized from buildop.
89 // Likewise, each slice of optab is dynamically sorted using the ocmp Sort interface
90 // to arrange entries to minimize text size of each opcode.
92 {as: obj.ATEXT, a1: C_LOREG, a6: C_TEXTSIZE, type_: 0, size: 0},
93 {as: obj.ATEXT, a1: C_LOREG, a3: C_LCON, a6: C_TEXTSIZE, type_: 0, size: 0},
94 {as: obj.ATEXT, a1: C_ADDR, a6: C_TEXTSIZE, type_: 0, size: 0},
95 {as: obj.ATEXT, a1: C_ADDR, a3: C_LCON, a6: C_TEXTSIZE, type_: 0, size: 0},
97 {as: AADD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
98 {as: AADD, a1: C_REG, a6: C_REG, type_: 2, size: 4},
99 {as: AADD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
100 {as: AADD, a1: C_SCON, a6: C_REG, type_: 4, size: 4},
101 {as: AADD, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
102 {as: AADD, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4},
103 {as: AADD, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 20, size: 4},
104 {as: AADD, a1: C_UCON, a6: C_REG, type_: 20, size: 4},
105 {as: AADD, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 22, size: 8},
106 {as: AADD, a1: C_ANDCON, a6: C_REG, type_: 22, size: 8},
107 {as: AADD, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12},
108 {as: AADD, a1: C_LCON, a6: C_REG, type_: 22, size: 12},
109 {as: AADDIS, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 20, size: 4},
110 {as: AADDIS, a1: C_ADDCON, a6: C_REG, type_: 20, size: 4},
111 {as: AADDC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
112 {as: AADDC, a1: C_REG, a6: C_REG, type_: 2, size: 4},
113 {as: AADDC, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
114 {as: AADDC, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4},
115 {as: AADDC, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12},
116 {as: AADDC, a1: C_LCON, a6: C_REG, type_: 22, size: 12},
117 {as: AAND, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, /* logical, no literal */
118 {as: AAND, a1: C_REG, a6: C_REG, type_: 6, size: 4},
119 {as: AANDCC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
120 {as: AANDCC, a1: C_REG, a6: C_REG, type_: 6, size: 4},
121 {as: AANDCC, a1: C_ANDCON, a6: C_REG, type_: 58, size: 4},
122 {as: AANDCC, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 58, size: 4},
123 {as: AANDCC, a1: C_UCON, a6: C_REG, type_: 59, size: 4},
124 {as: AANDCC, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
125 {as: AANDCC, a1: C_ADDCON, a6: C_REG, type_: 23, size: 8},
126 {as: AANDCC, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 23, size: 8},
127 {as: AANDCC, a1: C_LCON, a6: C_REG, type_: 23, size: 12},
128 {as: AANDCC, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 23, size: 12},
129 {as: AANDISCC, a1: C_ANDCON, a6: C_REG, type_: 59, size: 4},
130 {as: AANDISCC, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
131 {as: AMULLW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
132 {as: AMULLW, a1: C_REG, a6: C_REG, type_: 2, size: 4},
133 {as: AMULLW, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
134 {as: AMULLW, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4},
135 {as: AMULLW, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
136 {as: AMULLW, a1: C_ANDCON, a6: C_REG, type_: 4, size: 4},
137 {as: AMULLW, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12},
138 {as: AMULLW, a1: C_LCON, a6: C_REG, type_: 22, size: 12},
139 {as: ASUBC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 10, size: 4},
140 {as: ASUBC, a1: C_REG, a6: C_REG, type_: 10, size: 4},
141 {as: ASUBC, a1: C_REG, a3: C_ADDCON, a6: C_REG, type_: 27, size: 4},
142 {as: ASUBC, a1: C_REG, a3: C_LCON, a6: C_REG, type_: 28, size: 12},
143 {as: AOR, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, /* logical, literal not cc (or/xor) */
144 {as: AOR, a1: C_REG, a6: C_REG, type_: 6, size: 4},
145 {as: AOR, a1: C_ANDCON, a6: C_REG, type_: 58, size: 4},
146 {as: AOR, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 58, size: 4},
147 {as: AOR, a1: C_UCON, a6: C_REG, type_: 59, size: 4},
148 {as: AOR, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
149 {as: AOR, a1: C_ADDCON, a6: C_REG, type_: 23, size: 8},
150 {as: AOR, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 23, size: 8},
151 {as: AOR, a1: C_LCON, a6: C_REG, type_: 23, size: 12},
152 {as: AOR, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 23, size: 12},
153 {as: AORIS, a1: C_ANDCON, a6: C_REG, type_: 59, size: 4},
154 {as: AORIS, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
155 {as: ADIVW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4}, /* op r1[,r2],r3 */
156 {as: ADIVW, a1: C_REG, a6: C_REG, type_: 2, size: 4},
157 {as: ASUB, a1: C_REG, a2: C_REG, a6: C_REG, type_: 10, size: 4}, /* op r2[,r1],r3 */
158 {as: ASUB, a1: C_REG, a6: C_REG, type_: 10, size: 4},
159 {as: ASLW, a1: C_REG, a6: C_REG, type_: 6, size: 4},
160 {as: ASLW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
161 {as: ASLD, a1: C_REG, a6: C_REG, type_: 6, size: 4},
162 {as: ASLD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
163 {as: ASLD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 25, size: 4},
164 {as: ASLD, a1: C_SCON, a6: C_REG, type_: 25, size: 4},
165 {as: AEXTSWSLI, a1: C_SCON, a6: C_REG, type_: 25, size: 4},
166 {as: AEXTSWSLI, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 25, size: 4},
167 {as: ASLW, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 57, size: 4},
168 {as: ASLW, a1: C_SCON, a6: C_REG, type_: 57, size: 4},
169 {as: ASRAW, a1: C_REG, a6: C_REG, type_: 6, size: 4},
170 {as: ASRAW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
171 {as: ASRAW, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 56, size: 4},
172 {as: ASRAW, a1: C_SCON, a6: C_REG, type_: 56, size: 4},
173 {as: ASRAD, a1: C_REG, a6: C_REG, type_: 6, size: 4},
174 {as: ASRAD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
175 {as: ASRAD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 56, size: 4},
176 {as: ASRAD, a1: C_SCON, a6: C_REG, type_: 56, size: 4},
177 {as: ARLWMI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 62, size: 4},
178 {as: ARLWMI, a1: C_SCON, a2: C_REG, a3: C_SCON, a4: C_SCON, a6: C_REG, type_: 102, size: 4},
179 {as: ARLWMI, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 63, size: 4},
180 {as: ARLWMI, a1: C_REG, a2: C_REG, a3: C_SCON, a4: C_SCON, a6: C_REG, type_: 103, size: 4},
181 {as: ACLRLSLWI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 62, size: 4},
182 {as: ARLDMI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 30, size: 4},
183 {as: ARLDC, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 29, size: 4},
184 {as: ARLDCL, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 29, size: 4},
185 {as: ARLDCL, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
186 {as: ARLDICL, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
187 {as: ARLDICL, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
188 {as: ARLDCL, a1: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
189 {as: AFADD, a1: C_FREG, a6: C_FREG, type_: 2, size: 4},
190 {as: AFADD, a1: C_FREG, a2: C_FREG, a6: C_FREG, type_: 2, size: 4},
191 {as: AFABS, a1: C_FREG, a6: C_FREG, type_: 33, size: 4},
192 {as: AFABS, a6: C_FREG, type_: 33, size: 4},
193 {as: AFMADD, a1: C_FREG, a2: C_FREG, a3: C_FREG, a6: C_FREG, type_: 34, size: 4},
194 {as: AFMUL, a1: C_FREG, a6: C_FREG, type_: 32, size: 4},
195 {as: AFMUL, a1: C_FREG, a2: C_FREG, a6: C_FREG, type_: 32, size: 4},
197 {as: AMOVBU, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
198 {as: AMOVBU, a1: C_SOREG, a6: C_REG, type_: 8, size: 8},
200 {as: AMOVBZU, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
201 {as: AMOVBZU, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
203 {as: AMOVHBR, a1: C_REG, a6: C_ZOREG, type_: 44, size: 4},
204 {as: AMOVHBR, a1: C_ZOREG, a6: C_REG, type_: 45, size: 4},
206 {as: AMOVB, a1: C_ADDR, a6: C_REG, type_: 75, size: 12},
207 {as: AMOVB, a1: C_LOREG, a6: C_REG, type_: 36, size: 12},
208 {as: AMOVB, a1: C_SOREG, a6: C_REG, type_: 8, size: 8},
209 {as: AMOVB, a1: C_REG, a6: C_ADDR, type_: 74, size: 8},
210 {as: AMOVB, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
211 {as: AMOVB, a1: C_REG, a6: C_LOREG, type_: 35, size: 8},
212 {as: AMOVB, a1: C_REG, a6: C_REG, type_: 13, size: 4},
214 {as: AMOVBZ, a1: C_ADDR, a6: C_REG, type_: 75, size: 8},
215 {as: AMOVBZ, a1: C_LOREG, a6: C_REG, type_: 36, size: 8},
216 {as: AMOVBZ, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
217 {as: AMOVBZ, a1: C_REG, a6: C_ADDR, type_: 74, size: 8},
218 {as: AMOVBZ, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
219 {as: AMOVBZ, a1: C_REG, a6: C_LOREG, type_: 35, size: 8},
220 {as: AMOVBZ, a1: C_REG, a6: C_REG, type_: 13, size: 4},
222 {as: AMOVD, a1: C_ADDCON, a6: C_REG, type_: 3, size: 4},
223 {as: AMOVD, a1: C_ANDCON, a6: C_REG, type_: 3, size: 4},
224 {as: AMOVD, a1: C_UCON, a6: C_REG, type_: 3, size: 4},
225 {as: AMOVD, a1: C_LCON, a6: C_REG, type_: 19, size: 8},
226 {as: AMOVD, a1: C_SACON, a6: C_REG, type_: 3, size: 4},
227 {as: AMOVD, a1: C_LACON, a6: C_REG, type_: 26, size: 8},
228 {as: AMOVD, a1: C_ADDR, a6: C_REG, type_: 75, size: 8},
229 {as: AMOVD, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
230 {as: AMOVD, a1: C_LOREG, a6: C_REG, type_: 36, size: 8},
231 {as: AMOVD, a1: C_TLS_LE, a6: C_REG, type_: 79, size: 8},
232 {as: AMOVD, a1: C_TLS_IE, a6: C_REG, type_: 80, size: 12},
233 {as: AMOVD, a1: C_SPR, a6: C_REG, type_: 66, size: 4},
234 {as: AMOVD, a1: C_REG, a6: C_ADDR, type_: 74, size: 8},
235 {as: AMOVD, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
236 {as: AMOVD, a1: C_REG, a6: C_LOREG, type_: 35, size: 8},
237 {as: AMOVD, a1: C_REG, a6: C_SPR, type_: 66, size: 4},
238 {as: AMOVD, a1: C_REG, a6: C_REG, type_: 13, size: 4},
240 {as: AMOVW, a1: C_ADDCON, a6: C_REG, type_: 3, size: 4},
241 {as: AMOVW, a1: C_ANDCON, a6: C_REG, type_: 3, size: 4},
242 {as: AMOVW, a1: C_UCON, a6: C_REG, type_: 3, size: 4},
243 {as: AMOVW, a1: C_LCON, a6: C_REG, type_: 19, size: 8},
244 {as: AMOVW, a1: C_SACON, a6: C_REG, type_: 3, size: 4},
245 {as: AMOVW, a1: C_LACON, a6: C_REG, type_: 26, size: 8},
246 {as: AMOVW, a1: C_ADDR, a6: C_REG, type_: 75, size: 8},
247 {as: AMOVW, a1: C_CREG, a6: C_REG, type_: 68, size: 4},
248 {as: AMOVW, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
249 {as: AMOVW, a1: C_LOREG, a6: C_REG, type_: 36, size: 8},
250 {as: AMOVW, a1: C_SPR, a6: C_REG, type_: 66, size: 4},
251 {as: AMOVW, a1: C_REG, a6: C_ADDR, type_: 74, size: 8},
252 {as: AMOVW, a1: C_REG, a6: C_CREG, type_: 69, size: 4},
253 {as: AMOVW, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
254 {as: AMOVW, a1: C_REG, a6: C_LOREG, type_: 35, size: 8},
255 {as: AMOVW, a1: C_REG, a6: C_SPR, type_: 66, size: 4},
256 {as: AMOVW, a1: C_REG, a6: C_REG, type_: 13, size: 4},
258 {as: AFMOVD, a1: C_ADDCON, a6: C_FREG, type_: 24, size: 8},
259 {as: AFMOVD, a1: C_SOREG, a6: C_FREG, type_: 8, size: 4},
260 {as: AFMOVD, a1: C_LOREG, a6: C_FREG, type_: 36, size: 8},
261 {as: AFMOVD, a1: C_ZCON, a6: C_FREG, type_: 24, size: 4},
262 {as: AFMOVD, a1: C_ADDR, a6: C_FREG, type_: 75, size: 8},
263 {as: AFMOVD, a1: C_FREG, a6: C_FREG, type_: 33, size: 4},
264 {as: AFMOVD, a1: C_FREG, a6: C_SOREG, type_: 7, size: 4},
265 {as: AFMOVD, a1: C_FREG, a6: C_LOREG, type_: 35, size: 8},
266 {as: AFMOVD, a1: C_FREG, a6: C_ADDR, type_: 74, size: 8},
268 {as: AFMOVSX, a1: C_ZOREG, a6: C_FREG, type_: 45, size: 4},
269 {as: AFMOVSX, a1: C_FREG, a6: C_ZOREG, type_: 44, size: 4},
271 {as: AFMOVSZ, a1: C_ZOREG, a6: C_FREG, type_: 45, size: 4},
273 {as: AMOVFL, a1: C_CREG, a6: C_CREG, type_: 67, size: 4},
274 {as: AMOVFL, a1: C_FPSCR, a6: C_CREG, type_: 73, size: 4},
275 {as: AMOVFL, a1: C_FPSCR, a6: C_FREG, type_: 53, size: 4},
276 {as: AMOVFL, a1: C_FREG, a3: C_LCON, a6: C_FPSCR, type_: 64, size: 4},
277 {as: AMOVFL, a1: C_FREG, a6: C_FPSCR, type_: 64, size: 4},
278 {as: AMOVFL, a1: C_LCON, a6: C_FPSCR, type_: 65, size: 4},
279 {as: AMOVFL, a1: C_REG, a6: C_CREG, type_: 69, size: 4},
280 {as: AMOVFL, a1: C_REG, a6: C_LCON, type_: 69, size: 4},
282 {as: ASYSCALL, type_: 5, size: 4},
283 {as: ASYSCALL, a1: C_REG, type_: 77, size: 12},
284 {as: ASYSCALL, a1: C_SCON, type_: 77, size: 12},
285 {as: ABEQ, a6: C_SBRA, type_: 16, size: 4},
286 {as: ABEQ, a1: C_CREG, a6: C_SBRA, type_: 16, size: 4},
287 {as: ABR, a6: C_LBRA, type_: 11, size: 4},
288 {as: ABR, a6: C_LBRAPIC, type_: 11, size: 8},
289 {as: ABC, a1: C_SCON, a2: C_REG, a6: C_SBRA, type_: 16, size: 4},
290 {as: ABC, a1: C_SCON, a2: C_REG, a6: C_LBRA, type_: 17, size: 4},
291 {as: ABR, a6: C_LR, type_: 18, size: 4},
292 {as: ABR, a3: C_SCON, a6: C_LR, type_: 18, size: 4},
293 {as: ABR, a6: C_CTR, type_: 18, size: 4},
294 {as: ABR, a1: C_REG, a6: C_CTR, type_: 18, size: 4},
295 {as: ABR, a6: C_ZOREG, type_: 15, size: 8},
296 {as: ABC, a2: C_REG, a6: C_LR, type_: 18, size: 4},
297 {as: ABC, a2: C_REG, a6: C_CTR, type_: 18, size: 4},
298 {as: ABC, a1: C_SCON, a2: C_REG, a6: C_LR, type_: 18, size: 4},
299 {as: ABC, a1: C_SCON, a2: C_REG, a6: C_CTR, type_: 18, size: 4},
300 {as: ABC, a6: C_ZOREG, type_: 15, size: 8},
301 {as: ASYNC, type_: 46, size: 4},
302 {as: AWORD, a1: C_LCON, type_: 40, size: 4},
303 {as: ADWORD, a1: C_LCON, type_: 31, size: 8},
304 {as: ADWORD, a1: C_DCON, type_: 31, size: 8},
305 {as: ADWORD, a1: C_LACON, type_: 31, size: 8},
306 {as: AADDME, a1: C_REG, a6: C_REG, type_: 47, size: 4},
307 {as: AEXTSB, a1: C_REG, a6: C_REG, type_: 48, size: 4},
308 {as: AEXTSB, a6: C_REG, type_: 48, size: 4},
309 {as: AISEL, a1: C_LCON, a2: C_REG, a3: C_REG, a6: C_REG, type_: 84, size: 4},
310 {as: AISEL, a1: C_ZCON, a2: C_REG, a3: C_REG, a6: C_REG, type_: 84, size: 4},
311 {as: ANEG, a1: C_REG, a6: C_REG, type_: 47, size: 4},
312 {as: ANEG, a6: C_REG, type_: 47, size: 4},
313 {as: AREM, a1: C_REG, a6: C_REG, type_: 50, size: 12},
314 {as: AREM, a1: C_REG, a2: C_REG, a6: C_REG, type_: 50, size: 12},
315 {as: AREMU, a1: C_REG, a6: C_REG, type_: 50, size: 16},
316 {as: AREMU, a1: C_REG, a2: C_REG, a6: C_REG, type_: 50, size: 16},
317 {as: AREMD, a1: C_REG, a6: C_REG, type_: 51, size: 12},
318 {as: AREMD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 51, size: 12},
319 {as: AMTFSB0, a1: C_SCON, type_: 52, size: 4},
320 /* Other ISA 2.05+ instructions */
321 {as: APOPCNTD, a1: C_REG, a6: C_REG, type_: 93, size: 4}, /* population count, x-form */
322 {as: ACMPB, a1: C_REG, a2: C_REG, a6: C_REG, type_: 92, size: 4}, /* compare byte, x-form */
323 {as: ACMPEQB, a1: C_REG, a2: C_REG, a6: C_CREG, type_: 92, size: 4}, /* compare equal byte, x-form, ISA 3.0 */
324 {as: ACMPEQB, a1: C_REG, a6: C_REG, type_: 70, size: 4},
325 {as: AFTDIV, a1: C_FREG, a2: C_FREG, a6: C_SCON, type_: 92, size: 4}, /* floating test for sw divide, x-form */
326 {as: AFTSQRT, a1: C_FREG, a6: C_SCON, type_: 93, size: 4}, /* floating test for sw square root, x-form */
327 {as: ACOPY, a1: C_REG, a6: C_REG, type_: 92, size: 4}, /* copy/paste facility, x-form */
328 {as: ADARN, a1: C_SCON, a6: C_REG, type_: 92, size: 4}, /* deliver random number, x-form */
329 {as: ALDMX, a1: C_SOREG, a6: C_REG, type_: 45, size: 4}, /* load doubleword monitored, x-form */
330 {as: AMADDHD, a1: C_REG, a2: C_REG, a3: C_REG, a6: C_REG, type_: 83, size: 4}, /* multiply-add high/low doubleword, va-form */
331 {as: AADDEX, a1: C_REG, a2: C_REG, a3: C_SCON, a6: C_REG, type_: 94, size: 4}, /* add extended using alternate carry, z23-form */
332 {as: ACRAND, a1: C_CREG, a6: C_CREG, type_: 2, size: 4}, /* logical ops for condition registers xl-form */
334 /* Vector instructions */
337 {as: ALV, a1: C_SOREG, a6: C_VREG, type_: 45, size: 4}, /* vector load, x-form */
340 {as: ASTV, a1: C_VREG, a6: C_SOREG, type_: 44, size: 4}, /* vector store, x-form */
343 {as: AVAND, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector and, vx-form */
344 {as: AVOR, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector or, vx-form */
347 {as: AVADDUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add unsigned modulo, vx-form */
348 {as: AVADDCU, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add & write carry unsigned, vx-form */
349 {as: AVADDUS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add unsigned saturate, vx-form */
350 {as: AVADDSS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add signed saturate, vx-form */
351 {as: AVADDE, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector add extended, va-form */
353 /* Vector subtract */
354 {as: AVSUBUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract unsigned modulo, vx-form */
355 {as: AVSUBCU, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract & write carry unsigned, vx-form */
356 {as: AVSUBUS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract unsigned saturate, vx-form */
357 {as: AVSUBSS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract signed saturate, vx-form */
358 {as: AVSUBE, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector subtract extended, va-form */
360 /* Vector multiply */
361 {as: AVMULESB, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector multiply, vx-form */
362 {as: AVPMSUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector polynomial multiply & sum, vx-form */
363 {as: AVMSUMUDM, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector multiply-sum, va-form */
366 {as: AVR, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector rotate, vx-form */
369 {as: AVS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector shift, vx-form */
370 {as: AVSA, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector shift algebraic, vx-form */
371 {as: AVSOI, a1: C_ANDCON, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector shift by octet immediate, va-form */
374 {as: AVCLZ, a1: C_VREG, a6: C_VREG, type_: 85, size: 4}, /* vector count leading zeros, vx-form */
375 {as: AVPOPCNT, a1: C_VREG, a6: C_VREG, type_: 85, size: 4}, /* vector population count, vx-form */
378 {as: AVCMPEQ, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare equal, vc-form */
379 {as: AVCMPGT, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare greater than, vc-form */
380 {as: AVCMPNEZB, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare not equal, vx-form */
383 {as: AVMRGOW, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector merge odd word, vx-form */
386 {as: AVPERM, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector permute, va-form */
388 /* Vector bit permute */
389 {as: AVBPERMQ, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector bit permute, vx-form */
392 {as: AVSEL, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector select, va-form */
395 {as: AVSPLTB, a1: C_SCON, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector splat, vx-form */
396 {as: AVSPLTB, a1: C_ADDCON, a2: C_VREG, a6: C_VREG, type_: 82, size: 4},
397 {as: AVSPLTISB, a1: C_SCON, a6: C_VREG, type_: 82, size: 4}, /* vector splat immediate, vx-form */
398 {as: AVSPLTISB, a1: C_ADDCON, a6: C_VREG, type_: 82, size: 4},
401 {as: AVCIPH, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES cipher, vx-form */
402 {as: AVNCIPH, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES inverse cipher, vx-form */
403 {as: AVSBOX, a1: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES subbytes, vx-form */
406 {as: AVSHASIGMA, a1: C_ANDCON, a2: C_VREG, a3: C_ANDCON, a6: C_VREG, type_: 82, size: 4}, /* vector SHA sigma, vx-form */
408 /* VSX vector load */
409 {as: ALXVD2X, a1: C_SOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx vector load, xx1-form */
410 {as: ALXV, a1: C_SOREG, a6: C_VSREG, type_: 96, size: 4}, /* vsx vector load, dq-form */
411 {as: ALXVL, a1: C_REG, a2: C_REG, a6: C_VSREG, type_: 98, size: 4}, /* vsx vector load length */
413 /* VSX vector store */
414 {as: ASTXVD2X, a1: C_VSREG, a6: C_SOREG, type_: 86, size: 4}, /* vsx vector store, xx1-form */
415 {as: ASTXV, a1: C_VSREG, a6: C_SOREG, type_: 97, size: 4}, /* vsx vector store, dq-form */
416 {as: ASTXVL, a1: C_VSREG, a2: C_REG, a6: C_REG, type_: 99, size: 4}, /* vsx vector store with length x-form */
418 /* VSX scalar load */
419 {as: ALXSDX, a1: C_SOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx scalar load, xx1-form */
421 /* VSX scalar store */
422 {as: ASTXSDX, a1: C_VSREG, a6: C_SOREG, type_: 86, size: 4}, /* vsx scalar store, xx1-form */
424 /* VSX scalar as integer load */
425 {as: ALXSIWAX, a1: C_SOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx scalar as integer load, xx1-form */
427 /* VSX scalar store as integer */
428 {as: ASTXSIWX, a1: C_VSREG, a6: C_SOREG, type_: 86, size: 4}, /* vsx scalar as integer store, xx1-form */
430 /* VSX move from VSR */
431 {as: AMFVSRD, a1: C_VSREG, a6: C_REG, type_: 88, size: 4}, /* vsx move from vsr, xx1-form */
432 {as: AMFVSRD, a1: C_FREG, a6: C_REG, type_: 88, size: 4},
433 {as: AMFVSRD, a1: C_VREG, a6: C_REG, type_: 88, size: 4},
435 /* VSX move to VSR */
436 {as: AMTVSRD, a1: C_REG, a6: C_VSREG, type_: 88, size: 4}, /* vsx move to vsr, xx1-form */
437 {as: AMTVSRD, a1: C_REG, a2: C_REG, a6: C_VSREG, type_: 88, size: 4},
438 {as: AMTVSRD, a1: C_REG, a6: C_FREG, type_: 88, size: 4},
439 {as: AMTVSRD, a1: C_REG, a6: C_VREG, type_: 88, size: 4},
442 {as: AXXLAND, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx and, xx3-form */
443 {as: AXXLOR, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx or, xx3-form */
446 {as: AXXSEL, a1: C_VSREG, a2: C_VSREG, a3: C_VSREG, a6: C_VSREG, type_: 91, size: 4}, /* vsx select, xx4-form */
449 {as: AXXMRGHW, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx merge, xx3-form */
452 {as: AXXSPLTW, a1: C_VSREG, a3: C_SCON, a6: C_VSREG, type_: 89, size: 4}, /* vsx splat, xx2-form */
453 {as: AXXSPLTIB, a1: C_SCON, a6: C_VSREG, type_: 100, size: 4}, /* vsx splat, xx2-form */
456 {as: AXXPERM, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx permute, xx3-form */
459 {as: AXXSLDWI, a1: C_VSREG, a2: C_VSREG, a3: C_SCON, a6: C_VSREG, type_: 90, size: 4}, /* vsx shift immediate, xx3-form */
461 /* VSX reverse bytes */
462 {as: AXXBRQ, a1: C_VSREG, a6: C_VSREG, type_: 101, size: 4}, /* vsx reverse bytes */
464 /* VSX scalar FP-FP conversion */
465 {as: AXSCVDPSP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar fp-fp conversion, xx2-form */
467 /* VSX vector FP-FP conversion */
468 {as: AXVCVDPSP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector fp-fp conversion, xx2-form */
470 /* VSX scalar FP-integer conversion */
471 {as: AXSCVDPSXDS, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar fp-integer conversion, xx2-form */
473 /* VSX scalar integer-FP conversion */
474 {as: AXSCVSXDDP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar integer-fp conversion, xx2-form */
476 /* VSX vector FP-integer conversion */
477 {as: AXVCVDPSXDS, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector fp-integer conversion, xx2-form */
479 /* VSX vector integer-FP conversion */
480 {as: AXVCVSXDDP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector integer-fp conversion, xx2-form */
482 {as: ACMP, a1: C_REG, a6: C_REG, type_: 70, size: 4},
483 {as: ACMP, a1: C_REG, a2: C_REG, a6: C_REG, type_: 70, size: 4},
484 {as: ACMP, a1: C_REG, a6: C_ADDCON, type_: 71, size: 4},
485 {as: ACMP, a1: C_REG, a2: C_REG, a6: C_ADDCON, type_: 71, size: 4},
486 {as: ACMPU, a1: C_REG, a6: C_REG, type_: 70, size: 4},
487 {as: ACMPU, a1: C_REG, a2: C_REG, a6: C_REG, type_: 70, size: 4},
488 {as: ACMPU, a1: C_REG, a6: C_ANDCON, type_: 71, size: 4},
489 {as: ACMPU, a1: C_REG, a2: C_REG, a6: C_ANDCON, type_: 71, size: 4},
490 {as: AFCMPO, a1: C_FREG, a6: C_FREG, type_: 70, size: 4},
491 {as: AFCMPO, a1: C_FREG, a2: C_REG, a6: C_FREG, type_: 70, size: 4},
492 {as: ATW, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 60, size: 4},
493 {as: ATW, a1: C_LCON, a2: C_REG, a6: C_ADDCON, type_: 61, size: 4},
494 {as: ADCBF, a1: C_ZOREG, type_: 43, size: 4},
495 {as: ADCBF, a1: C_SOREG, type_: 43, size: 4},
496 {as: ADCBF, a1: C_ZOREG, a2: C_REG, a6: C_SCON, type_: 43, size: 4},
497 {as: ADCBF, a1: C_SOREG, a6: C_SCON, type_: 43, size: 4},
498 {as: AECOWX, a1: C_REG, a2: C_REG, a6: C_ZOREG, type_: 44, size: 4},
499 {as: AECIWX, a1: C_ZOREG, a2: C_REG, a6: C_REG, type_: 45, size: 4},
500 {as: AECOWX, a1: C_REG, a6: C_ZOREG, type_: 44, size: 4},
501 {as: AECIWX, a1: C_ZOREG, a6: C_REG, type_: 45, size: 4},
502 {as: ALDAR, a1: C_ZOREG, a6: C_REG, type_: 45, size: 4},
503 {as: ALDAR, a1: C_ZOREG, a3: C_ANDCON, a6: C_REG, type_: 45, size: 4},
504 {as: AEIEIO, type_: 46, size: 4},
505 {as: ATLBIE, a1: C_REG, type_: 49, size: 4},
506 {as: ATLBIE, a1: C_SCON, a6: C_REG, type_: 49, size: 4},
507 {as: ASLBMFEE, a1: C_REG, a6: C_REG, type_: 55, size: 4},
508 {as: ASLBMTE, a1: C_REG, a6: C_REG, type_: 55, size: 4},
509 {as: ASTSW, a1: C_REG, a6: C_ZOREG, type_: 44, size: 4},
510 {as: ASTSW, a1: C_REG, a3: C_LCON, a6: C_ZOREG, type_: 41, size: 4},
511 {as: ALSW, a1: C_ZOREG, a6: C_REG, type_: 45, size: 4},
512 {as: ALSW, a1: C_ZOREG, a3: C_LCON, a6: C_REG, type_: 42, size: 4},
513 {as: obj.AUNDEF, type_: 78, size: 4},
514 {as: obj.APCDATA, a1: C_LCON, a6: C_LCON, type_: 0, size: 0},
515 {as: obj.AFUNCDATA, a1: C_SCON, a6: C_ADDR, type_: 0, size: 0},
516 {as: obj.ANOP, type_: 0, size: 0},
517 {as: obj.ANOP, a1: C_LCON, type_: 0, size: 0}, // NOP operand variations added for #40689
518 {as: obj.ANOP, a1: C_REG, type_: 0, size: 0}, // to preserve previous behavior
519 {as: obj.ANOP, a1: C_FREG, type_: 0, size: 0},
520 {as: obj.ADUFFZERO, a6: C_LBRA, type_: 11, size: 4}, // same as ABR/ABL
521 {as: obj.ADUFFCOPY, a6: C_LBRA, type_: 11, size: 4}, // same as ABR/ABL
522 {as: obj.APCALIGN, a1: C_LCON, type_: 0, size: 0}, // align code
524 {as: obj.AXXX, type_: 0, size: 4},
// oprange maps each opcode (indexed by its value masked with obj.AMask) to
// the slice of optab entries listing its accepted operand combinations.
// It is populated by buildop (not shown in this chunk).
var oprange [ALAST & obj.AMask][]Optab

// xcmp records compatibility between operand classes; presumably
// xcmp[a][b] reports whether class b may be used where class a is
// expected — populated elsewhere (buildop), TODO(review): confirm orientation.
var xcmp [C_NCLASS][C_NCLASS]bool
531 // padding bytes to add to align code as requested
532 func addpad(pc, a int64, ctxt *obj.Link, cursym *obj.LSym) int {
533 // For 16 and 32 byte alignment, there is a tradeoff
534 // between aligning the code and adding too many NOPs.
541 // Align to 16 bytes if possible but add at
550 // Align to 32 bytes if possible but add at
560 // When 32 byte alignment is requested on Linux,
561 // promote the function's alignment to 32. On AIX
562 // the function alignment is not changed which might
563 // result in 16 byte alignment but that is still fine.
564 // TODO: alignment on AIX
565 if ctxt.Headtype != objabi.Haix && cursym.Func().Align < 32 {
566 cursym.Func().Align = 32
569 ctxt.Diag("Unexpected alignment: %d for PCALIGN directive\n", a)
// Get the implied register of an operand which doesn't specify one. These show up
// in handwritten asm like "MOVD R5, foosymbol" where a base register is not supplied,
// or "MOVD R5, foo+10(SP)" where a pseudo-register is used. The other common case is when
// generating constants in register like "MOVD $constant, Rx".
578 func (c *ctxt9) getimpliedreg(a *obj.Addr, p *obj.Prog) int {
580 case C_ADDCON, C_ANDCON, C_UCON, C_LCON, C_SCON, C_ZCON:
582 case C_SACON, C_LACON:
584 case C_LOREG, C_SOREG, C_ZOREG:
586 case obj.NAME_EXTERN, obj.NAME_STATIC:
588 case obj.NAME_AUTO, obj.NAME_PARAM:
594 c.ctxt.Diag("failed to determine implied reg for class %v (%v)", DRconv(oclass(a)), p)
// span9 assigns PCs to every Prog in cursym, expands out-of-range
// conditional branches, and emits the final machine code bytes.
// NOTE(review): this extract is elided; many intervening lines
// (size accounting, loop bookkeeping, relocation of pc) are missing.
598 func span9(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
599 p := cursym.Func().Text
600 if p == nil || p.Link == nil { // handle external functions and ELF section symbols
// Guard: the operand-combination table must be built before assembling.
604 if oprange[AANDN&obj.AMask] == nil {
605 ctxt.Diag("ppc64 ops not initialized, call ppc64.buildop first")
608 c := ctxt9{ctxt: ctxt, newprog: newprog, cursym: cursym, autosize: int32(p.To.Offset)}
// First pass: assign tentative PCs, padding at PCALIGN pseudo-ops.
615 for p = p.Link; p != nil; p = p.Link {
620 if p.As == obj.APCALIGN {
621 a := c.vregoff(&p.From)
622 m = addpad(pc, a, ctxt, cursym)
624 if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA {
625 ctxt.Diag("zero-width instruction\n%v", p)
636 * if any procedure is large enough to
637 * generate a large SBRA branch, then
638 * generate extra passes putting branches
639 * around jmps to fix. this is rare.
// Branch-fixup passes: rewrite conditional branches whose targets do
// not fit in the 16-bit displacement (with 10 bytes of slack).
648 for p = c.cursym.Func().Text.Link; p != nil; p = p.Link {
652 // very large conditional branches
653 if (o.type_ == 16 || o.type_ == 17) && p.To.Target() != nil {
654 otxt = p.To.Target().Pc - pc
655 if otxt < -(1<<15)+10 || otxt >= (1<<15)-10 {
660 q.To.Type = obj.TYPE_BRANCH
661 q.To.SetTarget(p.To.Target())
667 q.To.Type = obj.TYPE_BRANCH
668 q.To.SetTarget(q.Link.Link)
678 if p.As == obj.APCALIGN {
679 a := c.vregoff(&p.From)
680 m = addpad(pc, a, ctxt, cursym)
682 if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA {
683 ctxt.Diag("zero-width instruction\n%v", p)
// Round the function size up to the alignment boundary.
695 if r := pc & funcAlignMask; r != 0 {
702 * lay out the code, emitting code and data relocations.
705 c.cursym.Grow(c.cursym.Size)
// Final pass: encode each instruction into out[] and copy to the symbol.
710 for p := c.cursym.Func().Text.Link; p != nil; p = p.Link {
713 if int(o.size) > 4*len(out) {
714 log.Fatalf("out array in span9 is too small, need at least %d for %v", o.size/4, p)
716 // asmout is not set up to add large amounts of padding
717 if o.type_ == 0 && p.As == obj.APCALIGN {
// Pad with OR R0,R0,R0 (the canonical ppc64 nop).
718 pad := LOP_RRR(OP_OR, REGZERO, REGZERO, REGZERO)
719 aln := c.vregoff(&p.From)
720 v := addpad(p.Pc, aln, c.ctxt, c.cursym)
722 // Same padding instruction for all
723 for i = 0; i < int32(v/4); i++ {
724 c.ctxt.Arch.ByteOrder.PutUint32(bp, pad)
729 c.asmout(p, o, out[:])
730 for i = 0; i < int32(o.size/4); i++ {
731 c.ctxt.Arch.ByteOrder.PutUint32(bp, out[i])
// isint32 reports whether v is representable as a signed 32-bit integer.
func isint32(v int64) bool {
	return -1<<31 <= v && v < 1<<31
}
// isuint32 reports whether v is representable as an unsigned 32-bit integer.
func isuint32(v uint64) bool {
	return v>>32 == 0
}
// aclass classifies operand a into one of the C_* operand classes used
// to index the optab, setting c.instoffset for constant/offset operands.
// NOTE(review): this extract is elided; the switch framing, return
// statements and several cases are missing between the visible lines.
746 func (c *ctxt9) aclass(a *obj.Addr) int {
// Register operands: classify by register bank.
752 if REG_R0 <= a.Reg && a.Reg <= REG_R31 {
755 if REG_F0 <= a.Reg && a.Reg <= REG_F31 {
758 if REG_V0 <= a.Reg && a.Reg <= REG_V31 {
761 if REG_VS0 <= a.Reg && a.Reg <= REG_VS63 {
764 if REG_CR0 <= a.Reg && a.Reg <= REG_CR7 || a.Reg == REG_CR {
767 if REG_SPR0 <= a.Reg && a.Reg <= REG_SPR0+1023 {
782 if REG_DCR0 <= a.Reg && a.Reg <= REG_DCR0+1023 {
785 if a.Reg == REG_FPSCR {
// Memory operands: classify by symbol name kind and offset magnitude.
792 case obj.NAME_GOTREF, obj.NAME_TOCREF:
795 case obj.NAME_EXTERN,
797 c.instoffset = a.Offset
800 } else if a.Sym.Type == objabi.STLSBSS {
801 // For PIC builds, use 12 byte got initial-exec TLS accesses.
802 if c.ctxt.Flag_shared {
805 // Otherwise, use 8 byte local-exec TLS accesses.
812 c.instoffset = int64(c.autosize) + a.Offset
813 if c.instoffset >= -BIG && c.instoffset < BIG {
819 c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.FixedFrameSize()
820 if c.instoffset >= -BIG && c.instoffset < BIG {
826 c.instoffset = a.Offset
827 if c.instoffset == 0 {
830 if c.instoffset >= -BIG && c.instoffset < BIG {
838 case obj.TYPE_TEXTSIZE:
841 case obj.TYPE_FCONST:
842 // The only cases where FCONST will occur are with float64 +/- 0.
843 // All other float constants are generated in memory.
844 f64 := a.Val.(float64)
846 if math.Signbit(f64) {
851 log.Fatalf("Unexpected nonzero FCONST operand %v", a)
// Constant operands: classify by value range (address-of cases first).
857 c.instoffset = a.Offset
859 if -BIG <= c.instoffset && c.instoffset <= BIG {
862 if isint32(c.instoffset) {
868 case obj.NAME_EXTERN,
874 c.instoffset = a.Offset
878 c.instoffset = int64(c.autosize) + a.Offset
879 if c.instoffset >= -BIG && c.instoffset < BIG {
885 c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.FixedFrameSize()
886 if c.instoffset >= -BIG && c.instoffset < BIG {
// Plain integer constants: pick the narrowest constant class.
895 if c.instoffset >= 0 {
896 if c.instoffset == 0 {
899 if c.instoffset <= 0x7fff {
902 if c.instoffset <= 0xffff {
905 if c.instoffset&0xffff == 0 && isuint32(uint64(c.instoffset)) { /* && (instoffset & (1<<31)) == 0) */
908 if isint32(c.instoffset) || isuint32(uint64(c.instoffset)) {
914 if c.instoffset >= -0x8000 {
917 if c.instoffset&0xffff == 0 && isint32(c.instoffset) {
920 if isint32(c.instoffset) {
925 case obj.TYPE_BRANCH:
926 if a.Sym != nil && c.ctxt.Flag_dynlink {
935 func prasm(p *obj.Prog) {
936 fmt.Printf("%v\n", p)
// oplook finds the Optab entry matching p's opcode and operand classes,
// caching classes in the Addr.Class fields (stored off-by-one so zero
// means "not yet classified") and the chosen entry index in p.Optab.
// NOTE(review): this extract is elided; class-compatibility table setup
// and the search-loop framing are missing between the visible lines.
939 func (c *ctxt9) oplook(p *obj.Prog) *Optab {
944 a1 = int(p.From.Class)
946 a1 = c.aclass(&p.From) + 1
947 p.From.Class = int8(a1)
// Classify up to three RestArgs the same way, defaulting to C_NONE.
951 argsv := [3]int{C_NONE + 1, C_NONE + 1, C_NONE + 1}
952 for i, ap := range p.RestArgs {
953 argsv[i] = int(ap.Addr.Class)
955 argsv[i] = c.aclass(&ap.Addr) + 1
956 ap.Addr.Class = int8(argsv[i])
964 a6 := int(p.To.Class)
966 a6 = c.aclass(&p.To) + 1
967 p.To.Class = int8(a6)
// The second operand (p.Reg) is classified directly by register bank.
973 if REG_R0 <= p.Reg && p.Reg <= REG_R31 {
975 } else if REG_V0 <= p.Reg && p.Reg <= REG_V31 {
977 } else if REG_VS0 <= p.Reg && p.Reg <= REG_VS63 {
979 } else if REG_F0 <= p.Reg && p.Reg <= REG_F31 {
984 // c.ctxt.Logf("oplook %v %d %d %d %d\n", p, a1, a2, a3, a4, a5, a6)
// Scan the pre-sorted slice of candidate entries for this opcode.
985 ops := oprange[p.As&obj.AMask]
993 if int(op.a2) == a2 && c1[op.a1] && c3[op.a3] && c4[op.a4] && c5[op.a5] && c6[op.a6] {
// Cache the (1-based) optab index so the next lookup is O(1).
994 p.Optab = uint16(cap(optab) - cap(ops) + i + 1)
999 c.ctxt.Diag("illegal combination %v %v %v %v %v %v %v", p.As, DRconv(a1), DRconv(a2), DRconv(a3), DRconv(a4), DRconv(a5), DRconv(a6))
// cmp reports whether operand class b is acceptable where the optab
// entry asks for class a (i.e. b is a "subset" of a).
// NOTE(review): this extract is elided; the switch-on-a framing and
// most cases/returns are missing between the visible lines.
1007 func cmp(a int, b int) bool {
1013 if b == C_ZCON || b == C_SCON || b == C_UCON || b == C_ADDCON || b == C_ANDCON {
1018 if b == C_ZCON || b == C_SCON {
1023 if b == C_ZCON || b == C_SCON {
1028 if b == C_LR || b == C_XER || b == C_CTR {
1058 if b == C_SOREG || b == C_ZOREG {
// $0 matches a register class only when R0 reads as zero.
1064 return r0iszero != 0 /*TypeKind(100016)*/
// Len implements sort.Interface for the optab slice.
// NOTE(review): the return statement is elided in this extract.
1076 func (x ocmp) Len() int {
1080 func (x ocmp) Swap(i, j int) {
1081 x[i], x[j] = x[j], x[i]
1084 // Used when sorting the optab. Sorting is
1085 // done in a way so that the best choice of
1086 // opcode/operand combination is considered first.
// NOTE(review): this extract is elided; the p1/p2 bindings and the
// "if n != 0 { return n < 0 }" steps between comparisons are missing.
1087 func (x ocmp) Less(i, j int) bool {
// Primary key: opcode.
1090 n := int(p1.as) - int(p2.as)
1095 // Consider those that generate fewer
1096 // instructions first.
1097 n = int(p1.size) - int(p2.size)
1101 // operand order should match
1102 // better choices first
1103 n = int(p1.a1) - int(p2.a1)
1107 n = int(p1.a2) - int(p2.a2)
1111 n = int(p1.a3) - int(p2.a3)
1115 n = int(p1.a4) - int(p2.a4)
1119 n = int(p1.a5) - int(p2.a5)
1123 n = int(p1.a6) - int(p2.a6)
1130 // Add an entry to the opcode table for
1131 // a new opcode b0 with the same operand combinations
1133 func opset(a, b0 obj.As) {
1134 oprange[a&obj.AMask] = oprange[b0]
1137 // Build the opcode table
// buildop sorts the optab and populates oprange so each opcode maps to
// its slice of candidate entries, then registers every derived opcode
// (opset) that shares an entry with a representative opcode r0.
// NOTE(review): this extract is heavily elided; the switch framing,
// many cases and most opset calls are missing between visible lines.
1138 func buildop(ctxt *obj.Link) {
1139 if oprange[AANDN&obj.AMask] != nil {
1140 // Already initialized; stop now.
1141 // This happens in the cmd/asm tests,
1142 // each of which re-initializes the arch.
// Precompute the class-compatibility matrix via cmp().
1148 for i := 0; i < C_NCLASS; i++ {
1149 for n = 0; n < C_NCLASS; n++ {
1155 for n = 0; optab[n].as != obj.AXXX; n++ {
1157 sort.Sort(ocmp(optab[:n]))
// Group the sorted optab into per-opcode slices.
1158 for i := 0; i < n; i++ {
1162 for optab[i].as == r {
1165 oprange[r0] = optab[start:i]
1170 ctxt.Diag("unknown op in build: %v", r)
1171 log.Fatalf("instruction missing from switch in asm9.go:buildop: %v", r)
1173 case ADCBF: /* unary indexed: op (b+a); op (b) */
1182 case AECOWX: /* indexed store: op s,(b+a); op s,(b) */
1188 case AREM: /* macro */
1200 case ADIVW: /* op Rb[,Ra],Rd */
1205 opset(AMULHWUCC, r0)
1207 opset(AMULLWVCC, r0)
1215 opset(ADIVWUVCC, r0)
1232 opset(AMULHDUCC, r0)
1234 opset(AMULLDVCC, r0)
1241 opset(ADIVDEUCC, r0)
1246 opset(ADIVDUVCC, r0)
1258 case APOPCNTD: /* popcntd, popcntw, popcntb, cnttzw, cnttzd */
1262 opset(ACNTTZWCC, r0)
1264 opset(ACNTTZDCC, r0)
1266 case ACOPY: /* copy, paste. */
1269 case AMADDHD: /* maddhd, maddhdu, maddld */
1273 case AMOVBZ: /* lbz, stz, rlwm(r/r), lhz, lha, stz, and x variants */
1277 case AMOVBZU: /* lbz[x]u, stb[x]u, lhz[x]u, lha[x]u, sth[u]x, ld[x]u, std[u]x */
1286 case ALV: /* lvebx, lvehx, lvewx, lvx, lvxl, lvsl, lvsr */
1295 case ASTV: /* stvebx, stvehx, stvewx, stvx, stvxl */
1302 case AVAND: /* vand, vandc, vnand */
1307 case AVMRGOW: /* vmrgew, vmrgow */
1310 case AVOR: /* vor, vorc, vxor, vnor, veqv */
1317 case AVADDUM: /* vaddubm, vadduhm, vadduwm, vaddudm, vadduqm */
1324 case AVADDCU: /* vaddcuq, vaddcuw */
1328 case AVADDUS: /* vaddubs, vadduhs, vadduws */
1333 case AVADDSS: /* vaddsbs, vaddshs, vaddsws */
1338 case AVADDE: /* vaddeuqm, vaddecuq */
1339 opset(AVADDEUQM, r0)
1340 opset(AVADDECUQ, r0)
1342 case AVSUBUM: /* vsububm, vsubuhm, vsubuwm, vsubudm, vsubuqm */
1349 case AVSUBCU: /* vsubcuq, vsubcuw */
1353 case AVSUBUS: /* vsububs, vsubuhs, vsubuws */
1358 case AVSUBSS: /* vsubsbs, vsubshs, vsubsws */
1363 case AVSUBE: /* vsubeuqm, vsubecuq */
1364 opset(AVSUBEUQM, r0)
1365 opset(AVSUBECUQ, r0)
1367 case AVMULESB: /* vmulesb, vmulosb, vmuleub, vmuloub, vmulosh, vmulouh, vmulesw, vmulosw, vmuleuw, vmulouw, vmuluwm */
1380 case AVPMSUM: /* vpmsumb, vpmsumh, vpmsumw, vpmsumd */
1386 case AVR: /* vrlb, vrlh, vrlw, vrld */
1392 case AVS: /* vs[l,r], vs[l,r]o, vs[l,r]b, vs[l,r]h, vs[l,r]w, vs[l,r]d */
1406 case AVSA: /* vsrab, vsrah, vsraw, vsrad */
1412 case AVSOI: /* vsldoi */
1415 case AVCLZ: /* vclzb, vclzh, vclzw, vclzd */
1421 case AVPOPCNT: /* vpopcntb, vpopcnth, vpopcntw, vpopcntd */
1422 opset(AVPOPCNTB, r0)
1423 opset(AVPOPCNTH, r0)
1424 opset(AVPOPCNTW, r0)
1425 opset(AVPOPCNTD, r0)
1427 case AVCMPEQ: /* vcmpequb[.], vcmpequh[.], vcmpequw[.], vcmpequd[.] */
1428 opset(AVCMPEQUB, r0)
1429 opset(AVCMPEQUBCC, r0)
1430 opset(AVCMPEQUH, r0)
1431 opset(AVCMPEQUHCC, r0)
1432 opset(AVCMPEQUW, r0)
1433 opset(AVCMPEQUWCC, r0)
1434 opset(AVCMPEQUD, r0)
1435 opset(AVCMPEQUDCC, r0)
1437 case AVCMPGT: /* vcmpgt[u,s]b[.], vcmpgt[u,s]h[.], vcmpgt[u,s]w[.], vcmpgt[u,s]d[.] */
1438 opset(AVCMPGTUB, r0)
1439 opset(AVCMPGTUBCC, r0)
1440 opset(AVCMPGTUH, r0)
1441 opset(AVCMPGTUHCC, r0)
1442 opset(AVCMPGTUW, r0)
1443 opset(AVCMPGTUWCC, r0)
1444 opset(AVCMPGTUD, r0)
1445 opset(AVCMPGTUDCC, r0)
1446 opset(AVCMPGTSB, r0)
1447 opset(AVCMPGTSBCC, r0)
1448 opset(AVCMPGTSH, r0)
1449 opset(AVCMPGTSHCC, r0)
1450 opset(AVCMPGTSW, r0)
1451 opset(AVCMPGTSWCC, r0)
1452 opset(AVCMPGTSD, r0)
1453 opset(AVCMPGTSDCC, r0)
1455 case AVCMPNEZB: /* vcmpnezb[.] */
1456 opset(AVCMPNEZBCC, r0)
1458 opset(AVCMPNEBCC, r0)
1460 opset(AVCMPNEHCC, r0)
1462 opset(AVCMPNEWCC, r0)
1464 case AVPERM: /* vperm */
1465 opset(AVPERMXOR, r0)
1468 case AVBPERMQ: /* vbpermq, vbpermd */
1471 case AVSEL: /* vsel */
1474 case AVSPLTB: /* vspltb, vsplth, vspltw */
1478 case AVSPLTISB: /* vspltisb, vspltish, vspltisw */
1479 opset(AVSPLTISH, r0)
1480 opset(AVSPLTISW, r0)
1482 case AVCIPH: /* vcipher, vcipherlast */
1484 opset(AVCIPHERLAST, r0)
1486 case AVNCIPH: /* vncipher, vncipherlast */
1487 opset(AVNCIPHER, r0)
1488 opset(AVNCIPHERLAST, r0)
1490 case AVSBOX: /* vsbox */
1493 case AVSHASIGMA: /* vshasigmaw, vshasigmad */
1494 opset(AVSHASIGMAW, r0)
1495 opset(AVSHASIGMAD, r0)
1497 case ALXVD2X: /* lxvd2x, lxvdsx, lxvw4x, lxvh8x, lxvb16x */
1503 case ALXV: /* lxv */
1506 case ALXVL: /* lxvl, lxvll, lxvx */
1510 case ASTXVD2X: /* stxvd2x, stxvdsx, stxvw4x, stxvh8x, stxvb16x */
1513 opset(ASTXVB16X, r0)
1515 case ASTXV: /* stxv */
1518 case ASTXVL: /* stxvl, stxvll, stvx */
1522 case ALXSDX: /* lxsdx */
1525 case ASTXSDX: /* stxsdx */
1528 case ALXSIWAX: /* lxsiwax, lxsiwzx */
1531 case ASTXSIWX: /* stxsiwx */
1534 case AMFVSRD: /* mfvsrd, mfvsrwz (and extended mnemonics), mfvsrld */
1540 case AMTVSRD: /* mtvsrd, mtvsrwa, mtvsrwz (and extended mnemonics), mtvsrdd, mtvsrws */
1548 case AXXLAND: /* xxland, xxlandc, xxleqv, xxlnand */
1553 case AXXLOR: /* xxlorc, xxlnor, xxlor, xxlxor */
1559 case AXXSEL: /* xxsel */
1562 case AXXMRGHW: /* xxmrghw, xxmrglw */
1565 case AXXSPLTW: /* xxspltw */
1568 case AXXSPLTIB: /* xxspltib */
1569 opset(AXXSPLTIB, r0)
1571 case AXXPERM: /* xxpermdi */
1574 case AXXSLDWI: /* xxsldwi */
1575 opset(AXXPERMDI, r0)
1578 case AXXBRQ: /* xxbrq, xxbrd, xxbrw, xxbrh */
1583 case AXSCVDPSP: /* xscvdpsp, xscvspdp, xscvdpspn, xscvspdpn */
1584 opset(AXSCVSPDP, r0)
1585 opset(AXSCVDPSPN, r0)
1586 opset(AXSCVSPDPN, r0)
1588 case AXVCVDPSP: /* xvcvdpsp, xvcvspdp */
1589 opset(AXVCVSPDP, r0)
1591 case AXSCVDPSXDS: /* xscvdpsxds, xscvdpsxws, xscvdpuxds, xscvdpuxws */
1592 opset(AXSCVDPSXWS, r0)
1593 opset(AXSCVDPUXDS, r0)
1594 opset(AXSCVDPUXWS, r0)
1596 case AXSCVSXDDP: /* xscvsxddp, xscvuxddp, xscvsxdsp, xscvuxdsp */
1597 opset(AXSCVUXDDP, r0)
1598 opset(AXSCVSXDSP, r0)
1599 opset(AXSCVUXDSP, r0)
1601 case AXVCVDPSXDS: /* xvcvdpsxds, xvcvdpsxws, xvcvdpuxds, xvcvdpuxws, xvcvspsxds, xvcvspsxws, xvcvspuxds, xvcvspuxws */
1602 opset(AXVCVDPSXDS, r0)
1603 opset(AXVCVDPSXWS, r0)
1604 opset(AXVCVDPUXDS, r0)
1605 opset(AXVCVDPUXWS, r0)
1606 opset(AXVCVSPSXDS, r0)
1607 opset(AXVCVSPSXWS, r0)
1608 opset(AXVCVSPUXDS, r0)
1609 opset(AXVCVSPUXWS, r0)
1611 case AXVCVSXDDP: /* xvcvsxddp, xvcvsxwdp, xvcvuxddp, xvcvuxwdp, xvcvsxdsp, xvcvsxwsp, xvcvuxdsp, xvcvuxwsp */
1612 opset(AXVCVSXWDP, r0)
1613 opset(AXVCVUXDDP, r0)
1614 opset(AXVCVUXWDP, r0)
1615 opset(AXVCVSXDSP, r0)
1616 opset(AXVCVSXWSP, r0)
1617 opset(AXVCVUXDSP, r0)
1618 opset(AXVCVUXWSP, r0)
1620 case AAND: /* logical op Rb,Rs,Ra; no literal */
1634 case AADDME: /* op Ra, Rd */
1638 opset(AADDMEVCC, r0)
1642 opset(AADDZEVCC, r0)
1646 opset(ASUBMEVCC, r0)
1650 opset(ASUBZEVCC, r0)
1670 case AEXTSB: /* op Rs, Ra */
1676 opset(ACNTLZWCC, r0)
1680 opset(ACNTLZDCC, r0)
1682 case AFABS: /* fop [s,]d */
1694 opset(AFCTIWZCC, r0)
1698 opset(AFCTIDZCC, r0)
1702 opset(AFCFIDUCC, r0)
1704 opset(AFCFIDSCC, r0)
1716 opset(AFRSQRTECC, r0)
1720 opset(AFSQRTSCC, r0)
1727 opset(AFCPSGNCC, r0)
1740 opset(AFMADDSCC, r0)
1744 opset(AFMSUBSCC, r0)
1746 opset(AFNMADDCC, r0)
1748 opset(AFNMADDSCC, r0)
1750 opset(AFNMSUBCC, r0)
1752 opset(AFNMSUBSCC, r0)
1768 opset(AMTFSB0CC, r0)
1770 opset(AMTFSB1CC, r0)
1772 case ANEG: /* op [Ra,] Rd */
1778 case AOR: /* or/xor Rb,Rs,Ra; ori/xori $uimm,Rs,R */
1781 case AORIS: /* oris/xoris $uimm,Rs,Ra */
1796 case ASRAW: /* sraw Rb,Rs,Ra; srawi sh,Rs,Ra */
1800 opset(AEXTSWSLICC, r0)
1802 case ASRAD: /* sraw Rb,Rs,Ra; srawi sh,Rs,Ra */
1805 case ASUB: /* SUB Ra,Rb,Rd => subf Rd,ra,rb */
1833 opset(ARLDIMICC, r0)
1844 opset(ARLDICLCC, r0)
1846 opset(ARLDICRCC, r0)
1849 opset(ACLRLSLDI, r0)
1862 case ASYSCALL: /* just the op; flow of control */
1901 case AMOVW: /* load/store/move word with sign extension; move 32-bit literals */
1902 opset(AMOVWZ, r0) /* Same as above, but zero extended */
1906 AANDCC, /* and. Rb,Rs,Ra; andi. $uimm,Rs,Ra */
1911 AMOVD, /* load/store/move 64-bit values, including 32-bit literals with/without sign-extension */
1912 AMOVB, /* macro: move byte with sign extension */
1913 AMOVBU, /* macro: move byte with sign extension & update */
1915 /* op $s[,r2],r3; op r1[,r2],r3; no cc/v */
1916 ASUBC, /* op r1,$s,r3; op r1[,r2],r3 */
/* OPVXX1 packs an XX1-form opcode: primary opcode o in the top 6 bits, xo<<1, oe<<11. */
func OPVXX1(o uint32, xo uint32, oe uint32) uint32 {
	insn := o << 26
	insn |= xo << 1
	insn |= oe << 11
	return insn
}
/* OPVXX2 packs an XX2-form opcode: primary opcode o in the top 6 bits, xo<<2, oe<<11. */
func OPVXX2(o uint32, xo uint32, oe uint32) uint32 {
	insn := o << 26
	insn |= xo << 2
	insn |= oe << 11
	return insn
}
/* OPVXX2VA packs an XX2-form opcode with the extension bit at position 16. */
func OPVXX2VA(o uint32, xo uint32, oe uint32) uint32 {
	insn := o << 26
	insn |= xo << 2
	insn |= oe << 16
	return insn
}
/* OPVXX3 packs an XX3-form opcode: primary opcode o in the top 6 bits, xo<<3, oe<<11. */
func OPVXX3(o uint32, xo uint32, oe uint32) uint32 {
	insn := o << 26
	insn |= xo << 3
	insn |= oe << 11
	return insn
}
/* OPVXX4 packs an XX4-form opcode: primary opcode o in the top 6 bits, xo<<4, oe<<11. */
func OPVXX4(o uint32, xo uint32, oe uint32) uint32 {
	insn := o << 26
	insn |= xo << 4
	insn |= oe << 11
	return insn
}
/* OPDQ packs a DQ-form opcode: primary opcode o in the top 6 bits, xo unshifted, oe<<4. */
func OPDQ(o uint32, xo uint32, oe uint32) uint32 {
	insn := o << 26
	insn |= xo
	insn |= oe << 4
	return insn
}
/* OPVX packs a VX-form opcode: o in the top 6 bits, xo unshifted, oe<<11, rc in bit 0. */
func OPVX(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
	insn := o << 26
	insn |= xo
	insn |= oe << 11
	insn |= rc & 1
	return insn
}
/* OPVC packs a VC-form opcode: o in the top 6 bits, xo unshifted, oe<<11, rc in bit 10. */
func OPVC(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
	insn := o << 26
	insn |= xo
	insn |= oe << 11
	insn |= (rc & 1) << 10
	return insn
}
/* OPVCC packs an X/XO-style opcode: o in the top 6 bits, xo<<1, oe<<10, rc in bit 0. */
func OPVCC(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
	insn := o << 26
	insn |= xo << 1
	insn |= oe << 10
	insn |= rc & 1
	return insn
}
/* OPCC is OPVCC with oe fixed to 0 (inlined: o<<26 | xo<<1 | rc&1). */
func OPCC(o uint32, xo uint32, rc uint32) uint32 {
	return o<<26 | xo<<1 | rc&1
}
/* Generate MD-form opcode: primary opcode o in the top 6 bits, xo<<2, rc in bit 0. */
func OPMD(o, xo, rc uint32) uint32 {
	insn := o << 26
	insn |= xo << 2
	insn |= rc & 1
	return insn
}
/* the order is dest, a/s, b/imm for both arithmetic and logical operations. */
func AOP_RRR(op uint32, d uint32, a uint32, b uint32) uint32 {
	insn := op
	insn |= (d & 31) << 21
	insn |= (a & 31) << 16
	insn |= (b & 31) << 11
	return insn
}
/* VX-form 2-register operands, r/none/r */
func AOP_RR(op uint32, d uint32, a uint32) uint32 {
	insn := op
	insn |= (d & 31) << 21
	insn |= (a & 31) << 11
	return insn
}
/* VA-form 4-register operands */
func AOP_RRRR(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
	insn := op
	insn |= (d & 31) << 21
	insn |= (a & 31) << 16
	insn |= (b & 31) << 11
	insn |= (c & 31) << 6
	return insn
}
/* AOP_IRR packs dest d, register a and a 16-bit immediate (masked to 16 bits). */
func AOP_IRR(op uint32, d uint32, a uint32, simm uint32) uint32 {
	insn := op
	insn |= (d & 31) << 21
	insn |= (a & 31) << 16
	insn |= simm & 0xFFFF
	return insn
}
/* VX-form 2-register + UIM operands */
func AOP_VIRR(op uint32, d uint32, a uint32, simm uint32) uint32 {
	insn := op
	insn |= (d & 31) << 21
	insn |= (simm & 0xFFFF) << 16
	insn |= (a & 31) << 11
	return insn
}
/* VX-form 2-register + ST + SIX operands */
func AOP_IIRR(op uint32, d uint32, a uint32, sbit uint32, simm uint32) uint32 {
	insn := op
	insn |= (d & 31) << 21
	insn |= (a & 31) << 16
	insn |= (sbit & 1) << 15
	insn |= (simm & 0xF) << 11
	return insn
}
/* VA-form 3-register + SHB operands */
func AOP_IRRR(op uint32, d uint32, a uint32, b uint32, simm uint32) uint32 {
	insn := op
	insn |= (d & 31) << 21
	insn |= (a & 31) << 16
	insn |= (b & 31) << 11
	insn |= (simm & 0xF) << 6
	return insn
}
/* VX-form 1-register + SIM operands */
func AOP_IR(op uint32, d uint32, simm uint32) uint32 {
	insn := op
	insn |= (d & 31) << 21
	insn |= (simm & 31) << 16
	return insn
}
2025 /* XX1-form 3-register operands, 1 VSR operand */
// NOTE(review): the definition of local r is elided in this extract
// (presumably the 0-63 VSX register number derived from d — confirm
// against upstream before editing).
2026 func AOP_XX1(op uint32, d uint32, a uint32, b uint32) uint32 {
2027 /* For the XX-form encodings, we need the VSX register number to be exactly */
2028 /* between 0-63, so we can properly set the rightmost bits. */
2030 return op | (r&31)<<21 | (a&31)<<16 | (b&31)<<11 | (r&32)>>5
2033 /* XX2-form 3-register operands, 2 VSR operands */
// NOTE(review): the definitions of locals xt and xb are elided here
// (presumably 0-63 VSX register numbers for d and b — confirm upstream).
2034 func AOP_XX2(op uint32, d uint32, a uint32, b uint32) uint32 {
2037 return op | (xt&31)<<21 | (a&3)<<16 | (xb&31)<<11 | (xb&32)>>4 | (xt&32)>>5
2040 /* XX3-form 3 VSR operands */
// NOTE(review): the definitions of locals xt, xa, xb are elided here
// (presumably 0-63 VSX register numbers — confirm upstream).
2041 func AOP_XX3(op uint32, d uint32, a uint32, b uint32) uint32 {
2045 return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
2048 /* XX3-form 3 VSR operands + immediate */
// NOTE(review): the definitions of locals xt, xa, xb are elided here
// (presumably 0-63 VSX register numbers — confirm upstream).
2049 func AOP_XX3I(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
2053 return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (c&3)<<8 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
2056 /* XX4-form, 4 VSR operands */
// NOTE(review): the definitions of locals xt, xa, xb, xc are elided
// here (presumably 0-63 VSX register numbers — confirm upstream).
2057 func AOP_XX4(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
2062 return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (xc&31)<<6 | (xc&32)>>2 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
2065 /* DQ-form, VSR register, register + offset operands */
// NOTE(review): the definitions of locals r and dq are elided in this
// extract (r presumably the 0-63 VSX number of d; dq the offset b
// shifted right by 4 — confirm upstream).
2066 func AOP_DQ(op uint32, d uint32, a uint32, b uint32) uint32 {
2067 /* For the DQ-form encodings, we need the VSX register number to be exactly */
2068 /* between 0-63, so we can properly set the SX bit. */
2070 /* The EA for this instruction form is (RA) + DQ << 4, where DQ is a 12-bit signed integer. */
2071 /* In order to match the output of the GNU objdump (and make the usage in Go asm easier), the */
2072 /* instruction is called using the sign extended value (i.e. a valid offset would be -32752 or 32752, */
2073 /* not -2047 or 2047), so 'b' needs to be adjusted to the expected 12-bit DQ value. Bear in mind that */
2074 /* bits 0 to 3 in 'dq' need to be zero, otherwise this will generate an illegal instruction. */
2075 /* If in doubt how this instruction form is encoded, refer to ISA 3.0b, pages 492 and 507. */
2077 return op | (r&31)<<21 | (a&31)<<16 | (dq&4095)<<4 | (r&32)>>2
/* Z23-form, 3-register operands + CY field */
func AOP_Z23I(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
	insn := op
	insn |= (d & 31) << 21
	insn |= (a & 31) << 16
	insn |= (b & 31) << 11
	insn |= (c & 3) << 9
	return insn
}
/* X-form, 3-register operands + EH field */
func AOP_RRRI(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
	insn := op
	insn |= (d & 31) << 21
	insn |= (a & 31) << 16
	insn |= (b & 31) << 11
	insn |= c & 1
	return insn
}
/* LOP_RRR packs a logical op: source s goes in bits 21-25, dest a in 16-20, b in 11-15. */
func LOP_RRR(op uint32, a uint32, s uint32, b uint32) uint32 {
	insn := op
	insn |= (s & 31) << 21
	insn |= (a & 31) << 16
	insn |= (b & 31) << 11
	return insn
}
/* LOP_IRR packs a logical op with a 16-bit unsigned immediate (masked to 16 bits). */
func LOP_IRR(op uint32, a uint32, s uint32, uimm uint32) uint32 {
	insn := op
	insn |= (s & 31) << 21
	insn |= (a & 31) << 16
	insn |= uimm & 0xFFFF
	return insn
}
/* OP_BR packs an I-form branch: 26-bit word-aligned displacement li, absolute bit aa<<1. */
func OP_BR(op uint32, li uint32, aa uint32) uint32 {
	insn := op
	insn |= li & 0x03FFFFFC
	insn |= aa << 1
	return insn
}
/* OP_BC packs a B-form conditional branch: bo<<21, bi<<16, word-aligned bd, aa<<1. */
func OP_BC(op uint32, bo uint32, bi uint32, bd uint32, aa uint32) uint32 {
	insn := op
	insn |= (bo & 0x1F) << 21
	insn |= (bi & 0x1F) << 16
	insn |= bd & 0xFFFC
	insn |= aa << 1
	return insn
}
/* OP_BCR packs an XL-form branch-to-register: bo<<21, bi<<16. */
func OP_BCR(op uint32, bo uint32, bi uint32) uint32 {
	insn := op
	insn |= (bo & 0x1F) << 21
	insn |= (bi & 0x1F) << 16
	return insn
}
/* OP_RLW packs an M-form rotate: s<<21, a<<16, shift sh<<11, mask begin mb<<6, mask end me<<1. */
func OP_RLW(op uint32, a uint32, s uint32, sh uint32, mb uint32, me uint32) uint32 {
	insn := op
	insn |= (s & 31) << 21
	insn |= (a & 31) << 16
	insn |= (sh & 31) << 11
	insn |= (mb & 31) << 6
	insn |= (me & 31) << 1
	return insn
}
/* AOP_RLDIC packs an MD-form rotate: the 6-bit sh and mask m are split,
   with their high bits placed at positions 1 and 5 respectively. */
func AOP_RLDIC(op uint32, a uint32, s uint32, sh uint32, m uint32) uint32 {
	insn := op
	insn |= (s & 31) << 21
	insn |= (a & 31) << 16
	insn |= (sh & 31) << 11
	insn |= ((sh & 32) >> 5) << 1
	insn |= (m & 31) << 6
	insn |= ((m & 32) >> 5) << 5
	return insn
}
/* AOP_EXTSWSLI packs extswsli: a<<21, s<<16, 6-bit shift split with its high bit at position 1. */
func AOP_EXTSWSLI(op uint32, a uint32, s uint32, sh uint32) uint32 {
	insn := op
	insn |= (a & 31) << 21
	insn |= (s & 31) << 16
	insn |= (sh & 31) << 11
	insn |= ((sh & 32) >> 5) << 1
	return insn
}
/* AOP_ISEL packs an A-form isel: t<<21, a<<16, b<<11, condition field bc<<6. */
func AOP_ISEL(op uint32, t uint32, a uint32, b uint32, bc uint32) uint32 {
	insn := op
	insn |= (t & 31) << 21
	insn |= (a & 31) << 16
	insn |= (b & 31) << 11
	insn |= (bc & 0x1F) << 6
	return insn
}
// Pre-expanded opcode constants for frequently emitted instructions.
// NOTE(review): the const-block framing is elided in this extract.
2127 /* each rhs is OPVCC(_, _, _, _) */
2128 OP_ADD = 31<<26 | 266<<1 | 0<<10 | 0
2129 OP_ADDI = 14<<26 | 0<<1 | 0<<10 | 0
2130 OP_ADDIS = 15<<26 | 0<<1 | 0<<10 | 0
2131 OP_ANDI = 28<<26 | 0<<1 | 0<<10 | 0
2132 OP_EXTSB = 31<<26 | 954<<1 | 0<<10 | 0
2133 OP_EXTSH = 31<<26 | 922<<1 | 0<<10 | 0
2134 OP_EXTSW = 31<<26 | 986<<1 | 0<<10 | 0
2135 OP_ISEL = 31<<26 | 15<<1 | 0<<10 | 0
2136 OP_MCRF = 19<<26 | 0<<1 | 0<<10 | 0
2137 OP_MCRFS = 63<<26 | 64<<1 | 0<<10 | 0
2138 OP_MCRXR = 31<<26 | 512<<1 | 0<<10 | 0
2139 OP_MFCR = 31<<26 | 19<<1 | 0<<10 | 0
2140 OP_MFFS = 63<<26 | 583<<1 | 0<<10 | 0
2141 OP_MFSPR = 31<<26 | 339<<1 | 0<<10 | 0
2142 OP_MFSR = 31<<26 | 595<<1 | 0<<10 | 0
2143 OP_MFSRIN = 31<<26 | 659<<1 | 0<<10 | 0
2144 OP_MTCRF = 31<<26 | 144<<1 | 0<<10 | 0
2145 OP_MTFSF = 63<<26 | 711<<1 | 0<<10 | 0
2146 OP_MTFSFI = 63<<26 | 134<<1 | 0<<10 | 0
2147 OP_MTSPR = 31<<26 | 467<<1 | 0<<10 | 0
2148 OP_MTSR = 31<<26 | 210<<1 | 0<<10 | 0
2149 OP_MTSRIN = 31<<26 | 242<<1 | 0<<10 | 0
2150 OP_MULLW = 31<<26 | 235<<1 | 0<<10 | 0
2151 OP_MULLD = 31<<26 | 233<<1 | 0<<10 | 0
2152 OP_OR = 31<<26 | 444<<1 | 0<<10 | 0
2153 OP_ORI = 24<<26 | 0<<1 | 0<<10 | 0
2154 OP_ORIS = 25<<26 | 0<<1 | 0<<10 | 0
2155 OP_RLWINM = 21<<26 | 0<<1 | 0<<10 | 0
2156 OP_RLWNM = 23<<26 | 0<<1 | 0<<10 | 0
2157 OP_SUBF = 31<<26 | 40<<1 | 0<<10 | 0
2158 OP_RLDIC = 30<<26 | 4<<1 | 0<<10 | 0
2159 OP_RLDICR = 30<<26 | 2<<1 | 0<<10 | 0
2160 OP_RLDICL = 30<<26 | 0<<1 | 0<<10 | 0
2161 OP_RLDCL = 30<<26 | 8<<1 | 0<<10 | 0
// OP_EXTSWSLI uses the MD-ish xo<<2 placement rather than OPVCC's xo<<1.
2162 OP_EXTSWSLI = 31<<26 | 445<<2
2165 func oclass(a *obj.Addr) int {
2166 return int(a.Class) - 1
2174 // This function determines when a non-indexed load or store is D or
2175 // DS form for use in finding the size of the offset field in the instruction.
2176 // The size is needed when setting the offset value in the instruction
2177 // and when generating relocation for that field.
2178 // DS form instructions include: ld, ldu, lwa, std, stdu. All other
2179 // loads and stores with an offset field are D form. This function should
2180 // only be called with the same opcodes as are handled by opstore and opload.
// NOTE(review): this extract is elided; the switch framing and the
// return statements for each case are not visible here.
2181 func (c *ctxt9) opform(insn uint32) int {
2184 c.ctxt.Diag("bad insn in loadform: %x", insn)
// DS-form opcodes (14-bit offset, must be a multiple of 4).
2185 case OPVCC(58, 0, 0, 0), // ld
2186 OPVCC(58, 0, 0, 1), // ldu
2187 OPVCC(58, 0, 0, 0) | 1<<1, // lwa
2188 OPVCC(62, 0, 0, 0), // std
2189 OPVCC(62, 0, 0, 1): //stdu
// D-form opcodes (16-bit offset).
2191 case OP_ADDI, // add
2192 OPVCC(32, 0, 0, 0), // lwz
2193 OPVCC(33, 0, 0, 0), // lwzu
2194 OPVCC(34, 0, 0, 0), // lbz
2195 OPVCC(35, 0, 0, 0), // lbzu
2196 OPVCC(40, 0, 0, 0), // lhz
2197 OPVCC(41, 0, 0, 0), // lhzu
2198 OPVCC(42, 0, 0, 0), // lha
2199 OPVCC(43, 0, 0, 0), // lhau
2200 OPVCC(46, 0, 0, 0), // lmw
2201 OPVCC(48, 0, 0, 0), // lfs
2202 OPVCC(49, 0, 0, 0), // lfsu
2203 OPVCC(50, 0, 0, 0), // lfd
2204 OPVCC(51, 0, 0, 0), // lfdu
2205 OPVCC(36, 0, 0, 0), // stw
2206 OPVCC(37, 0, 0, 0), // stwu
2207 OPVCC(38, 0, 0, 0), // stb
2208 OPVCC(39, 0, 0, 0), // stbu
2209 OPVCC(44, 0, 0, 0), // sth
2210 OPVCC(45, 0, 0, 0), // sthu
2211 OPVCC(47, 0, 0, 0), // stmw
2212 OPVCC(52, 0, 0, 0), // stfs
2213 OPVCC(53, 0, 0, 0), // stfsu
2214 OPVCC(54, 0, 0, 0), // stfd
2215 OPVCC(55, 0, 0, 0): // stfdu
2221 // Encode instructions and create relocation for accessing s+d according to the
2222 // instruction op with source or destination (as appropriate) register reg.
// Returns the addis (o1) and the load/store/add (o2) instruction words;
// the actual symbol address is filled in later via the relocation.
// NOTE(review): this extract is elided; base/offset setup and the
// relocation size/add fields are not visible between these lines.
2223 func (c *ctxt9) symbolAccess(s *obj.LSym, d int64, reg int16, op uint32, reuse bool) (o1, o2 uint32) {
2224 if c.ctxt.Headtype == objabi.Haix {
2225 // Every symbol access must be made via a TOC anchor.
2226 c.ctxt.Diag("symbolAccess called for %s", s.Name)
2229 form := c.opform(op)
2230 if c.ctxt.Flag_shared {
2235 // If reg can be reused when computing the symbol address,
2236 // use it instead of REGTMP.
2238 o1 = AOP_IRR(OP_ADDIS, REGTMP, base, 0)
2239 o2 = AOP_IRR(op, uint32(reg), REGTMP, 0)
2241 o1 = AOP_IRR(OP_ADDIS, uint32(reg), base, 0)
2242 o2 = AOP_IRR(op, uint32(reg), uint32(reg), 0)
// Record the relocation over the two-instruction sequence.
2244 rel := obj.Addrel(c.cursym)
2245 rel.Off = int32(c.pc)
// TOC-relative relocations for shared builds; absolute otherwise.
// The _DS variants are used when the second insn is DS form.
2249 if c.ctxt.Flag_shared {
2252 rel.Type = objabi.R_ADDRPOWER_TOCREL
2254 rel.Type = objabi.R_ADDRPOWER_TOCREL_DS
2260 rel.Type = objabi.R_ADDRPOWER
2262 rel.Type = objabi.R_ADDRPOWER_DS
// getmask reports whether v is a valid 32-bit rotate mask (a single
// contiguous run of 1 bits, possibly wrapping) and stores the mask
// begin/end bit numbers in m[0]/m[1].
// NOTE(review): this extract is elided; m initialization, the wrapped
// (MB > ME) handling and the return statements are not visible here.
2271 func getmask(m []byte, v uint32) bool {
2274 if v != ^uint32(0) && v&(1<<31) != 0 && v&1 != 0 { /* MB > ME */
// Scan from the most-significant bit for the run of ones.
2285 for i := 0; i < 32; i++ {
2286 if v&(1<<uint(31-i)) != 0 {
2291 if i >= 32 || v&(1<<uint(31-i)) == 0 {
// Any further 1 bit after the run ends makes the mask invalid.
2297 if v&(1<<uint(31-i)) != 0 {
// maskgen validates v as a 32-bit rotate mask via getmask, reporting a
// diagnostic against p when it cannot be encoded.
// NOTE(review): the if-statement framing around the Diag call is
// elided in this extract.
2308 func (c *ctxt9) maskgen(p *obj.Prog, m []byte, v uint32) {
2310 c.ctxt.Diag("cannot generate mask #%x\n%v", v, p)
2315 * 64-bit masks (rldic etc)
// getmask64 reports whether v is a valid 64-bit rotate mask (a single
// contiguous run of 1 bits) and stores the begin/end bit numbers in m.
// NOTE(review): this extract is elided; m initialization and the
// return statements are not visible here.
2317 func getmask64(m []byte, v uint64) bool {
// Scan from the most-significant bit for the run of ones.
2320 for i := 0; i < 64; i++ {
2321 if v&(uint64(1)<<uint(63-i)) != 0 {
2326 if i >= 64 || v&(uint64(1)<<uint(63-i)) == 0 {
// Any further 1 bit after the run ends makes the mask invalid.
2332 if v&(uint64(1)<<uint(63-i)) != 0 {
2343 func (c *ctxt9) maskgen64(p *obj.Prog, m []byte, v uint64) {
2344 if !getmask64(m, v) {
2345 c.ctxt.Diag("cannot generate mask #%x\n%v", v, p)
// loadu32 returns the instruction word that loads the upper half of
// constant d into register r (oris for values that fit in 32 unsigned
// bits, addis otherwise).
// NOTE(review): the definition of local v (presumably the high 16 bits
// of d) is elided in this extract — confirm against upstream.
2349 func loadu32(r int, d int64) uint32 {
2351 if isuint32(uint64(d)) {
2352 return LOP_IRR(OP_ORIS, uint32(r), REGZERO, uint32(v))
2354 return AOP_IRR(OP_ADDIS, uint32(r), REGZERO, uint32(v))
// high16adjusted returns the high 16 bits of d, adjusted upward by one
// when bit 15 of d is set. This compensates for a following
// add-immediate of the low 16 bits, which sign-extends them: the
// classic @ha adjustment used when materializing a 32-bit offset with
// an addis/addi (or addis/load) pair.
// NOTE(review): the branch condition was elided in this extract and is
// reconstructed as d&0x8000 != 0 — confirm against upstream.
func high16adjusted(d int32) uint16 {
	if d&0x8000 != 0 {
		return uint16((d >> 16) + 1)
	}
	return uint16(d >> 16)
}
2364 func (c *ctxt9) asmout(p *obj.Prog, o *Optab, out []uint32) {
2371 //print("%v => case %d\n", p, o->type);
2374 c.ctxt.Diag("unknown type %d", o.type_)
2377 case 0: /* pseudo ops */
2380 case 2: /* int/cr/fp op Rb,[Ra],Rd */
2386 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2388 case 3: /* mov $soreg/addcon/andcon/ucon, r ==> addis/oris/addi/ori $i,reg',r */
2389 d := c.vregoff(&p.From)
2392 r := int(p.From.Reg)
2394 r = c.getimpliedreg(&p.From, p)
2396 if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 && (r != 0 || v != 0) {
2397 c.ctxt.Diag("literal operation on R0\n%v", p)
2402 log.Fatalf("invalid handling of %v", p)
2404 // For UCON operands the value is right shifted 16, using ADDIS if the
2405 // value should be signed, ORIS if unsigned.
2407 if r == REGZERO && isuint32(uint64(d)) {
2408 o1 = LOP_IRR(OP_ORIS, uint32(p.To.Reg), REGZERO, uint32(v))
2413 } else if int64(int16(d)) != d {
2414 // Operand is 16 bit value with sign bit set
2415 if o.a1 == C_ANDCON {
2416 // Needs unsigned 16 bit so use ORI
2417 if r == 0 || r == REGZERO {
2418 o1 = LOP_IRR(uint32(OP_ORI), uint32(p.To.Reg), uint32(0), uint32(v))
2421 // With ADDCON, needs signed 16 bit value, fall through to use ADDI
2422 } else if o.a1 != C_ADDCON {
2423 log.Fatalf("invalid handling of %v", p)
2427 o1 = AOP_IRR(uint32(a), uint32(p.To.Reg), uint32(r), uint32(v))
2429 case 4: /* add/mul $scon,[r1],r2 */
2430 v := c.regoff(&p.From)
2436 if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 {
2437 c.ctxt.Diag("literal operation on R0\n%v", p)
2439 if int32(int16(v)) != v {
2440 log.Fatalf("mishandled instruction %v", p)
2442 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2444 case 5: /* syscall */
2447 case 6: /* logical op Rb,[Rs,]Ra; no literal */
2453 // AROTL and AROTLW are extended mnemonics, which map to RLDCL and RLWNM.
2456 o1 = AOP_RLDIC(OP_RLDCL, uint32(p.To.Reg), uint32(r), uint32(p.From.Reg), uint32(0))
2458 o1 = OP_RLW(OP_RLWNM, uint32(p.To.Reg), uint32(r), uint32(p.From.Reg), 0, 31)
2460 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2463 case 7: /* mov r, soreg ==> stw o(r) */
2467 r = c.getimpliedreg(&p.To, p)
2469 v := c.regoff(&p.To)
2470 if p.To.Type == obj.TYPE_MEM && p.To.Index != 0 {
2472 c.ctxt.Diag("illegal indexed instruction\n%v", p)
2474 o1 = AOP_RRR(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(r))
2476 if int32(int16(v)) != v {
2477 log.Fatalf("mishandled instruction %v", p)
2479 // Offsets in DS form stores must be a multiple of 4
2480 inst := c.opstore(p.As)
2481 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
2482 log.Fatalf("invalid offset for DS form load/store %v", p)
2484 o1 = AOP_IRR(inst, uint32(p.From.Reg), uint32(r), uint32(v))
2487 case 8: /* mov soreg, r ==> lbz/lhz/lwz o(r), lbz o(r) + extsb r,r */
2488 r := int(p.From.Reg)
2491 r = c.getimpliedreg(&p.From, p)
2493 v := c.regoff(&p.From)
2494 if p.From.Type == obj.TYPE_MEM && p.From.Index != 0 {
2496 c.ctxt.Diag("illegal indexed instruction\n%v", p)
2498 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(r))
2500 if int32(int16(v)) != v {
2501 log.Fatalf("mishandled instruction %v", p)
2503 // Offsets in DS form loads must be a multiple of 4
2504 inst := c.opload(p.As)
2505 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
2506 log.Fatalf("invalid offset for DS form load/store %v", p)
2508 o1 = AOP_IRR(inst, uint32(p.To.Reg), uint32(r), uint32(v))
2511 // Sign extend MOVB operations. This is ignored for other cases (o.size == 4).
2512 o2 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
2514 case 10: /* sub Ra,[Rb],Rd => subf Rd,Ra,Rb */
2520 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(r))
2522 case 11: /* br/bl lbra */
2525 if p.To.Target() != nil {
2526 v = int32(p.To.Target().Pc - p.Pc)
2528 c.ctxt.Diag("odd branch target address\n%v", p)
2532 if v < -(1<<25) || v >= 1<<24 {
2533 c.ctxt.Diag("branch too far\n%v", p)
2537 o1 = OP_BR(c.opirr(p.As), uint32(v), 0)
2538 if p.To.Sym != nil {
2539 rel := obj.Addrel(c.cursym)
2540 rel.Off = int32(c.pc)
2543 v += int32(p.To.Offset)
2545 c.ctxt.Diag("odd branch target address\n%v", p)
2550 rel.Type = objabi.R_CALLPOWER
2552 o2 = 0x60000000 // nop, sometimes overwritten by ld r2, 24(r1) when dynamic linking
2554 case 13: /* mov[bhwd]{z,} r,r */
2555 // This needs to handle "MOV* $0, Rx". This shows up because $0 also
2556 // matches C_REG if r0iszero. This happens because C_REG sorts before C_ANDCON
2557 // TODO: fix the above behavior and cleanup this exception.
2558 if p.From.Type == obj.TYPE_CONST {
2559 o1 = LOP_IRR(OP_ADDI, REGZERO, uint32(p.To.Reg), 0)
2562 if p.To.Type == obj.TYPE_CONST {
2563 c.ctxt.Diag("cannot move into constant 0\n%v", p)
2568 o1 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2570 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.From.Reg), 0, 24, 31)
2572 o1 = LOP_RRR(OP_EXTSH, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2574 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.From.Reg), 0, 16, 31)
2576 o1 = LOP_RRR(OP_EXTSW, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2578 o1 = OP_RLW(OP_RLDIC, uint32(p.To.Reg), uint32(p.From.Reg), 0, 0, 0) | 1<<5 /* MB=32 */
2580 o1 = LOP_RRR(OP_OR, uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.From.Reg))
2582 c.ctxt.Diag("internal: bad register move/truncation\n%v", p)
2585 case 14: /* rldc[lr] Rb,Rs,$mask,Ra -- left, right give different masks */
2591 d := c.vregoff(p.GetFrom3())
2595 // These opcodes expect a mask operand that has to be converted into the
2596 // appropriate operand. The way these were defined, not all valid masks are possible.
2597 // Left here for compatibility in case they were used or generated.
2598 case ARLDCL, ARLDCLCC:
2600 c.maskgen64(p, mask[:], uint64(d))
2602 a = int(mask[0]) /* MB */
2604 c.ctxt.Diag("invalid mask for rotate: %x (end != bit 63)\n%v", uint64(d), p)
2606 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2607 o1 |= (uint32(a) & 31) << 6
2609 o1 |= 1 << 5 /* mb[5] is top bit */
2612 case ARLDCR, ARLDCRCC:
2614 c.maskgen64(p, mask[:], uint64(d))
2616 a = int(mask[1]) /* ME */
2618 c.ctxt.Diag("invalid mask for rotate: %x %x (start != 0)\n%v", uint64(d), mask[0], p)
2620 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2621 o1 |= (uint32(a) & 31) << 6
2623 o1 |= 1 << 5 /* mb[5] is top bit */
2626 // These opcodes use a shift count like the ppc64 asm, no mask conversion done
2627 case ARLDICR, ARLDICRCC:
2629 sh := c.regoff(&p.From)
2630 if me < 0 || me > 63 || sh > 63 {
2631 c.ctxt.Diag("Invalid me or sh for RLDICR: %x %x\n%v", int(d), sh, p)
2633 o1 = AOP_RLDIC(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(sh), uint32(me))
2635 case ARLDICL, ARLDICLCC, ARLDIC, ARLDICCC:
2637 sh := c.regoff(&p.From)
2638 if mb < 0 || mb > 63 || sh > 63 {
2639 c.ctxt.Diag("Invalid mb or sh for RLDIC, RLDICL: %x %x\n%v", mb, sh, p)
2641 o1 = AOP_RLDIC(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(sh), uint32(mb))
2644 // This is an extended mnemonic defined in the ISA section C.8.1
2645 // clrlsldi ra,rs,b,n --> rldic ra,rs,n,b-n
2646 // It maps onto RLDIC so is directly generated here based on the operands from
2649 b := c.regoff(&p.From)
2650 if n > b || b > 63 {
2651 c.ctxt.Diag("Invalid n or b for CLRLSLDI: %x %x\n%v", n, b, p)
2653 o1 = AOP_RLDIC(OP_RLDIC, uint32(p.To.Reg), uint32(r), uint32(n), uint32(b)-uint32(n))
2656 c.ctxt.Diag("unexpected op in rldc case\n%v", p)
2660 case 17, /* bc bo,bi,lbra (same for now) */
2661 16: /* bc bo,bi,sbra */
2666 if p.From.Type == obj.TYPE_CONST {
2667 a = int(c.regoff(&p.From))
2668 } else if p.From.Type == obj.TYPE_REG {
2670 c.ctxt.Diag("unexpected register setting for branch with CR: %d\n", r)
2672 // BI values for the CR
2691 c.ctxt.Diag("unrecognized register: expecting CR\n")
2695 if p.To.Target() != nil {
2696 v = int32(p.To.Target().Pc - p.Pc)
2699 c.ctxt.Diag("odd branch target address\n%v", p)
2703 if v < -(1<<16) || v >= 1<<15 {
2704 c.ctxt.Diag("branch too far\n%v", p)
2706 o1 = OP_BC(c.opirr(p.As), uint32(a), uint32(r), uint32(v), 0)
2708 case 15: /* br/bl (r) => mov r,lr; br/bl (lr) */
2710 if p.As == ABC || p.As == ABCL {
2711 v = c.regoff(&p.To) & 31
2713 v = 20 /* unconditional */
2715 o1 = AOP_RRR(OP_MTSPR, uint32(p.To.Reg), 0, 0) | (REG_LR&0x1f)<<16 | ((REG_LR>>5)&0x1f)<<11
2716 o2 = OPVCC(19, 16, 0, 0)
2717 if p.As == ABL || p.As == ABCL {
2720 o2 = OP_BCR(o2, uint32(v), uint32(p.To.Index))
2722 case 18: /* br/bl (lr/ctr); bc/bcl bo,bi,(lr/ctr) */
2725 if p.As == ABC || p.As == ABCL {
2726 v = c.regoff(&p.From) & 31
2728 v = 20 /* unconditional */
2734 switch oclass(&p.To) {
2736 o1 = OPVCC(19, 528, 0, 0)
2739 o1 = OPVCC(19, 16, 0, 0)
2742 c.ctxt.Diag("bad optab entry (18): %d\n%v", p.To.Class, p)
2746 // Insert optional branch hint for bclr[l]/bcctr[l]
2747 if p.From3Type() != obj.TYPE_NONE {
2748 bh = uint32(p.GetFrom3().Offset)
2749 if bh == 2 || bh > 3 {
2750 log.Fatalf("BH must be 0,1,3 for %v", p)
2755 if p.As == ABL || p.As == ABCL {
2758 o1 = OP_BCR(o1, uint32(v), uint32(r))
2760 case 19: /* mov $lcon,r ==> cau+or */
2761 d := c.vregoff(&p.From)
2762 o1 = loadu32(int(p.To.Reg), d)
2763 o2 = LOP_IRR(OP_ORI, uint32(p.To.Reg), uint32(p.To.Reg), uint32(int32(d)))
2765 case 20: /* add $ucon,,r | addis $addcon,r,r */
2766 v := c.regoff(&p.From)
2772 if p.As == AADD && (r0iszero == 0 /*TypeKind(100016)*/ && p.Reg == 0 || r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0) {
2773 c.ctxt.Diag("literal operation on R0\n%v", p)
2776 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2778 o1 = AOP_IRR(c.opirr(AADDIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
2781 case 22: /* add $lcon/$andcon,r1,r2 ==> oris+ori+add/ori+add */
2782 if p.To.Reg == REGTMP || p.Reg == REGTMP {
2783 c.ctxt.Diag("can't synthesize large constant\n%v", p)
2785 d := c.vregoff(&p.From)
2790 if p.From.Sym != nil {
2791 c.ctxt.Diag("%v is not supported", p)
2793 // If operand is ANDCON, generate 2 instructions using
2794 // ORI for unsigned value; with LCON 3 instructions.
2796 o1 = LOP_IRR(OP_ORI, REGTMP, REGZERO, uint32(int32(d)))
2797 o2 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2799 o1 = loadu32(REGTMP, d)
2800 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(int32(d)))
2801 o3 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2804 case 23: /* and $lcon/$addcon,r1,r2 ==> oris+ori+and/addi+and */
2805 if p.To.Reg == REGTMP || p.Reg == REGTMP {
2806 c.ctxt.Diag("can't synthesize large constant\n%v", p)
2808 d := c.vregoff(&p.From)
2814 // With ADDCON operand, generate 2 instructions using ADDI for signed value,
2815 // with LCON operand generate 3 instructions.
2817 o1 = LOP_IRR(OP_ADDI, REGZERO, REGTMP, uint32(int32(d)))
2818 o2 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2820 o1 = loadu32(REGTMP, d)
2821 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(int32(d)))
2822 o3 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2824 if p.From.Sym != nil {
2825 c.ctxt.Diag("%v is not supported", p)
2828 case 24: /* lfd fA,float64(0) -> xxlxor xsA,xsaA,xsaA + fneg for -0 */
2829 o1 = AOP_XX3I(c.oprrr(AXXLXOR), uint32(p.To.Reg), uint32(p.To.Reg), uint32(p.To.Reg), uint32(0))
2830 // This is needed for -0.
2832 o2 = AOP_RRR(c.oprrr(AFNEG), uint32(p.To.Reg), 0, uint32(p.To.Reg))
2836 /* sld[.] $sh,rS,rA -> rldicr[.] $sh,rS,mask(0,63-sh),rA; srd[.] -> rldicl */
2837 v := c.regoff(&p.From)
2865 c.ctxt.Diag("unexpected op in sldi case\n%v", p)
2870 if p.As == AEXTSWSLI || p.As == AEXTSWSLICC {
2871 o1 = AOP_EXTSWSLI(OP_EXTSWSLI, uint32(r), uint32(p.To.Reg), uint32(v))
2874 o1 = AOP_RLDIC(op, uint32(p.To.Reg), uint32(r), uint32(v), uint32(a))
2876 if p.As == ASLDCC || p.As == ASRDCC || p.As == AEXTSWSLICC {
2877 o1 |= 1 // Set the condition code bit
2880 case 26: /* mov $lsext/auto/oreg,,r2 ==> addis+addi */
2881 v := c.vregoff(&p.From)
2882 r := int(p.From.Reg)
2884 switch p.From.Name {
2885 case obj.NAME_EXTERN, obj.NAME_STATIC:
2886 // Load a 32 bit constant, or relocation depending on if a symbol is attached
2887 o1, o2 = c.symbolAccess(p.From.Sym, v, p.To.Reg, OP_ADDI, true)
2890 r = c.getimpliedreg(&p.From, p)
2892 // Add a 32 bit offset to a register.
2893 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), uint32(r), uint32(high16adjusted(int32(v))))
2894 o2 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), uint32(p.To.Reg), uint32(v))
2897 case 27: /* subc ra,$simm,rd => subfic rd,ra,$simm */
2898 v := c.regoff(p.GetFrom3())
2900 r := int(p.From.Reg)
2901 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2903 case 28: /* subc r1,$lcon,r2 ==> cau+or+subfc */
2904 if p.To.Reg == REGTMP || p.From.Reg == REGTMP {
2905 c.ctxt.Diag("can't synthesize large constant\n%v", p)
2907 v := c.regoff(p.GetFrom3())
2908 o1 = AOP_IRR(OP_ADDIS, REGTMP, REGZERO, uint32(v)>>16)
2909 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(v))
2910 o3 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), REGTMP)
2911 if p.From.Sym != nil {
2912 c.ctxt.Diag("%v is not supported", p)
2915 case 29: /* rldic[lr]? $sh,s,$mask,a -- left, right, plain give different masks */
2916 v := c.regoff(&p.From)
2918 d := c.vregoff(p.GetFrom3())
2920 c.maskgen64(p, mask[:], uint64(d))
2923 case ARLDC, ARLDCCC:
2924 a = int(mask[0]) /* MB */
2925 if int32(mask[1]) != (63 - v) {
2926 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[1], v, p)
2929 case ARLDCL, ARLDCLCC:
2930 a = int(mask[0]) /* MB */
2932 c.ctxt.Diag("invalid mask for shift: %x %s (shift %d)\n%v", uint64(d), mask[1], v, p)
2935 case ARLDCR, ARLDCRCC:
2936 a = int(mask[1]) /* ME */
2938 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[0], v, p)
2942 c.ctxt.Diag("unexpected op in rldic case\n%v", p)
2946 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
2947 o1 |= (uint32(a) & 31) << 6
2952 o1 |= 1 << 5 /* mb[5] is top bit */
2955 case 30: /* rldimi $sh,s,$mask,a */
2956 v := c.regoff(&p.From)
2958 d := c.vregoff(p.GetFrom3())
2960 // Original opcodes had mask operands which had to be converted to a shift count as expected by
2963 case ARLDMI, ARLDMICC:
2965 c.maskgen64(p, mask[:], uint64(d))
2966 if int32(mask[1]) != (63 - v) {
2967 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[1], v, p)
2969 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
2970 o1 |= (uint32(mask[0]) & 31) << 6
2974 if mask[0]&0x20 != 0 {
2975 o1 |= 1 << 5 /* mb[5] is top bit */
2978 // Opcodes with shift count operands.
2979 case ARLDIMI, ARLDIMICC:
2980 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
2981 o1 |= (uint32(d) & 31) << 6
2990 case 31: /* dword */
2991 d := c.vregoff(&p.From)
2993 if c.ctxt.Arch.ByteOrder == binary.BigEndian {
2994 o1 = uint32(d >> 32)
2998 o2 = uint32(d >> 32)
3001 if p.From.Sym != nil {
3002 rel := obj.Addrel(c.cursym)
3003 rel.Off = int32(c.pc)
3005 rel.Sym = p.From.Sym
3006 rel.Add = p.From.Offset
3007 rel.Type = objabi.R_ADDR
3012 case 32: /* fmul frc,fra,frd */
3018 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0) | (uint32(p.From.Reg)&31)<<6
3020 case 33: /* fabs [frb,]frd; fmr. frb,frd */
3021 r := int(p.From.Reg)
3023 if oclass(&p.From) == C_NONE {
3026 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), 0, uint32(r))
3028 case 34: /* FMADDx fra,frb,frc,frt (t=a*c±b) */
3029 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg)) | (uint32(p.GetFrom3().Reg)&31)<<6
3031 case 35: /* mov r,lext/lauto/loreg ==> cau $(v>>16),sb,r'; store o(r') */
3032 v := c.regoff(&p.To)
3036 r = c.getimpliedreg(&p.To, p)
3038 // Offsets in DS form stores must be a multiple of 4
3039 inst := c.opstore(p.As)
3040 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3041 log.Fatalf("invalid offset for DS form load/store %v", p)
3043 o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v)))
3044 o2 = AOP_IRR(inst, uint32(p.From.Reg), REGTMP, uint32(v))
3046 case 36: /* mov b/bz/h/hz lext/lauto/lreg,r ==> lbz+extsb/lbz/lha/lhz etc */
3047 v := c.regoff(&p.From)
3049 r := int(p.From.Reg)
3051 r = c.getimpliedreg(&p.From, p)
3053 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), uint32(r), uint32(high16adjusted(v)))
3054 o2 = AOP_IRR(c.opload(p.As), uint32(p.To.Reg), uint32(p.To.Reg), uint32(v))
3056 // Sign extend MOVB if needed
3057 o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3060 o1 = uint32(c.regoff(&p.From))
3062 case 41: /* stswi */
3063 o1 = AOP_RRR(c.opirr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), 0) | (uint32(c.regoff(p.GetFrom3()))&0x7F)<<11
3066 o1 = AOP_RRR(c.opirr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), 0) | (uint32(c.regoff(p.GetFrom3()))&0x7F)<<11
3068 case 43: /* data cache instructions: op (Ra+[Rb]), [th|l] */
3069 /* TH field for dcbt/dcbtst: */
3070 /* 0 = Block access - program will soon access EA. */
3071 /* 8-15 = Stream access - sequence of access (data stream). See section 4.3.2 of the ISA for details. */
3072 /* 16 = Block access - program will soon make a transient access to EA. */
3073 /* 17 = Block access - program will not access EA for a long time. */
3075 /* L field for dcbf: */
3076 /* 0 = invalidates the block containing EA in all processors. */
3077 /* 1 = same as 0, but with limited scope (i.e. block in the current processor will not be reused soon). */
3078 /* 3 = same as 1, but with even more limited scope (i.e. block in the current processor primary cache will not be reused soon). */
3079 if p.To.Type == obj.TYPE_NONE {
3080 o1 = AOP_RRR(c.oprrr(p.As), 0, uint32(p.From.Index), uint32(p.From.Reg))
3082 th := c.regoff(&p.To)
3083 o1 = AOP_RRR(c.oprrr(p.As), uint32(th), uint32(p.From.Index), uint32(p.From.Reg))
3086 case 44: /* indexed store */
3087 o1 = AOP_RRR(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(p.To.Reg))
3089 case 45: /* indexed load */
3091 /* The assembler accepts a 4-operand l*arx instruction. The fourth operand is an Exclusive Access Hint (EH) */
3092 /* The EH field can be used as a lock acquire/release hint as follows: */
3093 /* 0 = Atomic Update (fetch-and-operate or similar algorithm) */
3094 /* 1 = Exclusive Access (lock acquire and release) */
3095 case ALBAR, ALHAR, ALWAR, ALDAR:
3096 if p.From3Type() != obj.TYPE_NONE {
3097 eh := int(c.regoff(p.GetFrom3()))
3099 c.ctxt.Diag("illegal EH field\n%v", p)
3101 o1 = AOP_RRRI(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg), uint32(eh))
3103 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3106 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3108 case 46: /* plain op */
3111 case 47: /* op Ra, Rd; also op [Ra,] Rd */
3112 r := int(p.From.Reg)
3117 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0)
3119 case 48: /* op Rs, Ra */
3120 r := int(p.From.Reg)
3125 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0)
3127 case 49: /* op Rb; op $n, Rb */
3128 if p.From.Type != obj.TYPE_REG { /* tlbie $L, rB */
3129 v := c.regoff(&p.From) & 1
3130 o1 = AOP_RRR(c.oprrr(p.As), 0, 0, uint32(p.To.Reg)) | uint32(v)<<21
3132 o1 = AOP_RRR(c.oprrr(p.As), 0, 0, uint32(p.From.Reg))
3135 case 50: /* rem[u] r1[,r2],r3 */
3142 t := v & (1<<10 | 1) /* OE|Rc */
3143 o1 = AOP_RRR(v&^t, REGTMP, uint32(r), uint32(p.From.Reg))
3144 o2 = AOP_RRR(OP_MULLW, REGTMP, REGTMP, uint32(p.From.Reg))
3145 o3 = AOP_RRR(OP_SUBF|t, uint32(p.To.Reg), REGTMP, uint32(r))
3149 /* Clear top 32 bits */
3150 o3 = OP_RLW(OP_RLDIC, REGTMP, REGTMP, 0, 0, 0) | 1<<5
3153 case 51: /* remd[u] r1[,r2],r3 */
3160 t := v & (1<<10 | 1) /* OE|Rc */
3161 o1 = AOP_RRR(v&^t, REGTMP, uint32(r), uint32(p.From.Reg))
3162 o2 = AOP_RRR(OP_MULLD, REGTMP, REGTMP, uint32(p.From.Reg))
3163 o3 = AOP_RRR(OP_SUBF|t, uint32(p.To.Reg), REGTMP, uint32(r))
3164 /* cases 50,51: removed; can be reused. */
3166 /* cases 50,51: removed; can be reused. */
3168 case 52: /* mtfsbNx cr(n) */
3169 v := c.regoff(&p.From) & 31
3171 o1 = AOP_RRR(c.oprrr(p.As), uint32(v), 0, 0)
3173 case 53: /* mffsX ,fr1 */
3174 o1 = AOP_RRR(OP_MFFS, uint32(p.To.Reg), 0, 0)
3176 case 55: /* op Rb, Rd */
3177 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), 0, uint32(p.From.Reg))
3179 case 56: /* sra $sh,[s,]a; srd $sh,[s,]a */
3180 v := c.regoff(&p.From)
3186 o1 = AOP_RRR(c.opirr(p.As), uint32(r), uint32(p.To.Reg), uint32(v)&31)
3187 if (p.As == ASRAD || p.As == ASRADCC) && (v&0x20 != 0) {
3188 o1 |= 1 << 1 /* mb[5] */
3191 case 57: /* slw $sh,[s,]a -> rlwinm ... */
3192 v := c.regoff(&p.From)
3200 * Let user (gs) shoot himself in the foot.
3201 * qc has already complained.
3204 ctxt->diag("illegal shift %ld\n%v", v, p);
3214 mask[0], mask[1] = 0, 31
3216 mask[0], mask[1] = uint8(v), 31
3219 mask[0], mask[1] = 0, uint8(31-v)
3221 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(r), uint32(v), uint32(mask[0]), uint32(mask[1]))
3222 if p.As == ASLWCC || p.As == ASRWCC {
3223 o1 |= 1 // set the condition code
3226 case 58: /* logical $andcon,[s],a */
3227 v := c.regoff(&p.From)
3233 o1 = LOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
3235 case 59: /* or/xor/and $ucon,,r | oris/xoris/andis $addcon,r,r */
3236 v := c.regoff(&p.From)
3244 o1 = LOP_IRR(c.opirr(AORIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16) /* oris, xoris, andis. */
3246 o1 = LOP_IRR(c.opirr(AXORIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
3248 o1 = LOP_IRR(c.opirr(AANDISCC), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
3250 o1 = LOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
3253 case 60: /* tw to,a,b */
3254 r := int(c.regoff(&p.From) & 31)
3256 o1 = AOP_RRR(c.oprrr(p.As), uint32(r), uint32(p.Reg), uint32(p.To.Reg))
3258 case 61: /* tw to,a,$simm */
3259 r := int(c.regoff(&p.From) & 31)
3261 v := c.regoff(&p.To)
3262 o1 = AOP_IRR(c.opirr(p.As), uint32(r), uint32(p.Reg), uint32(v))
3264 case 62: /* rlwmi $sh,s,$mask,a */
3265 v := c.regoff(&p.From)
3268 n := c.regoff(p.GetFrom3())
3269 // This is an extended mnemonic described in the ISA C.8.2
3270 // clrlslwi ra,rs,b,n -> rlwinm ra,rs,n,b-n,31-n
3271 // It maps onto rlwinm which is directly generated here.
3272 if n > v || v >= 32 {
3273 c.ctxt.Diag("Invalid n or b for CLRLSLWI: %x %x\n%v", v, n, p)
3276 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.Reg), uint32(n), uint32(v-n), uint32(31-n))
3279 c.maskgen(p, mask[:], uint32(c.regoff(p.GetFrom3())))
3280 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), uint32(v))
3281 o1 |= (uint32(mask[0])&31)<<6 | (uint32(mask[1])&31)<<1
3284 case 63: /* rlwmi b,s,$mask,a */
3286 c.maskgen(p, mask[:], uint32(c.regoff(p.GetFrom3())))
3287 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.Reg), uint32(p.To.Reg), uint32(p.From.Reg))
3288 o1 |= (uint32(mask[0])&31)<<6 | (uint32(mask[1])&31)<<1
3290 case 64: /* mtfsf fr[, $m] {,fpcsr} */
3292 if p.From3Type() != obj.TYPE_NONE {
3293 v = c.regoff(p.GetFrom3()) & 255
3297 o1 = OP_MTFSF | uint32(v)<<17 | uint32(p.From.Reg)<<11
3299 case 65: /* MOVFL $imm,FPSCR(n) => mtfsfi crfd,imm */
3301 c.ctxt.Diag("must specify FPSCR(n)\n%v", p)
3303 o1 = OP_MTFSFI | (uint32(p.To.Reg)&15)<<23 | (uint32(c.regoff(&p.From))&31)<<12
3305 case 66: /* mov spr,r1; mov r1,spr, also dcr */
3308 if REG_R0 <= p.From.Reg && p.From.Reg <= REG_R31 {
3311 if REG_DCR0 <= v && v <= REG_DCR0+1023 {
3312 o1 = OPVCC(31, 451, 0, 0) /* mtdcr */
3314 o1 = OPVCC(31, 467, 0, 0) /* mtspr */
3318 v = int32(p.From.Reg)
3319 if REG_DCR0 <= v && v <= REG_DCR0+1023 {
3320 o1 = OPVCC(31, 323, 0, 0) /* mfdcr */
3322 o1 = OPVCC(31, 339, 0, 0) /* mfspr */
3326 o1 = AOP_RRR(o1, uint32(r), 0, 0) | (uint32(v)&0x1f)<<16 | ((uint32(v)>>5)&0x1f)<<11
3328 case 67: /* mcrf crfD,crfS */
3329 if p.From.Type != obj.TYPE_REG || p.From.Reg < REG_CR0 || REG_CR7 < p.From.Reg || p.To.Type != obj.TYPE_REG || p.To.Reg < REG_CR0 || REG_CR7 < p.To.Reg {
3330 c.ctxt.Diag("illegal CR field number\n%v", p)
3332 o1 = AOP_RRR(OP_MCRF, ((uint32(p.To.Reg) & 7) << 2), ((uint32(p.From.Reg) & 7) << 2), 0)
3334 case 68: /* mfcr rD; mfocrf CRM,rD */
3335 if p.From.Type == obj.TYPE_REG && REG_CR0 <= p.From.Reg && p.From.Reg <= REG_CR7 {
3336 v := int32(1 << uint(7-(p.To.Reg&7))) /* CR(n) */
3337 o1 = AOP_RRR(OP_MFCR, uint32(p.To.Reg), 0, 0) | 1<<20 | uint32(v)<<12 /* new form, mfocrf */
3339 o1 = AOP_RRR(OP_MFCR, uint32(p.To.Reg), 0, 0) /* old form, whole register */
3342 case 69: /* mtcrf CRM,rS */
3344 if p.From3Type() != obj.TYPE_NONE {
3346 c.ctxt.Diag("can't use both mask and CR(n)\n%v", p)
3348 v = c.regoff(p.GetFrom3()) & 0xff
3353 v = 1 << uint(7-(p.To.Reg&7)) /* CR(n) */
3357 o1 = AOP_RRR(OP_MTCRF, uint32(p.From.Reg), 0, 0) | uint32(v)<<12
3359 case 70: /* [f]cmp r,r,cr*/
3364 r = (int(p.Reg) & 7) << 2
3366 o1 = AOP_RRR(c.oprrr(p.As), uint32(r), uint32(p.From.Reg), uint32(p.To.Reg))
3368 case 71: /* cmp[l] r,i,cr*/
3373 r = (int(p.Reg) & 7) << 2
3375 o1 = AOP_RRR(c.opirr(p.As), uint32(r), uint32(p.From.Reg), 0) | uint32(c.regoff(&p.To))&0xffff
3377 case 72: /* slbmte (Rb+Rs -> slb[Rb]) -> Rs, Rb */
3378 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), 0, uint32(p.To.Reg))
3380 case 73: /* mcrfs crfD,crfS */
3381 if p.From.Type != obj.TYPE_REG || p.From.Reg != REG_FPSCR || p.To.Type != obj.TYPE_REG || p.To.Reg < REG_CR0 || REG_CR7 < p.To.Reg {
3382 c.ctxt.Diag("illegal FPSCR/CR field number\n%v", p)
3384 o1 = AOP_RRR(OP_MCRFS, ((uint32(p.To.Reg) & 7) << 2), ((0 & 7) << 2), 0)
3386 case 77: /* syscall $scon, syscall Rx */
3387 if p.From.Type == obj.TYPE_CONST {
3388 if p.From.Offset > BIG || p.From.Offset < -BIG {
3389 c.ctxt.Diag("illegal syscall, sysnum too large: %v", p)
3391 o1 = AOP_IRR(OP_ADDI, REGZERO, REGZERO, uint32(p.From.Offset))
3392 } else if p.From.Type == obj.TYPE_REG {
3393 o1 = LOP_RRR(OP_OR, REGZERO, uint32(p.From.Reg), uint32(p.From.Reg))
3395 c.ctxt.Diag("illegal syscall: %v", p)
3396 o1 = 0x7fe00008 // trap always
3400 o3 = AOP_RRR(c.oprrr(AXOR), REGZERO, REGZERO, REGZERO) // XOR R0, R0
3402 case 78: /* undef */
3403 o1 = 0 /* "An instruction consisting entirely of binary 0s is guaranteed
3404 always to be an illegal instruction." */
3406 /* relocation operations */
3408 v := c.vregoff(&p.To)
3409 // Offsets in DS form stores must be a multiple of 4
3410 inst := c.opstore(p.As)
3411 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3412 log.Fatalf("invalid offset for DS form load/store %v", p)
3414 // Can't reuse base for store instructions.
3415 o1, o2 = c.symbolAccess(p.To.Sym, v, p.From.Reg, inst, false)
3417 case 75: // 32 bit offset symbol loads (got/toc/addr)
3420 // Offsets in DS form loads must be a multiple of 4
3421 inst := c.opload(p.As)
3422 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3423 log.Fatalf("invalid offset for DS form load/store %v", p)
3425 switch p.From.Name {
3426 case obj.NAME_GOTREF, obj.NAME_TOCREF:
3428 c.ctxt.Diag("invalid offset for GOT/TOC access %v", p)
3430 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0)
3431 o2 = AOP_IRR(inst, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3432 rel := obj.Addrel(c.cursym)
3433 rel.Off = int32(c.pc)
3435 rel.Sym = p.From.Sym
3436 switch p.From.Name {
3437 case obj.NAME_GOTREF:
3438 rel.Type = objabi.R_ADDRPOWER_GOT
3439 case obj.NAME_TOCREF:
3440 rel.Type = objabi.R_ADDRPOWER_TOCREL_DS
3443 reuseBaseReg := p.As != AFMOVD && p.As != AFMOVS
3444 // Reuse To.Reg as base register if not FP move.
3445 o1, o2 = c.symbolAccess(p.From.Sym, v, p.To.Reg, inst, reuseBaseReg)
3448 o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3451 if p.From.Offset != 0 {
3452 c.ctxt.Diag("invalid offset against tls var %v", p)
3454 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R13, 0)
3455 o2 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3456 rel := obj.Addrel(c.cursym)
3457 rel.Off = int32(c.pc)
3459 rel.Sym = p.From.Sym
3460 rel.Type = objabi.R_POWER_TLS_LE
3463 if p.From.Offset != 0 {
3464 c.ctxt.Diag("invalid offset against tls var %v", p)
3466 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0)
3467 o2 = AOP_IRR(c.opload(AMOVD), uint32(p.To.Reg), uint32(p.To.Reg), 0)
3468 o3 = AOP_RRR(OP_ADD, uint32(p.To.Reg), uint32(p.To.Reg), REG_R13)
3469 rel := obj.Addrel(c.cursym)
3470 rel.Off = int32(c.pc)
3472 rel.Sym = p.From.Sym
3473 rel.Type = objabi.R_POWER_TLS_IE
3474 rel = obj.Addrel(c.cursym)
3475 rel.Off = int32(c.pc) + 8
3477 rel.Sym = p.From.Sym
3478 rel.Type = objabi.R_POWER_TLS
3480 case 82: /* vector instructions, VX-form and VC-form */
3481 if p.From.Type == obj.TYPE_REG {
3482 /* reg reg none OR reg reg reg */
3483 /* 3-register operand order: VRA, VRB, VRT */
3484 /* 2-register operand order: VRA, VRT */
3485 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3486 } else if p.From3Type() == obj.TYPE_CONST {
3487 /* imm imm reg reg */
3488 /* operand order: SIX, VRA, ST, VRT */
3489 six := int(c.regoff(&p.From))
3490 st := int(c.regoff(p.GetFrom3()))
3491 o1 = AOP_IIRR(c.opiirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(st), uint32(six))
3492 } else if p.From3Type() == obj.TYPE_NONE && p.Reg != 0 {
3494 /* operand order: UIM, VRB, VRT */
3495 uim := int(c.regoff(&p.From))
3496 o1 = AOP_VIRR(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(uim))
3499 /* operand order: SIM, VRT */
3500 sim := int(c.regoff(&p.From))
3501 o1 = AOP_IR(c.opirr(p.As), uint32(p.To.Reg), uint32(sim))
3504 case 83: /* vector instructions, VA-form */
3505 if p.From.Type == obj.TYPE_REG {
3506 /* reg reg reg reg */
3507 /* 4-register operand order: VRA, VRB, VRC, VRT */
3508 o1 = AOP_RRRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg))
3509 } else if p.From.Type == obj.TYPE_CONST {
3510 /* imm reg reg reg */
3511 /* operand order: SHB, VRA, VRB, VRT */
3512 shb := int(c.regoff(&p.From))
3513 o1 = AOP_IRRR(c.opirrr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg), uint32(shb))
3516 case 84: // ISEL BC,RA,RB,RT -> isel rt,ra,rb,bc
3517 bc := c.vregoff(&p.From)
3519 // rt = To.Reg, ra = p.Reg, rb = p.From3.Reg
3520 o1 = AOP_ISEL(OP_ISEL, uint32(p.To.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg), uint32(bc))
3522 case 85: /* vector instructions, VX-form */
3524 /* 2-register operand order: VRB, VRT */
3525 o1 = AOP_RR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg))
3527 case 86: /* VSX indexed store, XX1-form */
3529 /* 3-register operand order: XT, (RB)(RA*1) */
3530 o1 = AOP_XX1(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(p.To.Reg))
3532 case 87: /* VSX indexed load, XX1-form */
3534 /* 3-register operand order: (RB)(RA*1), XT */
3535 o1 = AOP_XX1(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3537 case 88: /* VSX instructions, XX1-form */
3538 /* reg reg none OR reg reg reg */
3539 /* 3-register operand order: RA, RB, XT */
3540 /* 2-register operand order: XS, RA or RA, XT */
3541 xt := int32(p.To.Reg)
3542 xs := int32(p.From.Reg)
3543 /* We need to treat the special case of extended mnemonics that may have a FREG/VREG as an argument */
3544 if REG_V0 <= xt && xt <= REG_V31 {
3545 /* Convert V0-V31 to VS32-VS63 */
3547 o1 = AOP_XX1(c.oprrr(p.As), uint32(xt), uint32(p.From.Reg), uint32(p.Reg))
3548 } else if REG_F0 <= xt && xt <= REG_F31 {
3549 /* Convert F0-F31 to VS0-VS31 */
3551 o1 = AOP_XX1(c.oprrr(p.As), uint32(xt), uint32(p.From.Reg), uint32(p.Reg))
3552 } else if REG_VS0 <= xt && xt <= REG_VS63 {
3553 o1 = AOP_XX1(c.oprrr(p.As), uint32(xt), uint32(p.From.Reg), uint32(p.Reg))
3554 } else if REG_V0 <= xs && xs <= REG_V31 {
3555 /* Likewise for XS */
3557 o1 = AOP_XX1(c.oprrr(p.As), uint32(xs), uint32(p.To.Reg), uint32(p.Reg))
3558 } else if REG_F0 <= xs && xs <= REG_F31 {
3560 o1 = AOP_XX1(c.oprrr(p.As), uint32(xs), uint32(p.To.Reg), uint32(p.Reg))
3561 } else if REG_VS0 <= xs && xs <= REG_VS63 {
3562 o1 = AOP_XX1(c.oprrr(p.As), uint32(xs), uint32(p.To.Reg), uint32(p.Reg))
3565 case 89: /* VSX instructions, XX2-form */
3566 /* reg none reg OR reg imm reg */
3567 /* 2-register operand order: XB, XT or XB, UIM, XT*/
3568 uim := int(c.regoff(p.GetFrom3()))
3569 o1 = AOP_XX2(c.oprrr(p.As), uint32(p.To.Reg), uint32(uim), uint32(p.From.Reg))
3571 case 90: /* VSX instructions, XX3-form */
3572 if p.From3Type() == obj.TYPE_NONE {
3574 /* 3-register operand order: XA, XB, XT */
3575 o1 = AOP_XX3(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3576 } else if p.From3Type() == obj.TYPE_CONST {
3577 /* reg reg reg imm */
3578 /* operand order: XA, XB, DM, XT */
3579 dm := int(c.regoff(p.GetFrom3()))
3580 o1 = AOP_XX3I(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(dm))
3583 case 91: /* VSX instructions, XX4-form */
3584 /* reg reg reg reg */
3585 /* 3-register operand order: XA, XB, XC, XT */
3586 o1 = AOP_XX4(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg))
3588 case 92: /* X-form instructions, 3-operands */
3589 if p.To.Type == obj.TYPE_CONST {
3591 xf := int32(p.From.Reg)
3592 if REG_F0 <= xf && xf <= REG_F31 {
3593 /* operand order: FRA, FRB, BF */
3594 bf := int(c.regoff(&p.To)) << 2
3595 o1 = AOP_RRR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg), uint32(p.Reg))
3597 /* operand order: RA, RB, L */
3598 l := int(c.regoff(&p.To))
3599 o1 = AOP_RRR(c.opirr(p.As), uint32(l), uint32(p.From.Reg), uint32(p.Reg))
3601 } else if p.From3Type() == obj.TYPE_CONST {
3603 /* operand order: RB, L, RA */
3604 l := int(c.regoff(p.GetFrom3()))
3605 o1 = AOP_RRR(c.opirr(p.As), uint32(l), uint32(p.To.Reg), uint32(p.From.Reg))
3606 } else if p.To.Type == obj.TYPE_REG {
3607 cr := int32(p.To.Reg)
3608 if REG_CR0 <= cr && cr <= REG_CR7 {
3610 /* operand order: RA, RB, BF */
3611 bf := (int(p.To.Reg) & 7) << 2
3612 o1 = AOP_RRR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg), uint32(p.Reg))
3613 } else if p.From.Type == obj.TYPE_CONST {
3615 /* operand order: L, RT */
3616 l := int(c.regoff(&p.From))
3617 o1 = AOP_RRR(c.opirr(p.As), uint32(p.To.Reg), uint32(l), uint32(p.Reg))
3620 case ACOPY, APASTECC:
3621 o1 = AOP_RRR(c.opirr(p.As), uint32(1), uint32(p.From.Reg), uint32(p.To.Reg))
3624 /* operand order: RS, RB, RA */
3625 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3630 case 93: /* X-form instructions, 2-operands */
3631 if p.To.Type == obj.TYPE_CONST {
3633 /* operand order: FRB, BF */
3634 bf := int(c.regoff(&p.To)) << 2
3635 o1 = AOP_RR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg))
3636 } else if p.Reg == 0 {
3637 /* popcnt* r,r, X-form */
3638 /* operand order: RS, RA */
3639 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3642 case 94: /* Z23-form instructions, 4-operands */
3643 /* reg reg reg imm */
3644 /* operand order: RA, RB, CY, RT */
3645 cy := int(c.regoff(p.GetFrom3()))
3646 o1 = AOP_Z23I(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(cy))
3648 case 96: /* VSX load, DQ-form */
3650 /* operand order: (RA)(DQ), XT */
3651 dq := int16(c.regoff(&p.From))
3653 c.ctxt.Diag("invalid offset for DQ form load/store %v", dq)
3655 o1 = AOP_DQ(c.opload(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(dq))
3657 case 97: /* VSX store, DQ-form */
3659 /* operand order: XT, (RA)(DQ) */
3660 dq := int16(c.regoff(&p.To))
3662 c.ctxt.Diag("invalid offset for DQ form load/store %v", dq)
3664 o1 = AOP_DQ(c.opstore(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(dq))
3665 case 98: /* VSX indexed load or load with length (also left-justified), x-form */
3666 /* vsreg, reg, reg */
3667 o1 = AOP_XX1(c.opload(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3668 case 99: /* VSX store with length (also left-justified) x-form */
3669 /* reg, reg, vsreg */
3670 o1 = AOP_XX1(c.opstore(p.As), uint32(p.From.Reg), uint32(p.Reg), uint32(p.To.Reg))
3671 case 100: /* VSX X-form XXSPLTIB */
3672 if p.From.Type == obj.TYPE_CONST {
3674 uim := int(c.regoff(&p.From))
3676 /* Use AOP_XX1 form with 0 for one of the registers. */
3677 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.To.Reg), uint32(0), uint32(uim))
3679 c.ctxt.Diag("invalid ops for %v", p.As)
3682 o1 = AOP_XX2(c.oprrr(p.As), uint32(p.To.Reg), uint32(0), uint32(p.From.Reg))
3684 case 102: /* RLWMI $sh,rs,$mb,$me,rt (M-form opcode)*/
3685 mb := uint32(c.regoff(&p.RestArgs[0].Addr))
3686 me := uint32(c.regoff(&p.RestArgs[1].Addr))
3687 sh := uint32(c.regoff(&p.From))
3688 o1 = OP_RLW(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), sh, mb, me)
3690 case 103: /* RLWMI rb,rs,$mb,$me,rt (M-form opcode)*/
3691 mb := uint32(c.regoff(&p.RestArgs[0].Addr))
3692 me := uint32(c.regoff(&p.RestArgs[1].Addr))
3693 o1 = OP_RLW(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.From.Reg), mb, me)
3703 func (c *ctxt9) vregoff(a *obj.Addr) int64 {
3711 func (c *ctxt9) regoff(a *obj.Addr) int32 {
3712 return int32(c.vregoff(a))
3715 func (c *ctxt9) oprrr(a obj.As) uint32 {
3718 return OPVCC(31, 266, 0, 0)
3720 return OPVCC(31, 266, 0, 1)
3722 return OPVCC(31, 266, 1, 0)
3724 return OPVCC(31, 266, 1, 1)
3726 return OPVCC(31, 10, 0, 0)
3728 return OPVCC(31, 10, 0, 1)
3730 return OPVCC(31, 10, 1, 0)
3732 return OPVCC(31, 10, 1, 1)
3734 return OPVCC(31, 138, 0, 0)
3736 return OPVCC(31, 138, 0, 1)
3738 return OPVCC(31, 138, 1, 0)
3740 return OPVCC(31, 138, 1, 1)
3742 return OPVCC(31, 234, 0, 0)
3744 return OPVCC(31, 234, 0, 1)
3746 return OPVCC(31, 234, 1, 0)
3748 return OPVCC(31, 234, 1, 1)
3750 return OPVCC(31, 202, 0, 0)
3752 return OPVCC(31, 202, 0, 1)
3754 return OPVCC(31, 202, 1, 0)
3756 return OPVCC(31, 202, 1, 1)
3758 return OPVCC(31, 170, 0, 0) /* addex - v3.0b */
3761 return OPVCC(31, 28, 0, 0)
3763 return OPVCC(31, 28, 0, 1)
3765 return OPVCC(31, 60, 0, 0)
3767 return OPVCC(31, 60, 0, 1)
3770 return OPVCC(31, 0, 0, 0) | 1<<21 /* L=1 */
3772 return OPVCC(31, 32, 0, 0) | 1<<21
3774 return OPVCC(31, 0, 0, 0) /* L=0 */
3776 return OPVCC(31, 32, 0, 0)
3778 return OPVCC(31, 508, 0, 0) /* cmpb - v2.05 */
3780 return OPVCC(31, 224, 0, 0) /* cmpeqb - v3.00 */
3783 return OPVCC(31, 26, 0, 0)
3785 return OPVCC(31, 26, 0, 1)
3787 return OPVCC(31, 58, 0, 0)
3789 return OPVCC(31, 58, 0, 1)
3792 return OPVCC(19, 257, 0, 0)
3794 return OPVCC(19, 129, 0, 0)
3796 return OPVCC(19, 289, 0, 0)
3798 return OPVCC(19, 225, 0, 0)
3800 return OPVCC(19, 33, 0, 0)
3802 return OPVCC(19, 449, 0, 0)
3804 return OPVCC(19, 417, 0, 0)
3806 return OPVCC(19, 193, 0, 0)
3809 return OPVCC(31, 86, 0, 0)
3811 return OPVCC(31, 470, 0, 0)
3813 return OPVCC(31, 54, 0, 0)
3815 return OPVCC(31, 278, 0, 0)
3817 return OPVCC(31, 246, 0, 0)
3819 return OPVCC(31, 1014, 0, 0)
3822 return OPVCC(31, 265, 0, 0) /* modud - v3.0 */
3824 return OPVCC(31, 267, 0, 0) /* moduw - v3.0 */
3826 return OPVCC(31, 777, 0, 0) /* modsd - v3.0 */
3828 return OPVCC(31, 779, 0, 0) /* modsw - v3.0 */
3831 return OPVCC(31, 491, 0, 0)
3834 return OPVCC(31, 491, 0, 1)
3837 return OPVCC(31, 491, 1, 0)
3840 return OPVCC(31, 491, 1, 1)
3843 return OPVCC(31, 459, 0, 0)
3846 return OPVCC(31, 459, 0, 1)
3849 return OPVCC(31, 459, 1, 0)
3852 return OPVCC(31, 459, 1, 1)
3855 return OPVCC(31, 489, 0, 0)
3858 return OPVCC(31, 489, 0, 1)
3861 return OPVCC(31, 425, 0, 0)
3864 return OPVCC(31, 425, 0, 1)
3867 return OPVCC(31, 393, 0, 0)
3870 return OPVCC(31, 393, 0, 1)
3873 return OPVCC(31, 489, 1, 0)
3876 return OPVCC(31, 489, 1, 1)
3878 case ADIVDU, AREMDU:
3879 return OPVCC(31, 457, 0, 0)
3882 return OPVCC(31, 457, 0, 1)
3885 return OPVCC(31, 457, 1, 0)
3888 return OPVCC(31, 457, 1, 1)
3891 return OPVCC(31, 854, 0, 0)
3894 return OPVCC(31, 284, 0, 0)
3896 return OPVCC(31, 284, 0, 1)
3899 return OPVCC(31, 954, 0, 0)
3901 return OPVCC(31, 954, 0, 1)
3903 return OPVCC(31, 922, 0, 0)
3905 return OPVCC(31, 922, 0, 1)
3907 return OPVCC(31, 986, 0, 0)
3909 return OPVCC(31, 986, 0, 1)
3912 return OPVCC(63, 264, 0, 0)
3914 return OPVCC(63, 264, 0, 1)
3916 return OPVCC(63, 21, 0, 0)
3918 return OPVCC(63, 21, 0, 1)
3920 return OPVCC(59, 21, 0, 0)
3922 return OPVCC(59, 21, 0, 1)
3924 return OPVCC(63, 32, 0, 0)
3926 return OPVCC(63, 0, 0, 0)
3928 return OPVCC(63, 846, 0, 0)
3930 return OPVCC(63, 846, 0, 1)
3932 return OPVCC(63, 974, 0, 0)
3934 return OPVCC(63, 974, 0, 1)
3936 return OPVCC(59, 846, 0, 0)
3938 return OPVCC(59, 846, 0, 1)
3940 return OPVCC(63, 14, 0, 0)
3942 return OPVCC(63, 14, 0, 1)
3944 return OPVCC(63, 15, 0, 0)
3946 return OPVCC(63, 15, 0, 1)
3948 return OPVCC(63, 814, 0, 0)
3950 return OPVCC(63, 814, 0, 1)
3952 return OPVCC(63, 815, 0, 0)
3954 return OPVCC(63, 815, 0, 1)
3956 return OPVCC(63, 18, 0, 0)
3958 return OPVCC(63, 18, 0, 1)
3960 return OPVCC(59, 18, 0, 0)
3962 return OPVCC(59, 18, 0, 1)
3964 return OPVCC(63, 29, 0, 0)
3966 return OPVCC(63, 29, 0, 1)
3968 return OPVCC(59, 29, 0, 0)
3970 return OPVCC(59, 29, 0, 1)
3972 case AFMOVS, AFMOVD:
3973 return OPVCC(63, 72, 0, 0) /* load */
3975 return OPVCC(63, 72, 0, 1)
3977 return OPVCC(63, 28, 0, 0)
3979 return OPVCC(63, 28, 0, 1)
3981 return OPVCC(59, 28, 0, 0)
3983 return OPVCC(59, 28, 0, 1)
3985 return OPVCC(63, 25, 0, 0)
3987 return OPVCC(63, 25, 0, 1)
3989 return OPVCC(59, 25, 0, 0)
3991 return OPVCC(59, 25, 0, 1)
3993 return OPVCC(63, 136, 0, 0)
3995 return OPVCC(63, 136, 0, 1)
3997 return OPVCC(63, 40, 0, 0)
3999 return OPVCC(63, 40, 0, 1)
4001 return OPVCC(63, 31, 0, 0)
4003 return OPVCC(63, 31, 0, 1)
4005 return OPVCC(59, 31, 0, 0)
4007 return OPVCC(59, 31, 0, 1)
4009 return OPVCC(63, 30, 0, 0)
4011 return OPVCC(63, 30, 0, 1)
4013 return OPVCC(59, 30, 0, 0)
4015 return OPVCC(59, 30, 0, 1)
4017 return OPVCC(63, 8, 0, 0)
4019 return OPVCC(63, 8, 0, 1)
4021 return OPVCC(59, 24, 0, 0)
4023 return OPVCC(59, 24, 0, 1)
4025 return OPVCC(63, 488, 0, 0)
4027 return OPVCC(63, 488, 0, 1)
4029 return OPVCC(63, 456, 0, 0)
4031 return OPVCC(63, 456, 0, 1)
4033 return OPVCC(63, 424, 0, 0)
4035 return OPVCC(63, 424, 0, 1)
4037 return OPVCC(63, 392, 0, 0)
4039 return OPVCC(63, 392, 0, 1)
4041 return OPVCC(63, 12, 0, 0)
4043 return OPVCC(63, 12, 0, 1)
4045 return OPVCC(63, 26, 0, 0)
4047 return OPVCC(63, 26, 0, 1)
4049 return OPVCC(63, 23, 0, 0)
4051 return OPVCC(63, 23, 0, 1)
4053 return OPVCC(63, 22, 0, 0)
4055 return OPVCC(63, 22, 0, 1)
4057 return OPVCC(59, 22, 0, 0)
4059 return OPVCC(59, 22, 0, 1)
4061 return OPVCC(63, 20, 0, 0)
4063 return OPVCC(63, 20, 0, 1)
4065 return OPVCC(59, 20, 0, 0)
4067 return OPVCC(59, 20, 0, 1)
4070 return OPVCC(31, 982, 0, 0)
4072 return OPVCC(19, 150, 0, 0)
4075 return OPVCC(63, 70, 0, 0)
4077 return OPVCC(63, 70, 0, 1)
4079 return OPVCC(63, 38, 0, 0)
4081 return OPVCC(63, 38, 0, 1)
4084 return OPVCC(31, 75, 0, 0)
4086 return OPVCC(31, 75, 0, 1)
4088 return OPVCC(31, 11, 0, 0)
4090 return OPVCC(31, 11, 0, 1)
4092 return OPVCC(31, 235, 0, 0)
4094 return OPVCC(31, 235, 0, 1)
4096 return OPVCC(31, 235, 1, 0)
4098 return OPVCC(31, 235, 1, 1)
4101 return OPVCC(31, 73, 0, 0)
4103 return OPVCC(31, 73, 0, 1)
4105 return OPVCC(31, 9, 0, 0)
4107 return OPVCC(31, 9, 0, 1)
4109 return OPVCC(31, 233, 0, 0)
4111 return OPVCC(31, 233, 0, 1)
4113 return OPVCC(31, 233, 1, 0)
4115 return OPVCC(31, 233, 1, 1)
4118 return OPVCC(31, 476, 0, 0)
4120 return OPVCC(31, 476, 0, 1)
4122 return OPVCC(31, 104, 0, 0)
4124 return OPVCC(31, 104, 0, 1)
4126 return OPVCC(31, 104, 1, 0)
4128 return OPVCC(31, 104, 1, 1)
4130 return OPVCC(31, 124, 0, 0)
4132 return OPVCC(31, 124, 0, 1)
4134 return OPVCC(31, 444, 0, 0)
4136 return OPVCC(31, 444, 0, 1)
4138 return OPVCC(31, 412, 0, 0)
4140 return OPVCC(31, 412, 0, 1)
4143 return OPVCC(31, 506, 0, 0) /* popcntd - v2.06 */
4145 return OPVCC(31, 378, 0, 0) /* popcntw - v2.06 */
4147 return OPVCC(31, 122, 0, 0) /* popcntb - v2.02 */
4149 return OPVCC(31, 538, 0, 0) /* cnttzw - v3.00 */
4151 return OPVCC(31, 538, 0, 1) /* cnttzw. - v3.00 */
4153 return OPVCC(31, 570, 0, 0) /* cnttzd - v3.00 */
4155 return OPVCC(31, 570, 0, 1) /* cnttzd. - v3.00 */
4158 return OPVCC(19, 50, 0, 0)
4160 return OPVCC(19, 51, 0, 0)
4162 return OPVCC(19, 18, 0, 0)
4164 return OPVCC(19, 274, 0, 0)
4167 return OPVCC(20, 0, 0, 0)
4169 return OPVCC(20, 0, 0, 1)
4171 return OPVCC(23, 0, 0, 0)
4173 return OPVCC(23, 0, 0, 1)
4176 return OPVCC(30, 8, 0, 0)
4178 return OPVCC(30, 0, 0, 1)
4181 return OPVCC(30, 9, 0, 0)
4183 return OPVCC(30, 9, 0, 1)
4186 return OPVCC(30, 0, 0, 0)
4188 return OPVCC(30, 0, 0, 1)
4190 return OPMD(30, 1, 0) // rldicr
4192 return OPMD(30, 1, 1) // rldicr.
4195 return OPMD(30, 2, 0) // rldic
4197 return OPMD(30, 2, 1) // rldic.
4200 return OPVCC(17, 1, 0, 0)
4203 return OPVCC(31, 24, 0, 0)
4205 return OPVCC(31, 24, 0, 1)
4207 return OPVCC(31, 27, 0, 0)
4209 return OPVCC(31, 27, 0, 1)
4212 return OPVCC(31, 792, 0, 0)
4214 return OPVCC(31, 792, 0, 1)
4216 return OPVCC(31, 794, 0, 0)
4218 return OPVCC(31, 794, 0, 1)
4221 return OPVCC(31, 445, 0, 0)
4223 return OPVCC(31, 445, 0, 1)
4226 return OPVCC(31, 536, 0, 0)
4228 return OPVCC(31, 536, 0, 1)
4230 return OPVCC(31, 539, 0, 0)
4232 return OPVCC(31, 539, 0, 1)
4235 return OPVCC(31, 40, 0, 0)
4237 return OPVCC(31, 40, 0, 1)
4239 return OPVCC(31, 40, 1, 0)
4241 return OPVCC(31, 40, 1, 1)
4243 return OPVCC(31, 8, 0, 0)
4245 return OPVCC(31, 8, 0, 1)
4247 return OPVCC(31, 8, 1, 0)
4249 return OPVCC(31, 8, 1, 1)
4251 return OPVCC(31, 136, 0, 0)
4253 return OPVCC(31, 136, 0, 1)
4255 return OPVCC(31, 136, 1, 0)
4257 return OPVCC(31, 136, 1, 1)
4259 return OPVCC(31, 232, 0, 0)
4261 return OPVCC(31, 232, 0, 1)
4263 return OPVCC(31, 232, 1, 0)
4265 return OPVCC(31, 232, 1, 1)
4267 return OPVCC(31, 200, 0, 0)
4269 return OPVCC(31, 200, 0, 1)
4271 return OPVCC(31, 200, 1, 0)
4273 return OPVCC(31, 200, 1, 1)
4276 return OPVCC(31, 598, 0, 0)
4278 return OPVCC(31, 598, 0, 0) | 1<<21
4281 return OPVCC(31, 598, 0, 0) | 2<<21
4284 return OPVCC(31, 306, 0, 0)
4286 return OPVCC(31, 274, 0, 0)
4288 return OPVCC(31, 566, 0, 0)
4290 return OPVCC(31, 498, 0, 0)
4292 return OPVCC(31, 434, 0, 0)
4294 return OPVCC(31, 915, 0, 0)
4296 return OPVCC(31, 851, 0, 0)
4298 return OPVCC(31, 402, 0, 0)
4301 return OPVCC(31, 4, 0, 0)
4303 return OPVCC(31, 68, 0, 0)
4305 /* Vector (VMX/Altivec) instructions */
4306 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
4307 /* are enabled starting at POWER6 (ISA 2.05). */
4309 return OPVX(4, 1028, 0, 0) /* vand - v2.03 */
4311 return OPVX(4, 1092, 0, 0) /* vandc - v2.03 */
4313 return OPVX(4, 1412, 0, 0) /* vnand - v2.07 */
4316 return OPVX(4, 1156, 0, 0) /* vor - v2.03 */
4318 return OPVX(4, 1348, 0, 0) /* vorc - v2.07 */
4320 return OPVX(4, 1284, 0, 0) /* vnor - v2.03 */
4322 return OPVX(4, 1220, 0, 0) /* vxor - v2.03 */
4324 return OPVX(4, 1668, 0, 0) /* veqv - v2.07 */
4327 return OPVX(4, 0, 0, 0) /* vaddubm - v2.03 */
4329 return OPVX(4, 64, 0, 0) /* vadduhm - v2.03 */
4331 return OPVX(4, 128, 0, 0) /* vadduwm - v2.03 */
4333 return OPVX(4, 192, 0, 0) /* vaddudm - v2.07 */
4335 return OPVX(4, 256, 0, 0) /* vadduqm - v2.07 */
4338 return OPVX(4, 320, 0, 0) /* vaddcuq - v2.07 */
4340 return OPVX(4, 384, 0, 0) /* vaddcuw - v2.03 */
4343 return OPVX(4, 512, 0, 0) /* vaddubs - v2.03 */
4345 return OPVX(4, 576, 0, 0) /* vadduhs - v2.03 */
4347 return OPVX(4, 640, 0, 0) /* vadduws - v2.03 */
4350 return OPVX(4, 768, 0, 0) /* vaddsbs - v2.03 */
4352 return OPVX(4, 832, 0, 0) /* vaddshs - v2.03 */
4354 return OPVX(4, 896, 0, 0) /* vaddsws - v2.03 */
4357 return OPVX(4, 60, 0, 0) /* vaddeuqm - v2.07 */
4359 return OPVX(4, 61, 0, 0) /* vaddecuq - v2.07 */
4362 return OPVX(4, 776, 0, 0) /* vmulesb - v2.03 */
4364 return OPVX(4, 264, 0, 0) /* vmulosb - v2.03 */
4366 return OPVX(4, 520, 0, 0) /* vmuleub - v2.03 */
4368 return OPVX(4, 8, 0, 0) /* vmuloub - v2.03 */
4370 return OPVX(4, 840, 0, 0) /* vmulesh - v2.03 */
4372 return OPVX(4, 328, 0, 0) /* vmulosh - v2.03 */
4374 return OPVX(4, 584, 0, 0) /* vmuleuh - v2.03 */
4376 return OPVX(4, 72, 0, 0) /* vmulouh - v2.03 */
4378 return OPVX(4, 904, 0, 0) /* vmulesw - v2.07 */
4380 return OPVX(4, 392, 0, 0) /* vmulosw - v2.07 */
4382 return OPVX(4, 648, 0, 0) /* vmuleuw - v2.07 */
4384 return OPVX(4, 136, 0, 0) /* vmulouw - v2.07 */
4386 return OPVX(4, 137, 0, 0) /* vmuluwm - v2.07 */
4389 return OPVX(4, 1032, 0, 0) /* vpmsumb - v2.07 */
4391 return OPVX(4, 1096, 0, 0) /* vpmsumh - v2.07 */
4393 return OPVX(4, 1160, 0, 0) /* vpmsumw - v2.07 */
4395 return OPVX(4, 1224, 0, 0) /* vpmsumd - v2.07 */
4398 return OPVX(4, 35, 0, 0) /* vmsumudm - v3.00b */
4401 return OPVX(4, 1024, 0, 0) /* vsububm - v2.03 */
4403 return OPVX(4, 1088, 0, 0) /* vsubuhm - v2.03 */
4405 return OPVX(4, 1152, 0, 0) /* vsubuwm - v2.03 */
4407 return OPVX(4, 1216, 0, 0) /* vsubudm - v2.07 */
4409 return OPVX(4, 1280, 0, 0) /* vsubuqm - v2.07 */
4412 return OPVX(4, 1344, 0, 0) /* vsubcuq - v2.07 */
4414 return OPVX(4, 1408, 0, 0) /* vsubcuw - v2.03 */
4417 return OPVX(4, 1536, 0, 0) /* vsububs - v2.03 */
4419 return OPVX(4, 1600, 0, 0) /* vsubuhs - v2.03 */
4421 return OPVX(4, 1664, 0, 0) /* vsubuws - v2.03 */
4424 return OPVX(4, 1792, 0, 0) /* vsubsbs - v2.03 */
4426 return OPVX(4, 1856, 0, 0) /* vsubshs - v2.03 */
4428 return OPVX(4, 1920, 0, 0) /* vsubsws - v2.03 */
4431 return OPVX(4, 62, 0, 0) /* vsubeuqm - v2.07 */
4433 return OPVX(4, 63, 0, 0) /* vsubecuq - v2.07 */
4436 return OPVX(4, 4, 0, 0) /* vrlb - v2.03 */
4438 return OPVX(4, 68, 0, 0) /* vrlh - v2.03 */
4440 return OPVX(4, 132, 0, 0) /* vrlw - v2.03 */
4442 return OPVX(4, 196, 0, 0) /* vrld - v2.07 */
4445 return OPVX(4, 1676, 0, 0) /* vmrgow - v2.07 */
4447 return OPVX(4, 1932, 0, 0) /* vmrgew - v2.07 */
4450 return OPVX(4, 260, 0, 0) /* vslh - v2.03 */
4452 return OPVX(4, 324, 0, 0) /* vslh - v2.03 */
4454 return OPVX(4, 388, 0, 0) /* vslw - v2.03 */
4456 return OPVX(4, 452, 0, 0) /* vsl - v2.03 */
4458 return OPVX(4, 1036, 0, 0) /* vsl - v2.03 */
4460 return OPVX(4, 516, 0, 0) /* vsrb - v2.03 */
4462 return OPVX(4, 580, 0, 0) /* vsrh - v2.03 */
4464 return OPVX(4, 644, 0, 0) /* vsrw - v2.03 */
4466 return OPVX(4, 708, 0, 0) /* vsr - v2.03 */
4468 return OPVX(4, 1100, 0, 0) /* vsro - v2.03 */
4470 return OPVX(4, 1476, 0, 0) /* vsld - v2.07 */
4472 return OPVX(4, 1732, 0, 0) /* vsrd - v2.07 */
4475 return OPVX(4, 772, 0, 0) /* vsrab - v2.03 */
4477 return OPVX(4, 836, 0, 0) /* vsrah - v2.03 */
4479 return OPVX(4, 900, 0, 0) /* vsraw - v2.03 */
4481 return OPVX(4, 964, 0, 0) /* vsrad - v2.07 */
4484 return OPVC(4, 1356, 0, 0) /* vbpermq - v2.07 */
4486 return OPVC(4, 1484, 0, 0) /* vbpermd - v3.00 */
4489 return OPVX(4, 1794, 0, 0) /* vclzb - v2.07 */
4491 return OPVX(4, 1858, 0, 0) /* vclzh - v2.07 */
4493 return OPVX(4, 1922, 0, 0) /* vclzw - v2.07 */
4495 return OPVX(4, 1986, 0, 0) /* vclzd - v2.07 */
4498 return OPVX(4, 1795, 0, 0) /* vpopcntb - v2.07 */
4500 return OPVX(4, 1859, 0, 0) /* vpopcnth - v2.07 */
4502 return OPVX(4, 1923, 0, 0) /* vpopcntw - v2.07 */
4504 return OPVX(4, 1987, 0, 0) /* vpopcntd - v2.07 */
4507 return OPVC(4, 6, 0, 0) /* vcmpequb - v2.03 */
4509 return OPVC(4, 6, 0, 1) /* vcmpequb. - v2.03 */
4511 return OPVC(4, 70, 0, 0) /* vcmpequh - v2.03 */
4513 return OPVC(4, 70, 0, 1) /* vcmpequh. - v2.03 */
4515 return OPVC(4, 134, 0, 0) /* vcmpequw - v2.03 */
4517 return OPVC(4, 134, 0, 1) /* vcmpequw. - v2.03 */
4519 return OPVC(4, 199, 0, 0) /* vcmpequd - v2.07 */
4521 return OPVC(4, 199, 0, 1) /* vcmpequd. - v2.07 */
4524 return OPVC(4, 518, 0, 0) /* vcmpgtub - v2.03 */
4526 return OPVC(4, 518, 0, 1) /* vcmpgtub. - v2.03 */
4528 return OPVC(4, 582, 0, 0) /* vcmpgtuh - v2.03 */
4530 return OPVC(4, 582, 0, 1) /* vcmpgtuh. - v2.03 */
4532 return OPVC(4, 646, 0, 0) /* vcmpgtuw - v2.03 */
4534 return OPVC(4, 646, 0, 1) /* vcmpgtuw. - v2.03 */
4536 return OPVC(4, 711, 0, 0) /* vcmpgtud - v2.07 */
4538 return OPVC(4, 711, 0, 1) /* vcmpgtud. v2.07 */
4540 return OPVC(4, 774, 0, 0) /* vcmpgtsb - v2.03 */
4542 return OPVC(4, 774, 0, 1) /* vcmpgtsb. - v2.03 */
4544 return OPVC(4, 838, 0, 0) /* vcmpgtsh - v2.03 */
4546 return OPVC(4, 838, 0, 1) /* vcmpgtsh. - v2.03 */
4548 return OPVC(4, 902, 0, 0) /* vcmpgtsw - v2.03 */
4550 return OPVC(4, 902, 0, 1) /* vcmpgtsw. - v2.03 */
4552 return OPVC(4, 967, 0, 0) /* vcmpgtsd - v2.07 */
4554 return OPVC(4, 967, 0, 1) /* vcmpgtsd. - v2.07 */
4557 return OPVC(4, 263, 0, 0) /* vcmpnezb - v3.00 */
4559 return OPVC(4, 263, 0, 1) /* vcmpnezb. - v3.00 */
4561 return OPVC(4, 7, 0, 0) /* vcmpneb - v3.00 */
4563 return OPVC(4, 7, 0, 1) /* vcmpneb. - v3.00 */
4565 return OPVC(4, 71, 0, 0) /* vcmpneh - v3.00 */
4567 return OPVC(4, 71, 0, 1) /* vcmpneh. - v3.00 */
4569 return OPVC(4, 135, 0, 0) /* vcmpnew - v3.00 */
4571 return OPVC(4, 135, 0, 1) /* vcmpnew. - v3.00 */
4574 return OPVX(4, 43, 0, 0) /* vperm - v2.03 */
4576 return OPVX(4, 45, 0, 0) /* vpermxor - v2.03 */
4578 return OPVX(4, 59, 0, 0) /* vpermr - v3.0 */
4581 return OPVX(4, 42, 0, 0) /* vsel - v2.03 */
4584 return OPVX(4, 1288, 0, 0) /* vcipher - v2.07 */
4586 return OPVX(4, 1289, 0, 0) /* vcipherlast - v2.07 */
4588 return OPVX(4, 1352, 0, 0) /* vncipher - v2.07 */
4590 return OPVX(4, 1353, 0, 0) /* vncipherlast - v2.07 */
4592 return OPVX(4, 1480, 0, 0) /* vsbox - v2.07 */
4593 /* End of vector instructions */
4595 /* Vector scalar (VSX) instructions */
4596 /* ISA 2.06 enables these for POWER7. */
4597 case AMFVSRD, AMFVRD, AMFFPRD:
4598 return OPVXX1(31, 51, 0) /* mfvsrd - v2.07 */
4600 return OPVXX1(31, 115, 0) /* mfvsrwz - v2.07 */
4602 return OPVXX1(31, 307, 0) /* mfvsrld - v3.00 */
4604 case AMTVSRD, AMTFPRD, AMTVRD:
4605 return OPVXX1(31, 179, 0) /* mtvsrd - v2.07 */
4607 return OPVXX1(31, 211, 0) /* mtvsrwa - v2.07 */
4609 return OPVXX1(31, 243, 0) /* mtvsrwz - v2.07 */
4611 return OPVXX1(31, 435, 0) /* mtvsrdd - v3.00 */
4613 return OPVXX1(31, 403, 0) /* mtvsrws - v3.00 */
4616 return OPVXX3(60, 130, 0) /* xxland - v2.06 */
4618 return OPVXX3(60, 138, 0) /* xxlandc - v2.06 */
4620 return OPVXX3(60, 186, 0) /* xxleqv - v2.07 */
4622 return OPVXX3(60, 178, 0) /* xxlnand - v2.07 */
4625 return OPVXX3(60, 170, 0) /* xxlorc - v2.07 */
4627 return OPVXX3(60, 162, 0) /* xxlnor - v2.06 */
4628 case AXXLOR, AXXLORQ:
4629 return OPVXX3(60, 146, 0) /* xxlor - v2.06 */
4631 return OPVXX3(60, 154, 0) /* xxlxor - v2.06 */
4634 return OPVXX4(60, 3, 0) /* xxsel - v2.06 */
4637 return OPVXX3(60, 18, 0) /* xxmrghw - v2.06 */
4639 return OPVXX3(60, 50, 0) /* xxmrglw - v2.06 */
4642 return OPVXX2(60, 164, 0) /* xxspltw - v2.06 */
4645 return OPVCC(60, 360, 0, 0) /* xxspltib - v3.0 */
4648 return OPVXX3(60, 26, 0) /* xxperm - v2.06 */
4650 return OPVXX3(60, 10, 0) /* xxpermdi - v2.06 */
4653 return OPVXX3(60, 2, 0) /* xxsldwi - v2.06 */
4656 return OPVXX2VA(60, 475, 31) /* xxbrq - v3.0 */
4658 return OPVXX2VA(60, 475, 23) /* xxbrd - v3.0 */
4660 return OPVXX2VA(60, 475, 15) /* xxbrw - v3.0 */
4662 return OPVXX2VA(60, 475, 7) /* xxbrh - v3.0 */
4665 return OPVXX2(60, 265, 0) /* xscvdpsp - v2.06 */
4667 return OPVXX2(60, 329, 0) /* xscvspdp - v2.06 */
4669 return OPVXX2(60, 267, 0) /* xscvdpspn - v2.07 */
4671 return OPVXX2(60, 331, 0) /* xscvspdpn - v2.07 */
4674 return OPVXX2(60, 393, 0) /* xvcvdpsp - v2.06 */
4676 return OPVXX2(60, 457, 0) /* xvcvspdp - v2.06 */
4679 return OPVXX2(60, 344, 0) /* xscvdpsxds - v2.06 */
4681 return OPVXX2(60, 88, 0) /* xscvdpsxws - v2.06 */
4683 return OPVXX2(60, 328, 0) /* xscvdpuxds - v2.06 */
4685 return OPVXX2(60, 72, 0) /* xscvdpuxws - v2.06 */
4688 return OPVXX2(60, 376, 0) /* xscvsxddp - v2.06 */
4690 return OPVXX2(60, 360, 0) /* xscvuxddp - v2.06 */
4692 return OPVXX2(60, 312, 0) /* xscvsxdsp - v2.06 */
4694 return OPVXX2(60, 296, 0) /* xscvuxdsp - v2.06 */
4697 return OPVXX2(60, 472, 0) /* xvcvdpsxds - v2.06 */
4699 return OPVXX2(60, 216, 0) /* xvcvdpsxws - v2.06 */
4701 return OPVXX2(60, 456, 0) /* xvcvdpuxds - v2.06 */
4703 return OPVXX2(60, 200, 0) /* xvcvdpuxws - v2.06 */
4705 return OPVXX2(60, 408, 0) /* xvcvspsxds - v2.07 */
4707 return OPVXX2(60, 152, 0) /* xvcvspsxws - v2.07 */
4709 return OPVXX2(60, 392, 0) /* xvcvspuxds - v2.07 */
4711 return OPVXX2(60, 136, 0) /* xvcvspuxws - v2.07 */
4714 return OPVXX2(60, 504, 0) /* xvcvsxddp - v2.06 */
4716 return OPVXX2(60, 248, 0) /* xvcvsxwdp - v2.06 */
4718 return OPVXX2(60, 488, 0) /* xvcvuxddp - v2.06 */
4720 return OPVXX2(60, 232, 0) /* xvcvuxwdp - v2.06 */
4722 return OPVXX2(60, 440, 0) /* xvcvsxdsp - v2.06 */
4724 return OPVXX2(60, 184, 0) /* xvcvsxwsp - v2.06 */
4726 return OPVXX2(60, 424, 0) /* xvcvuxdsp - v2.06 */
4728 return OPVXX2(60, 168, 0) /* xvcvuxwsp - v2.06 */
4729 /* End of VSX instructions */
4732 return OPVX(4, 48, 0, 0) /* maddhd - v3.00 */
4734 return OPVX(4, 49, 0, 0) /* maddhdu - v3.00 */
4736 return OPVX(4, 51, 0, 0) /* maddld - v3.00 */
4739 return OPVCC(31, 316, 0, 0)
4741 return OPVCC(31, 316, 0, 1)
4744 c.ctxt.Diag("bad r/r, r/r/r or r/r/r/r opcode %v", a)
// opirrr returns the 32-bit machine encoding for instructions that take an
// immediate plus three register operands (i/r/r/r form); the only visible
// encoding here is vsldoi (VMX/Altivec, OPVX primary opcode 4).
// NOTE(review): this extract is missing alternating source lines — the
// switch header, its case labels, and the closing lines are absent, so only
// selected return statements and the diagnostic remain. Confirm against the
// full file before editing.
4748 func (c *ctxt9) opirrr(a obj.As) uint32 {
4750 /* Vector (VMX/Altivec) instructions */
4751 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
4752 /* are enabled starting at POWER6 (ISA 2.05). */
4754 return OPVX(4, 44, 0, 0) /* vsldoi - v2.03 */
// Unmatched opcodes fall through to a diagnostic rather than emitting anything.
4757 c.ctxt.Diag("bad i/r/r/r opcode %v", a)
// opiirr returns the 32-bit machine encoding for instructions that take two
// immediates plus two register operands (i/i/r/r form); the visible entries
// are the SHA sigma vector ops vshasigmaw/vshasigmad (ISA 2.07, POWER8+).
// NOTE(review): the switch header and case labels are missing from this
// extract — only the return lines and the diagnostic survive. Confirm
// against the full file before editing.
4761 func (c *ctxt9) opiirr(a obj.As) uint32 {
4763 /* Vector (VMX/Altivec) instructions */
4764 /* ISA 2.07 enables these for POWER8 and beyond. */
4766 return OPVX(4, 1666, 0, 0) /* vshasigmaw - v2.07 */
4768 return OPVX(4, 1730, 0, 0) /* vshasigmad - v2.07 */
// Unmatched opcodes produce a diagnostic.
4771 c.ctxt.Diag("bad i/i/r/r opcode %v", a)
// opirr returns the 32-bit machine encoding for immediate/register (i/r and
// i/r/r) instruction forms: D-form arithmetic/logical immediates (addi,
// andi., ori, xori, ...), branch forms (opcode 18/16 and the extended
// conditional-branch mnemonics built with AOP_RRR on opcode 16), compare
// immediates, rotate-with-mask immediates (rlwinm/rldicl/rldicr/rldic/
// rldimi via OPVCC and OPMD), trap/system-call forms, and vector splat
// immediates.
// NOTE(review): this extract is missing alternating source lines — nearly
// all `case` labels, the switch header, and closing lines are absent, so
// the mapping from obj.As mnemonic to each return below cannot be read
// here; the encodings themselves match the inline comments where present.
// Confirm against the full file before editing.
4775 func (c *ctxt9) opirr(a obj.As) uint32 {
4778 return OPVCC(14, 0, 0, 0)
4780 return OPVCC(12, 0, 0, 0)
4782 return OPVCC(13, 0, 0, 0)
4784 return OPVCC(15, 0, 0, 0) /* ADDIS */
4787 return OPVCC(28, 0, 0, 0)
4789 return OPVCC(29, 0, 0, 0) /* ANDIS. */
// Branch encodings: primary opcode 18 (I-form) with |1 presumably setting
// the LK (link) bit — TODO confirm, case labels not visible here.
4792 return OPVCC(18, 0, 0, 0)
4794 return OPVCC(18, 0, 0, 0) | 1
4796 return OPVCC(18, 0, 0, 0) | 1
4798 return OPVCC(18, 0, 0, 0) | 1
4800 return OPVCC(16, 0, 0, 0)
4802 return OPVCC(16, 0, 0, 0) | 1
// Extended conditional branches: AOP_RRR packs BO/BI-style fields onto
// primary opcode 16.
4805 return AOP_RRR(16<<26, 12, 2, 0)
4807 return AOP_RRR(16<<26, 4, 0, 0)
4809 return AOP_RRR(16<<26, 12, 1, 0)
4811 return AOP_RRR(16<<26, 4, 1, 0)
4813 return AOP_RRR(16<<26, 12, 0, 0)
4815 return AOP_RRR(16<<26, 4, 2, 0)
4817 return AOP_RRR(16<<26, 4, 3, 0) // apparently unordered-clear
4819 return AOP_RRR(16<<26, 12, 3, 0) // apparently unordered-set
// Compare immediate: cmpi/cmpli with the L field selecting 64- vs 32-bit.
4822 return OPVCC(11, 0, 0, 0) | 1<<21 /* L=1 */
4824 return OPVCC(10, 0, 0, 0) | 1<<21
4826 return OPVCC(11, 0, 0, 0) /* L=0 */
4828 return OPVCC(10, 0, 0, 0)
4830 return OPVCC(31, 224, 0, 0) /* cmpeqb - v3.00 */
4833 return OPVCC(31, 597, 0, 0)
4836 return OPVCC(31, 774, 0, 0) /* copy - v3.00 */
4838 return OPVCC(31, 902, 0, 1) /* paste. - v3.00 */
4840 return OPVCC(31, 755, 0, 0) /* darn - v3.00 */
4842 case AMULLW, AMULLD:
4843 return OPVCC(7, 0, 0, 0) /* mulli works with MULLW or MULLD */
4846 return OPVCC(24, 0, 0, 0)
4848 return OPVCC(25, 0, 0, 0) /* ORIS */
// Rotate-with-mask immediates; OPMD builds the MD-form (64-bit rotates).
4851 return OPVCC(20, 0, 0, 0) /* rlwimi */
4853 return OPVCC(20, 0, 0, 1)
4855 return OPMD(30, 3, 0) /* rldimi */
4857 return OPMD(30, 3, 1) /* rldimi. */
4859 return OPMD(30, 3, 0) /* rldimi */
4861 return OPMD(30, 3, 1) /* rldimi. */
4863 return OPVCC(21, 0, 0, 0) /* rlwinm */
4865 return OPVCC(21, 0, 0, 1)
4868 return OPMD(30, 0, 0) /* rldicl */
4870 return OPMD(30, 0, 1) /* rldicl. */
4872 return OPMD(30, 1, 0) /* rldicr */
4874 return OPMD(30, 1, 1) /* rldicr. */
4876 return OPMD(30, 2, 0) /* rldic */
4878 return OPMD(30, 2, 1) /* rldic. */
4881 return OPVCC(31, 824, 0, 0)
4883 return OPVCC(31, 824, 0, 1)
4885 return OPVCC(31, (413 << 1), 0, 0)
4887 return OPVCC(31, (413 << 1), 0, 1)
4889 return OPVCC(31, 445, 0, 0)
4891 return OPVCC(31, 445, 0, 1)
4894 return OPVCC(31, 725, 0, 0)
4897 return OPVCC(8, 0, 0, 0)
// Trap / system-call primary opcodes (3 = twi, 2 = tdi — presumably; case
// labels not visible, confirm against full file).
4900 return OPVCC(3, 0, 0, 0)
4902 return OPVCC(2, 0, 0, 0)
4904 /* Vector (VMX/Altivec) instructions */
4905 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
4906 /* are enabled starting at POWER6 (ISA 2.05). */
4908 return OPVX(4, 524, 0, 0) /* vspltb - v2.03 */
4910 return OPVX(4, 588, 0, 0) /* vsplth - v2.03 */
4912 return OPVX(4, 652, 0, 0) /* vspltw - v2.03 */
4915 return OPVX(4, 780, 0, 0) /* vspltisb - v2.03 */
4917 return OPVX(4, 844, 0, 0) /* vspltish - v2.03 */
4919 return OPVX(4, 908, 0, 0) /* vspltisw - v2.03 */
4920 /* End of vector instructions */
4923 return OPVCC(63, 128, 0, 0) /* ftdiv - v2.06 */
4925 return OPVCC(63, 160, 0, 0) /* ftsqrt - v2.06 */
4928 return OPVCC(26, 0, 0, 0) /* XORIL */
4930 return OPVCC(27, 0, 0, 0) /* XORIS */
// Unmatched opcodes produce a diagnostic.
4933 c.ctxt.Diag("bad opcode i/r or i/r/r %v", a)
// opload returns the 32-bit machine encoding for load instructions that use
// an immediate displacement (D/DS/DQ-form): ld/ldu, lwz/lwzu, lwa, the
// byte/half/word/float MOV-family loads, lmw, and the VSX lxv variants.
// NOTE(review): this extract is missing alternating source lines — most
// `case` labels, the switch header, and the closing lines are absent; only
// two case labels and the return lines survive. The mnemonic in each inline
// comment identifies the encoding. Confirm against the full file before
// editing.
4940 func (c *ctxt9) opload(a obj.As) uint32 {
4943 return OPVCC(58, 0, 0, 0) /* ld */
4945 return OPVCC(58, 0, 0, 1) /* ldu */
4947 return OPVCC(32, 0, 0, 0) /* lwz */
4949 return OPVCC(33, 0, 0, 0) /* lwzu */
// lwa is DS-form under primary opcode 58; the |1<<1 sets its XO field.
4951 return OPVCC(58, 0, 0, 0) | 1<<1 /* lwa */
4953 return OPDQ(61, 1, 0) /* lxv - ISA v3.0 */
4955 return OPVXX1(31, 269, 0) /* lxvl - ISA v3.0 */
4957 return OPVXX1(31, 301, 0) /* lxvll - ISA v3.0 */
4959 return OPVXX1(31, 268, 0) /* lxvx - ISA v3.0 */
4963 return OPVCC(34, 0, 0, 0)
4966 case AMOVBU, AMOVBZU:
4967 return OPVCC(35, 0, 0, 0)
4969 return OPVCC(50, 0, 0, 0)
4971 return OPVCC(51, 0, 0, 0)
4973 return OPVCC(48, 0, 0, 0)
4975 return OPVCC(49, 0, 0, 0)
4977 return OPVCC(42, 0, 0, 0)
4979 return OPVCC(43, 0, 0, 0)
4981 return OPVCC(40, 0, 0, 0)
4983 return OPVCC(41, 0, 0, 0)
4985 return OPVCC(46, 0, 0, 0) /* lmw */
// Unmatched opcodes produce a diagnostic.
4988 c.ctxt.Diag("bad load opcode %v", a)
// oploadx returns the 32-bit machine encoding for indexed (X-form) loads —
// register+register addressing — including the load-reserve (lbarx/lharx/
// lwarx/ldarx), byte-reversed (lhbrx/lwbrx/ldbrx), VMX (lvebx..lvsr), and
// VSX (lxvx..lxsiwzx) variants.
// NOTE(review): this extract is missing alternating source lines — most
// `case` labels, the switch header, and the closing lines are absent
// (including the opening `/*` of the comment fragment just below). The
// mnemonic in each inline comment identifies the encoding. Confirm against
// the full file before editing.
4993 * indexed load a(b),d
4995 func (c *ctxt9) oploadx(a obj.As) uint32 {
4998 return OPVCC(31, 23, 0, 0) /* lwzx */
5000 return OPVCC(31, 55, 0, 0) /* lwzux */
5002 return OPVCC(31, 341, 0, 0) /* lwax */
5004 return OPVCC(31, 373, 0, 0) /* lwaux */
5007 return OPVCC(31, 87, 0, 0) /* lbzx */
5009 case AMOVBU, AMOVBZU:
5010 return OPVCC(31, 119, 0, 0) /* lbzux */
5012 return OPVCC(31, 599, 0, 0) /* lfdx */
5014 return OPVCC(31, 631, 0, 0) /* lfdux */
5016 return OPVCC(31, 535, 0, 0) /* lfsx */
5018 return OPVCC(31, 567, 0, 0) /* lfsux */
5020 return OPVCC(31, 855, 0, 0) /* lfiwax - power6, isa 2.05 */
5022 return OPVCC(31, 887, 0, 0) /* lfiwzx - power7, isa 2.06 */
5024 return OPVCC(31, 343, 0, 0) /* lhax */
5026 return OPVCC(31, 375, 0, 0) /* lhaux */
5028 return OPVCC(31, 790, 0, 0) /* lhbrx */
5030 return OPVCC(31, 534, 0, 0) /* lwbrx */
5032 return OPVCC(31, 532, 0, 0) /* ldbrx */
5034 return OPVCC(31, 279, 0, 0) /* lhzx */
5036 return OPVCC(31, 311, 0, 0) /* lhzux */
5038 return OPVCC(31, 310, 0, 0) /* eciwx */
// Load-and-reserve forms used by atomic sequences.
5040 return OPVCC(31, 52, 0, 0) /* lbarx */
5042 return OPVCC(31, 116, 0, 0) /* lharx */
5044 return OPVCC(31, 20, 0, 0) /* lwarx */
5046 return OPVCC(31, 84, 0, 0) /* ldarx */
5048 return OPVCC(31, 533, 0, 0) /* lswx */
5050 return OPVCC(31, 21, 0, 0) /* ldx */
5052 return OPVCC(31, 53, 0, 0) /* ldux */
5054 return OPVCC(31, 309, 0, 0) /* ldmx */
5056 /* Vector (VMX/Altivec) instructions */
5058 return OPVCC(31, 7, 0, 0) /* lvebx - v2.03 */
5060 return OPVCC(31, 39, 0, 0) /* lvehx - v2.03 */
5062 return OPVCC(31, 71, 0, 0) /* lvewx - v2.03 */
5064 return OPVCC(31, 103, 0, 0) /* lvx - v2.03 */
5066 return OPVCC(31, 359, 0, 0) /* lvxl - v2.03 */
5068 return OPVCC(31, 6, 0, 0) /* lvsl - v2.03 */
5070 return OPVCC(31, 38, 0, 0) /* lvsr - v2.03 */
5071 /* End of vector instructions */
5073 /* Vector scalar (VSX) instructions */
5075 return OPVXX1(31, 268, 0) /* lxvx - ISA v3.0 */
5077 return OPVXX1(31, 844, 0) /* lxvd2x - v2.06 */
5079 return OPVXX1(31, 780, 0) /* lxvw4x - v2.06 */
5081 return OPVXX1(31, 812, 0) /* lxvh8x - v3.00 */
5083 return OPVXX1(31, 876, 0) /* lxvb16x - v3.00 */
5085 return OPVXX1(31, 332, 0) /* lxvdsx - v2.06 */
5087 return OPVXX1(31, 588, 0) /* lxsdx - v2.06 */
5089 return OPVXX1(31, 76, 0) /* lxsiwax - v2.07 */
5091 return OPVXX1(31, 12, 0) /* lxsiwzx - v2.07 */
// Unmatched opcodes produce a diagnostic.
5094 c.ctxt.Diag("bad loadx opcode %v", a)
// opstore returns the 32-bit machine encoding for store instructions that
// use an immediate displacement (D/DS/DQ-form): stb/sth/stw/std and their
// update forms, the float stores stfs/stfd, stmw, stswi, and the VSX stxv
// variants.
// NOTE(review): this extract is missing alternating source lines — most
// `case` labels, the switch header, and the closing lines are absent; only
// a few case labels and the return lines survive. The mnemonic in each
// inline comment identifies the encoding. Confirm against the full file
// before editing.
5101 func (c *ctxt9) opstore(a obj.As) uint32 {
5104 return OPVCC(38, 0, 0, 0) /* stb */
5106 case AMOVBU, AMOVBZU:
5107 return OPVCC(39, 0, 0, 0) /* stbu */
5109 return OPVCC(54, 0, 0, 0) /* stfd */
5111 return OPVCC(55, 0, 0, 0) /* stfdu */
5113 return OPVCC(52, 0, 0, 0) /* stfs */
5115 return OPVCC(53, 0, 0, 0) /* stfsu */
5118 return OPVCC(44, 0, 0, 0) /* sth */
5120 case AMOVHZU, AMOVHU:
5121 return OPVCC(45, 0, 0, 0) /* sthu */
5123 return OPVCC(47, 0, 0, 0) /* stmw */
5125 return OPVCC(31, 725, 0, 0) /* stswi */
5128 return OPVCC(36, 0, 0, 0) /* stw */
5130 case AMOVWZU, AMOVWU:
5131 return OPVCC(37, 0, 0, 0) /* stwu */
5133 return OPVCC(62, 0, 0, 0) /* std */
5135 return OPVCC(62, 0, 0, 1) /* stdu */
5137 return OPDQ(61, 5, 0) /* stxv ISA 3.0 */
5139 return OPVXX1(31, 397, 0) /* stxvl ISA 3.0 */
5141 return OPVXX1(31, 429, 0) /* stxvll ISA 3.0 */
5143 return OPVXX1(31, 396, 0) /* stxvx - ISA v3.0 */
// Unmatched opcodes produce a diagnostic.
5147 c.ctxt.Diag("unknown store opcode %v", a)
5152 * indexed store s,a(b)
5154 func (c *ctxt9) opstorex(a obj.As) uint32 {
5157 return OPVCC(31, 215, 0, 0) /* stbx */
5159 case AMOVBU, AMOVBZU:
5160 return OPVCC(31, 247, 0, 0) /* stbux */
5162 return OPVCC(31, 727, 0, 0) /* stfdx */
5164 return OPVCC(31, 759, 0, 0) /* stfdux */
5166 return OPVCC(31, 663, 0, 0) /* stfsx */
5168 return OPVCC(31, 695, 0, 0) /* stfsux */
5170 return OPVCC(31, 983, 0, 0) /* stfiwx */
5173 return OPVCC(31, 407, 0, 0) /* sthx */
5175 return OPVCC(31, 918, 0, 0) /* sthbrx */
5177 case AMOVHZU, AMOVHU:
5178 return OPVCC(31, 439, 0, 0) /* sthux */
5181 return OPVCC(31, 151, 0, 0) /* stwx */
5183 case AMOVWZU, AMOVWU:
5184 return OPVCC(31, 183, 0, 0) /* stwux */
5186 return OPVCC(31, 661, 0, 0) /* stswx */
5188 return OPVCC(31, 662, 0, 0) /* stwbrx */
5190 return OPVCC(31, 660, 0, 0) /* stdbrx */
5192 return OPVCC(31, 694, 0, 1) /* stbcx. */
5194 return OPVCC(31, 726, 0, 1) /* sthcx. */
5196 return OPVCC(31, 150, 0, 1) /* stwcx. */
5198 return OPVCC(31, 214, 0, 1) /* stwdx. */
5200 return OPVCC(31, 438, 0, 0) /* ecowx */
5202 return OPVCC(31, 149, 0, 0) /* stdx */
5204 return OPVCC(31, 181, 0, 0) /* stdux */
5206 /* Vector (VMX/Altivec) instructions */
5208 return OPVCC(31, 135, 0, 0) /* stvebx - v2.03 */
5210 return OPVCC(31, 167, 0, 0) /* stvehx - v2.03 */
5212 return OPVCC(31, 199, 0, 0) /* stvewx - v2.03 */
5214 return OPVCC(31, 231, 0, 0) /* stvx - v2.03 */
5216 return OPVCC(31, 487, 0, 0) /* stvxl - v2.03 */
5217 /* End of vector instructions */
5219 /* Vector scalar (VSX) instructions */
5221 return OPVXX1(31, 396, 0) /* stxvx - v3.0 */
5223 return OPVXX1(31, 972, 0) /* stxvd2x - v2.06 */
5225 return OPVXX1(31, 908, 0) /* stxvw4x - v2.06 */
5227 return OPVXX1(31, 940, 0) /* stxvh8x - v3.0 */
5229 return OPVXX1(31, 1004, 0) /* stxvb16x - v3.0 */
5232 return OPVXX1(31, 716, 0) /* stxsdx - v2.06 */
5235 return OPVXX1(31, 140, 0) /* stxsiwx - v2.07 */
5237 /* End of vector scalar instructions */
5241 c.ctxt.Diag("unknown storex opcode %v", a)