1 // cmd/9l/optab.c, cmd/9l/asmout.c from Vita Nuova.
3 // Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
4 // Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
5 // Portions Copyright © 1997-1999 Vita Nuova Limited
6 // Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com)
7 // Portions Copyright © 2004,2006 Bruce Ellis
8 // Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
9 // Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others
10 // Portions Copyright © 2009 The Go Authors. All rights reserved.
12 // Permission is hereby granted, free of charge, to any person obtaining a copy
13 // of this software and associated documentation files (the "Software"), to deal
14 // in the Software without restriction, including without limitation the rights
15 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
16 // copies of the Software, and to permit persons to whom the Software is
17 // furnished to do so, subject to the following conditions:
19 // The above copyright notice and this permission notice shall be included in
20 // all copies or substantial portions of the Software.
22 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
23 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
24 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
25 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
26 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
27 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
43 // ctxt9 holds state while assembling a single function.
44 // Each function gets a fresh ctxt9.
45 // This allows for multiple functions to be safely concurrently assembled.
55 // Instruction layout.
63 a1 uint8 // p.From argument (obj.Addr). p is of type obj.Prog.
64 a2 uint8 // p.Reg argument (int16 Register)
65 a3 uint8 // p.RestArgs[0] (obj.AddrPos)
66 a4 uint8 // p.RestArgs[1]
67 a5 uint8 // p.RestArgs[2]
68 a6 uint8 // p.To (obj.Addr)
69 type_ int8 // cases in asmout below. E.g., 44 = st r,(ra+rb); 45 = ld (ra+rb), r
70 size int8 // Text space in bytes to lay operation
72 // A prefixed instruction is generated by this opcode. It cannot be placed
73 // across a 64B PC address boundary. Opcodes should not translate to more
74 // than one prefixed instruction. The prefixed instruction should be written
75 // first (e.g. when Optab.size > 8).
78 asmout func(*ctxt9, *obj.Prog, *Optab, *[5]uint32)
81 // optab contains an array, to be sliced by oprange, of the accepted operand
82 // combinations for each instruction. For clarity, unused arguments and fields
83 // are omitted rather than explicitly enumerated; an omitted argument or value
84 // always takes the default (zero) value for its type.
86 // optab does not list every valid ppc64 opcode, it enumerates representative
87 // operand combinations for a class of instruction. The variable oprange indexes
88 // all valid ppc64 opcodes.
90 // oprange is initialized to point to a slice within optab which contains the valid
91 // operand combinations for a given instruction. This is initialized from buildop.
93 // Likewise, each slice of optab is dynamically sorted using the ocmp Sort interface
94 // to arrange entries to minimize text size of each opcode.
96 {as: obj.ATEXT, a1: C_LOREG, a6: C_TEXTSIZE, type_: 0, size: 0},
97 {as: obj.ATEXT, a1: C_LOREG, a3: C_LCON, a6: C_TEXTSIZE, type_: 0, size: 0},
98 {as: obj.ATEXT, a1: C_ADDR, a6: C_TEXTSIZE, type_: 0, size: 0},
99 {as: obj.ATEXT, a1: C_ADDR, a3: C_LCON, a6: C_TEXTSIZE, type_: 0, size: 0},
101 {as: AADD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
102 {as: AADD, a1: C_REG, a6: C_REG, type_: 2, size: 4},
103 {as: AADD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
104 {as: AADD, a1: C_SCON, a6: C_REG, type_: 4, size: 4},
105 {as: AADD, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
106 {as: AADD, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4},
107 {as: AADD, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 20, size: 4},
108 {as: AADD, a1: C_UCON, a6: C_REG, type_: 20, size: 4},
109 {as: AADD, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 22, size: 8},
110 {as: AADD, a1: C_ANDCON, a6: C_REG, type_: 22, size: 8},
111 {as: AADD, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12},
112 {as: AADD, a1: C_LCON, a6: C_REG, type_: 22, size: 12},
113 {as: AADDIS, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 20, size: 4},
114 {as: AADDIS, a1: C_ADDCON, a6: C_REG, type_: 20, size: 4},
115 {as: AADDC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
116 {as: AADDC, a1: C_REG, a6: C_REG, type_: 2, size: 4},
117 {as: AADDC, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
118 {as: AADDC, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4},
119 {as: AADDC, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12},
120 {as: AADDC, a1: C_LCON, a6: C_REG, type_: 22, size: 12},
121 {as: AAND, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, /* logical, no literal */
122 {as: AAND, a1: C_REG, a6: C_REG, type_: 6, size: 4},
123 {as: AANDCC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
124 {as: AANDCC, a1: C_REG, a6: C_REG, type_: 6, size: 4},
125 {as: AANDCC, a1: C_ANDCON, a6: C_REG, type_: 58, size: 4},
126 {as: AANDCC, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 58, size: 4},
127 {as: AANDCC, a1: C_UCON, a6: C_REG, type_: 59, size: 4},
128 {as: AANDCC, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
129 {as: AANDCC, a1: C_ADDCON, a6: C_REG, type_: 23, size: 8},
130 {as: AANDCC, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 23, size: 8},
131 {as: AANDCC, a1: C_LCON, a6: C_REG, type_: 23, size: 12},
132 {as: AANDCC, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 23, size: 12},
133 {as: AANDISCC, a1: C_ANDCON, a6: C_REG, type_: 59, size: 4},
134 {as: AANDISCC, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
135 {as: AMULLW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
136 {as: AMULLW, a1: C_REG, a6: C_REG, type_: 2, size: 4},
137 {as: AMULLW, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
138 {as: AMULLW, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4},
139 {as: AMULLW, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
140 {as: AMULLW, a1: C_ANDCON, a6: C_REG, type_: 4, size: 4},
141 {as: AMULLW, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12},
142 {as: AMULLW, a1: C_LCON, a6: C_REG, type_: 22, size: 12},
143 {as: ASUBC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 10, size: 4},
144 {as: ASUBC, a1: C_REG, a6: C_REG, type_: 10, size: 4},
145 {as: ASUBC, a1: C_REG, a3: C_ADDCON, a6: C_REG, type_: 27, size: 4},
146 {as: ASUBC, a1: C_REG, a3: C_LCON, a6: C_REG, type_: 28, size: 12},
147 {as: AOR, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, /* logical, literal not cc (or/xor) */
148 {as: AOR, a1: C_REG, a6: C_REG, type_: 6, size: 4},
149 {as: AOR, a1: C_ANDCON, a6: C_REG, type_: 58, size: 4},
150 {as: AOR, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 58, size: 4},
151 {as: AOR, a1: C_UCON, a6: C_REG, type_: 59, size: 4},
152 {as: AOR, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
153 {as: AOR, a1: C_ADDCON, a6: C_REG, type_: 23, size: 8},
154 {as: AOR, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 23, size: 8},
155 {as: AOR, a1: C_LCON, a6: C_REG, type_: 23, size: 12},
156 {as: AOR, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 23, size: 12},
157 {as: AORIS, a1: C_ANDCON, a6: C_REG, type_: 59, size: 4},
158 {as: AORIS, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
159 {as: ADIVW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4}, /* op r1[,r2],r3 */
160 {as: ADIVW, a1: C_REG, a6: C_REG, type_: 2, size: 4},
161 {as: ASUB, a1: C_REG, a2: C_REG, a6: C_REG, type_: 10, size: 4}, /* op r2[,r1],r3 */
162 {as: ASUB, a1: C_REG, a6: C_REG, type_: 10, size: 4},
163 {as: ASLW, a1: C_REG, a6: C_REG, type_: 6, size: 4},
164 {as: ASLW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
165 {as: ASLD, a1: C_REG, a6: C_REG, type_: 6, size: 4},
166 {as: ASLD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
167 {as: ASLD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 25, size: 4},
168 {as: ASLD, a1: C_SCON, a6: C_REG, type_: 25, size: 4},
169 {as: AEXTSWSLI, a1: C_SCON, a6: C_REG, type_: 25, size: 4},
170 {as: AEXTSWSLI, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 25, size: 4},
171 {as: ASLW, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 57, size: 4},
172 {as: ASLW, a1: C_SCON, a6: C_REG, type_: 57, size: 4},
173 {as: ASRAW, a1: C_REG, a6: C_REG, type_: 6, size: 4},
174 {as: ASRAW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
175 {as: ASRAW, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 56, size: 4},
176 {as: ASRAW, a1: C_SCON, a6: C_REG, type_: 56, size: 4},
177 {as: ASRAD, a1: C_REG, a6: C_REG, type_: 6, size: 4},
178 {as: ASRAD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
179 {as: ASRAD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 56, size: 4},
180 {as: ASRAD, a1: C_SCON, a6: C_REG, type_: 56, size: 4},
181 {as: ARLWMI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 62, size: 4},
182 {as: ARLWMI, a1: C_SCON, a2: C_REG, a3: C_SCON, a4: C_SCON, a6: C_REG, type_: 102, size: 4},
183 {as: ARLWMI, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 63, size: 4},
184 {as: ARLWMI, a1: C_REG, a2: C_REG, a3: C_SCON, a4: C_SCON, a6: C_REG, type_: 103, size: 4},
185 {as: ACLRLSLWI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 62, size: 4},
186 {as: ARLDMI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 30, size: 4},
187 {as: ARLDC, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 29, size: 4},
188 {as: ARLDCL, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 29, size: 4},
189 {as: ARLDCL, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
190 {as: ARLDICL, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
191 {as: ARLDICL, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
192 {as: ARLDCL, a1: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
193 {as: AFADD, a1: C_FREG, a6: C_FREG, type_: 2, size: 4},
194 {as: AFADD, a1: C_FREG, a2: C_FREG, a6: C_FREG, type_: 2, size: 4},
195 {as: AFABS, a1: C_FREG, a6: C_FREG, type_: 33, size: 4},
196 {as: AFABS, a6: C_FREG, type_: 33, size: 4},
197 {as: AFMADD, a1: C_FREG, a2: C_FREG, a3: C_FREG, a6: C_FREG, type_: 34, size: 4},
198 {as: AFMUL, a1: C_FREG, a6: C_FREG, type_: 32, size: 4},
199 {as: AFMUL, a1: C_FREG, a2: C_FREG, a6: C_FREG, type_: 32, size: 4},
201 {as: AMOVBU, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
202 {as: AMOVBU, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
203 {as: AMOVBU, a1: C_SOREG, a6: C_REG, type_: 8, size: 8},
204 {as: AMOVBU, a1: C_XOREG, a6: C_REG, type_: 109, size: 8},
206 {as: AMOVBZU, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
207 {as: AMOVBZU, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
208 {as: AMOVBZU, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
209 {as: AMOVBZU, a1: C_XOREG, a6: C_REG, type_: 109, size: 4},
211 {as: AMOVHBR, a1: C_REG, a6: C_XOREG, type_: 44, size: 4},
212 {as: AMOVHBR, a1: C_XOREG, a6: C_REG, type_: 45, size: 4},
214 {as: AMOVB, a1: C_ADDR, a6: C_REG, type_: 75, size: 12},
215 {as: AMOVB, a1: C_LOREG, a6: C_REG, type_: 36, size: 12},
216 {as: AMOVB, a1: C_SOREG, a6: C_REG, type_: 8, size: 8},
217 {as: AMOVB, a1: C_XOREG, a6: C_REG, type_: 109, size: 8},
218 {as: AMOVB, a1: C_REG, a6: C_ADDR, type_: 74, size: 8},
219 {as: AMOVB, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
220 {as: AMOVB, a1: C_REG, a6: C_LOREG, type_: 35, size: 8},
221 {as: AMOVB, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
222 {as: AMOVB, a1: C_REG, a6: C_REG, type_: 13, size: 4},
224 {as: AMOVBZ, a1: C_ADDR, a6: C_REG, type_: 75, size: 8},
225 {as: AMOVBZ, a1: C_LOREG, a6: C_REG, type_: 36, size: 8},
226 {as: AMOVBZ, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
227 {as: AMOVBZ, a1: C_XOREG, a6: C_REG, type_: 109, size: 4},
228 {as: AMOVBZ, a1: C_REG, a6: C_ADDR, type_: 74, size: 8},
229 {as: AMOVBZ, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
230 {as: AMOVBZ, a1: C_REG, a6: C_LOREG, type_: 35, size: 8},
231 {as: AMOVBZ, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
232 {as: AMOVBZ, a1: C_REG, a6: C_REG, type_: 13, size: 4},
234 {as: AMOVD, a1: C_ADDCON, a6: C_REG, type_: 3, size: 4},
235 {as: AMOVD, a1: C_ANDCON, a6: C_REG, type_: 3, size: 4},
236 {as: AMOVD, a1: C_UCON, a6: C_REG, type_: 3, size: 4},
237 {as: AMOVD, a1: C_LCON, a6: C_REG, type_: 19, size: 8},
238 {as: AMOVD, a1: C_SACON, a6: C_REG, type_: 3, size: 4},
239 {as: AMOVD, a1: C_LACON, a6: C_REG, type_: 26, size: 8},
240 {as: AMOVD, a1: C_ADDR, a6: C_REG, type_: 75, size: 8},
241 {as: AMOVD, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
242 {as: AMOVD, a1: C_XOREG, a6: C_REG, type_: 109, size: 4},
243 {as: AMOVD, a1: C_SOREG, a6: C_SPR, type_: 107, size: 8},
244 {as: AMOVD, a1: C_LOREG, a6: C_REG, type_: 36, size: 8},
245 {as: AMOVD, a1: C_TLS_LE, a6: C_REG, type_: 79, size: 8},
246 {as: AMOVD, a1: C_TLS_IE, a6: C_REG, type_: 80, size: 12},
247 {as: AMOVD, a1: C_SPR, a6: C_REG, type_: 66, size: 4},
248 {as: AMOVD, a1: C_REG, a6: C_ADDR, type_: 74, size: 8},
249 {as: AMOVD, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
250 {as: AMOVD, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
251 {as: AMOVD, a1: C_SPR, a6: C_SOREG, type_: 106, size: 8},
252 {as: AMOVD, a1: C_REG, a6: C_LOREG, type_: 35, size: 8},
253 {as: AMOVD, a1: C_REG, a6: C_SPR, type_: 66, size: 4},
254 {as: AMOVD, a1: C_REG, a6: C_REG, type_: 13, size: 4},
256 {as: AMOVW, a1: C_ADDCON, a6: C_REG, type_: 3, size: 4},
257 {as: AMOVW, a1: C_ANDCON, a6: C_REG, type_: 3, size: 4},
258 {as: AMOVW, a1: C_UCON, a6: C_REG, type_: 3, size: 4},
259 {as: AMOVW, a1: C_LCON, a6: C_REG, type_: 19, size: 8},
260 {as: AMOVW, a1: C_SACON, a6: C_REG, type_: 3, size: 4},
261 {as: AMOVW, a1: C_LACON, a6: C_REG, type_: 26, size: 8},
262 {as: AMOVW, a1: C_ADDR, a6: C_REG, type_: 75, size: 8},
263 {as: AMOVW, a1: C_CREG, a6: C_REG, type_: 68, size: 4},
264 {as: AMOVW, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
265 {as: AMOVW, a1: C_LOREG, a6: C_REG, type_: 36, size: 8},
266 {as: AMOVW, a1: C_XOREG, a6: C_REG, type_: 109, size: 4},
267 {as: AMOVW, a1: C_SPR, a6: C_REG, type_: 66, size: 4},
268 {as: AMOVW, a1: C_REG, a6: C_ADDR, type_: 74, size: 8},
269 {as: AMOVW, a1: C_REG, a6: C_CREG, type_: 69, size: 4},
270 {as: AMOVW, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
271 {as: AMOVW, a1: C_REG, a6: C_LOREG, type_: 35, size: 8},
272 {as: AMOVW, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
273 {as: AMOVW, a1: C_REG, a6: C_SPR, type_: 66, size: 4},
274 {as: AMOVW, a1: C_REG, a6: C_REG, type_: 13, size: 4},
276 {as: AFMOVD, a1: C_ADDCON, a6: C_FREG, type_: 24, size: 8},
277 {as: AFMOVD, a1: C_SOREG, a6: C_FREG, type_: 8, size: 4},
278 {as: AFMOVD, a1: C_XOREG, a6: C_FREG, type_: 109, size: 4},
279 {as: AFMOVD, a1: C_LOREG, a6: C_FREG, type_: 36, size: 8},
280 {as: AFMOVD, a1: C_ZCON, a6: C_FREG, type_: 24, size: 4},
281 {as: AFMOVD, a1: C_ADDR, a6: C_FREG, type_: 75, size: 8},
282 {as: AFMOVD, a1: C_FREG, a6: C_FREG, type_: 33, size: 4},
283 {as: AFMOVD, a1: C_FREG, a6: C_SOREG, type_: 7, size: 4},
284 {as: AFMOVD, a1: C_FREG, a6: C_XOREG, type_: 108, size: 4},
285 {as: AFMOVD, a1: C_FREG, a6: C_LOREG, type_: 35, size: 8},
286 {as: AFMOVD, a1: C_FREG, a6: C_ADDR, type_: 74, size: 8},
288 {as: AFMOVSX, a1: C_XOREG, a6: C_FREG, type_: 45, size: 4},
289 {as: AFMOVSX, a1: C_FREG, a6: C_XOREG, type_: 44, size: 4},
291 {as: AFMOVSZ, a1: C_ZOREG, a6: C_FREG, type_: 45, size: 4},
292 {as: AFMOVSZ, a1: C_XOREG, a6: C_FREG, type_: 45, size: 4},
294 {as: AMOVFL, a1: C_CREG, a6: C_CREG, type_: 67, size: 4},
295 {as: AMOVFL, a1: C_FPSCR, a6: C_CREG, type_: 73, size: 4},
296 {as: AMOVFL, a1: C_FPSCR, a6: C_FREG, type_: 53, size: 4},
297 {as: AMOVFL, a1: C_FREG, a3: C_LCON, a6: C_FPSCR, type_: 64, size: 4},
298 {as: AMOVFL, a1: C_FREG, a6: C_FPSCR, type_: 64, size: 4},
299 {as: AMOVFL, a1: C_LCON, a6: C_FPSCR, type_: 65, size: 4},
300 {as: AMOVFL, a1: C_REG, a6: C_CREG, type_: 69, size: 4},
301 {as: AMOVFL, a1: C_REG, a6: C_LCON, type_: 69, size: 4},
303 {as: ASYSCALL, type_: 5, size: 4},
304 {as: ASYSCALL, a1: C_REG, type_: 77, size: 12},
305 {as: ASYSCALL, a1: C_SCON, type_: 77, size: 12},
306 {as: ABEQ, a6: C_SBRA, type_: 16, size: 4},
307 {as: ABEQ, a1: C_CREG, a6: C_SBRA, type_: 16, size: 4},
308 {as: ABR, a6: C_LBRA, type_: 11, size: 4}, // b label
309 {as: ABR, a6: C_LBRAPIC, type_: 11, size: 8}, // b label; nop
310 {as: ABR, a6: C_LR, type_: 18, size: 4}, // blr
311 {as: ABR, a6: C_CTR, type_: 18, size: 4}, // bctr
312 {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_SBRA, type_: 16, size: 4}, // bc bo, bi, label
313 {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_LBRA, type_: 17, size: 4}, // bc bo, bi, label
314 {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_LR, type_: 18, size: 4}, // bclr bo, bi
315 {as: ABC, a1: C_SCON, a2: C_CRBIT, a3: C_SCON, a6: C_LR, type_: 18, size: 4}, // bclr bo, bi, bh
316 {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_CTR, type_: 18, size: 4}, // bcctr bo, bi
317 {as: ABDNZ, a6: C_SBRA, type_: 16, size: 4},
318 {as: ASYNC, type_: 46, size: 4},
319 {as: AWORD, a1: C_LCON, type_: 40, size: 4},
320 {as: ADWORD, a1: C_64CON, type_: 31, size: 8},
321 {as: ADWORD, a1: C_LACON, type_: 31, size: 8},
322 {as: AADDME, a1: C_REG, a6: C_REG, type_: 47, size: 4},
323 {as: AEXTSB, a1: C_REG, a6: C_REG, type_: 48, size: 4},
324 {as: AEXTSB, a6: C_REG, type_: 48, size: 4},
325 {as: AISEL, a1: C_U5CON, a2: C_REG, a3: C_REG, a6: C_REG, type_: 84, size: 4},
326 {as: AISEL, a1: C_CRBIT, a2: C_REG, a3: C_REG, a6: C_REG, type_: 84, size: 4},
327 {as: ANEG, a1: C_REG, a6: C_REG, type_: 47, size: 4},
328 {as: ANEG, a6: C_REG, type_: 47, size: 4},
329 {as: AREM, a1: C_REG, a6: C_REG, type_: 50, size: 12},
330 {as: AREM, a1: C_REG, a2: C_REG, a6: C_REG, type_: 50, size: 12},
331 {as: AREMU, a1: C_REG, a6: C_REG, type_: 50, size: 16},
332 {as: AREMU, a1: C_REG, a2: C_REG, a6: C_REG, type_: 50, size: 16},
333 {as: AREMD, a1: C_REG, a6: C_REG, type_: 51, size: 12},
334 {as: AREMD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 51, size: 12},
335 {as: AMTFSB0, a1: C_SCON, type_: 52, size: 4},
336 /* Other ISA 2.05+ instructions */
337 {as: APOPCNTD, a1: C_REG, a6: C_REG, type_: 93, size: 4}, /* population count, x-form */
338 {as: ACMPB, a1: C_REG, a2: C_REG, a6: C_REG, type_: 92, size: 4}, /* compare byte, x-form */
339 {as: ACMPEQB, a1: C_REG, a2: C_REG, a6: C_CREG, type_: 92, size: 4}, /* compare equal byte, x-form, ISA 3.0 */
340 {as: ACMPEQB, a1: C_REG, a6: C_REG, type_: 70, size: 4},
341 {as: AFTDIV, a1: C_FREG, a2: C_FREG, a6: C_SCON, type_: 92, size: 4}, /* floating test for sw divide, x-form */
342 {as: AFTSQRT, a1: C_FREG, a6: C_SCON, type_: 93, size: 4}, /* floating test for sw square root, x-form */
343 {as: ACOPY, a1: C_REG, a6: C_REG, type_: 92, size: 4}, /* copy/paste facility, x-form */
344 {as: ADARN, a1: C_SCON, a6: C_REG, type_: 92, size: 4}, /* deliver random number, x-form */
345 {as: AMADDHD, a1: C_REG, a2: C_REG, a3: C_REG, a6: C_REG, type_: 83, size: 4}, /* multiply-add high/low doubleword, va-form */
346 {as: AADDEX, a1: C_REG, a2: C_REG, a3: C_SCON, a6: C_REG, type_: 94, size: 4}, /* add extended using alternate carry, z23-form */
347 {as: ACRAND, a1: C_CRBIT, a2: C_CRBIT, a6: C_CRBIT, type_: 2, size: 4}, /* logical ops for condition register bits xl-form */
349 /* Vector instructions */
352 {as: ALVEBX, a1: C_XOREG, a6: C_VREG, type_: 45, size: 4}, /* vector load, x-form */
355 {as: ASTVEBX, a1: C_VREG, a6: C_XOREG, type_: 44, size: 4}, /* vector store, x-form */
358 {as: AVAND, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector and, vx-form */
359 {as: AVOR, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector or, vx-form */
362 {as: AVADDUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add unsigned modulo, vx-form */
363 {as: AVADDCU, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add & write carry unsigned, vx-form */
364 {as: AVADDUS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add unsigned saturate, vx-form */
365 {as: AVADDSS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add signed saturate, vx-form */
366 {as: AVADDE, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector add extended, va-form */
368 /* Vector subtract */
369 {as: AVSUBUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract unsigned modulo, vx-form */
370 {as: AVSUBCU, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract & write carry unsigned, vx-form */
371 {as: AVSUBUS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract unsigned saturate, vx-form */
372 {as: AVSUBSS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract signed saturate, vx-form */
373 {as: AVSUBE, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector subtract extended, va-form */
375 /* Vector multiply */
376 {as: AVMULESB, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector multiply, vx-form */
377 {as: AVPMSUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector polynomial multiply & sum, vx-form */
378 {as: AVMSUMUDM, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector multiply-sum, va-form */
381 {as: AVR, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector rotate, vx-form */
384 {as: AVS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector shift, vx-form */
385 {as: AVSA, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector shift algebraic, vx-form */
386 {as: AVSOI, a1: C_ANDCON, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector shift by octet immediate, va-form */
389 {as: AVCLZ, a1: C_VREG, a6: C_VREG, type_: 85, size: 4}, /* vector count leading zeros, vx-form */
390 {as: AVPOPCNT, a1: C_VREG, a6: C_VREG, type_: 85, size: 4}, /* vector population count, vx-form */
393 {as: AVCMPEQ, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare equal, vc-form */
394 {as: AVCMPGT, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare greater than, vc-form */
395 {as: AVCMPNEZB, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare not equal, vx-form */
398 {as: AVMRGOW, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector merge odd word, vx-form */
401 {as: AVPERM, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector permute, va-form */
403 /* Vector bit permute */
404 {as: AVBPERMQ, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector bit permute, vx-form */
407 {as: AVSEL, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector select, va-form */
410 {as: AVSPLTB, a1: C_SCON, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector splat, vx-form */
411 {as: AVSPLTB, a1: C_ADDCON, a2: C_VREG, a6: C_VREG, type_: 82, size: 4},
412 {as: AVSPLTISB, a1: C_SCON, a6: C_VREG, type_: 82, size: 4}, /* vector splat immediate, vx-form */
413 {as: AVSPLTISB, a1: C_ADDCON, a6: C_VREG, type_: 82, size: 4},
416 {as: AVCIPH, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES cipher, vx-form */
417 {as: AVNCIPH, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES inverse cipher, vx-form */
418 {as: AVSBOX, a1: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES subbytes, vx-form */
421 {as: AVSHASIGMA, a1: C_ANDCON, a2: C_VREG, a3: C_ANDCON, a6: C_VREG, type_: 82, size: 4}, /* vector SHA sigma, vx-form */
423 /* VSX vector load */
424 {as: ALXVD2X, a1: C_XOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx vector load, xx1-form */
425 {as: ALXV, a1: C_SOREG, a6: C_VSREG, type_: 96, size: 4}, /* vsx vector load, dq-form */
426 {as: ALXVL, a1: C_REG, a2: C_REG, a6: C_VSREG, type_: 98, size: 4}, /* vsx vector load length */
428 /* VSX vector store */
429 {as: ASTXVD2X, a1: C_VSREG, a6: C_XOREG, type_: 86, size: 4}, /* vsx vector store, xx1-form */
430 {as: ASTXV, a1: C_VSREG, a6: C_SOREG, type_: 97, size: 4}, /* vsx vector store, dq-form */
431 {as: ASTXVL, a1: C_VSREG, a2: C_REG, a6: C_REG, type_: 99, size: 4}, /* vsx vector store with length x-form */
433 /* VSX scalar load */
434 {as: ALXSDX, a1: C_XOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx scalar load, xx1-form */
436 /* VSX scalar store */
437 {as: ASTXSDX, a1: C_VSREG, a6: C_XOREG, type_: 86, size: 4}, /* vsx scalar store, xx1-form */
439 /* VSX scalar as integer load */
440 {as: ALXSIWAX, a1: C_XOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx scalar as integer load, xx1-form */
442 /* VSX scalar store as integer */
443 {as: ASTXSIWX, a1: C_VSREG, a6: C_XOREG, type_: 86, size: 4}, /* vsx scalar as integer store, xx1-form */
445 /* VSX move from VSR */
446 {as: AMFVSRD, a1: C_VSREG, a6: C_REG, type_: 88, size: 4},
447 {as: AMFVSRD, a1: C_FREG, a6: C_REG, type_: 88, size: 4},
449 /* VSX move to VSR */
450 {as: AMTVSRD, a1: C_REG, a6: C_VSREG, type_: 104, size: 4},
451 {as: AMTVSRD, a1: C_REG, a6: C_FREG, type_: 104, size: 4},
452 {as: AMTVSRDD, a1: C_REG, a2: C_REG, a6: C_VSREG, type_: 104, size: 4},
455 {as: AXXLAND, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx and, xx3-form */
456 {as: AXXLOR, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx or, xx3-form */
459 {as: AXXSEL, a1: C_VSREG, a2: C_VSREG, a3: C_VSREG, a6: C_VSREG, type_: 91, size: 4}, /* vsx select, xx4-form */
462 {as: AXXMRGHW, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx merge, xx3-form */
465 {as: AXXSPLTW, a1: C_VSREG, a3: C_SCON, a6: C_VSREG, type_: 89, size: 4}, /* vsx splat, xx2-form */
466 {as: AXXSPLTIB, a1: C_SCON, a6: C_VSREG, type_: 100, size: 4}, /* vsx splat, xx2-form */
469 {as: AXXPERM, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx permute, xx3-form */
472 {as: AXXSLDWI, a1: C_VSREG, a2: C_VSREG, a3: C_SCON, a6: C_VSREG, type_: 90, size: 4}, /* vsx shift immediate, xx3-form */
474 /* VSX reverse bytes */
475 {as: AXXBRQ, a1: C_VSREG, a6: C_VSREG, type_: 101, size: 4}, /* vsx reverse bytes */
477 /* VSX scalar FP-FP conversion */
478 {as: AXSCVDPSP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar fp-fp conversion, xx2-form */
480 /* VSX vector FP-FP conversion */
481 {as: AXVCVDPSP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector fp-fp conversion, xx2-form */
483 /* VSX scalar FP-integer conversion */
484 {as: AXSCVDPSXDS, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar fp-integer conversion, xx2-form */
486 /* VSX scalar integer-FP conversion */
487 {as: AXSCVSXDDP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar integer-fp conversion, xx2-form */
489 /* VSX vector FP-integer conversion */
490 {as: AXVCVDPSXDS, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector fp-integer conversion, xx2-form */
492 /* VSX vector integer-FP conversion */
493 {as: AXVCVSXDDP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector integer-fp conversion, xx2-form */
495 {as: ACMP, a1: C_REG, a6: C_REG, type_: 70, size: 4},
496 {as: ACMP, a1: C_REG, a2: C_CREG, a6: C_REG, type_: 70, size: 4},
497 {as: ACMP, a1: C_REG, a6: C_ADDCON, type_: 71, size: 4},
498 {as: ACMP, a1: C_REG, a2: C_CREG, a6: C_ADDCON, type_: 71, size: 4},
499 {as: ACMPU, a1: C_REG, a6: C_REG, type_: 70, size: 4},
500 {as: ACMPU, a1: C_REG, a2: C_CREG, a6: C_REG, type_: 70, size: 4},
501 {as: ACMPU, a1: C_REG, a6: C_ANDCON, type_: 71, size: 4},
502 {as: ACMPU, a1: C_REG, a2: C_CREG, a6: C_ANDCON, type_: 71, size: 4},
503 {as: AFCMPO, a1: C_FREG, a6: C_FREG, type_: 70, size: 4},
504 {as: AFCMPO, a1: C_FREG, a2: C_CREG, a6: C_FREG, type_: 70, size: 4},
505 {as: ATW, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 60, size: 4},
506 {as: ATW, a1: C_LCON, a2: C_REG, a6: C_ADDCON, type_: 61, size: 4},
507 {as: ADCBF, a1: C_SOREG, type_: 43, size: 4},
508 {as: ADCBF, a1: C_XOREG, type_: 43, size: 4},
509 {as: ADCBF, a1: C_XOREG, a2: C_REG, a6: C_SCON, type_: 43, size: 4},
510 {as: ADCBF, a1: C_SOREG, a6: C_SCON, type_: 43, size: 4},
511 {as: ADCBF, a1: C_XOREG, a6: C_SCON, type_: 43, size: 4},
512 {as: ASTDCCC, a1: C_REG, a2: C_REG, a6: C_XOREG, type_: 44, size: 4},
513 {as: ASTDCCC, a1: C_REG, a6: C_XOREG, type_: 44, size: 4},
514 {as: ALDAR, a1: C_XOREG, a6: C_REG, type_: 45, size: 4},
515 {as: ALDAR, a1: C_XOREG, a3: C_ANDCON, a6: C_REG, type_: 45, size: 4},
516 {as: AEIEIO, type_: 46, size: 4},
517 {as: ATLBIE, a1: C_REG, type_: 49, size: 4},
518 {as: ATLBIE, a1: C_SCON, a6: C_REG, type_: 49, size: 4},
519 {as: ASLBMFEE, a1: C_REG, a6: C_REG, type_: 55, size: 4},
520 {as: ASLBMTE, a1: C_REG, a6: C_REG, type_: 55, size: 4},
521 {as: ASTSW, a1: C_REG, a6: C_XOREG, type_: 44, size: 4},
522 {as: ASTSW, a1: C_REG, a3: C_LCON, a6: C_ZOREG, type_: 41, size: 4},
523 {as: ALSW, a1: C_XOREG, a6: C_REG, type_: 45, size: 4},
524 {as: ALSW, a1: C_ZOREG, a3: C_LCON, a6: C_REG, type_: 42, size: 4},
526 {as: obj.AUNDEF, type_: 78, size: 4},
527 {as: obj.APCDATA, a1: C_LCON, a6: C_LCON, type_: 0, size: 0},
528 {as: obj.AFUNCDATA, a1: C_SCON, a6: C_ADDR, type_: 0, size: 0},
529 {as: obj.ANOP, type_: 0, size: 0},
530 {as: obj.ANOP, a1: C_LCON, type_: 0, size: 0}, // NOP operand variations added for #40689
531 {as: obj.ANOP, a1: C_REG, type_: 0, size: 0}, // to preserve previous behavior
532 {as: obj.ANOP, a1: C_FREG, type_: 0, size: 0},
533 {as: obj.ADUFFZERO, a6: C_LBRA, type_: 11, size: 4}, // same as ABR/ABL
534 {as: obj.ADUFFCOPY, a6: C_LBRA, type_: 11, size: 4}, // same as ABR/ABL
535 {as: obj.APCALIGN, a1: C_LCON, type_: 0, size: 0}, // align code
// oprange maps each opcode (indexed by as & obj.AMask) to the slice of optab
// entries enumerating its valid operand combinations; populated by buildop.
538 var oprange [ALAST & obj.AMask][]Optab
// xcmp relates operand classes to one another — presumably xcmp[i][j] reports
// whether an operand of class j is acceptable where class i is expected.
// NOTE(review): semantics not visible in this chunk; confirm against buildop.
540 var xcmp [C_NCLASS][C_NCLASS]bool
542 // addpad returns the number of padding bytes needed to align code at pc as requested by a.
543 func addpad(pc, a int64, ctxt *obj.Link, cursym *obj.LSym) int {
544 // For 16 and 32 byte alignment, there is a tradeoff
545 // between aligning the code and adding too many NOPs.
552 // Align to 16 bytes if possible but add at
561 // Align to 32 bytes if possible but add at
571 // When 32 byte alignment is requested on Linux,
572 // promote the function's alignment to 32. On AIX
573 // the function alignment is not changed which might
574 // result in 16 byte alignment but that is still fine.
575 // TODO: alignment on AIX
576 if ctxt.Headtype != objabi.Haix && cursym.Func().Align < 32 {
577 cursym.Func().Align = 32
580 ctxt.Diag("Unexpected alignment: %d for PCALIGN directive\n", a)
585 // Get the implied register of an operand which doesn't specify one. These show up
586 // in handwritten asm like "MOVD R5, foosymbol" where a base register is not supplied,
587 // or "MOVD R5, foo+10(SP)" where a pseudo-register is used. The other common case is
588 // when generating constants in a register like "MOVD $constant, Rx".
589 func (c *ctxt9) getimpliedreg(a *obj.Addr, p *obj.Prog) int {
591 if class >= C_ZCON && class <= C_64CON {
595 case C_SACON, C_LACON:
597 case C_LOREG, C_SOREG, C_ZOREG, C_XOREG:
599 case obj.NAME_EXTERN, obj.NAME_STATIC:
601 case obj.NAME_AUTO, obj.NAME_PARAM:
607 c.ctxt.Diag("failed to determine implied reg for class %v (%v)", DRconv(oclass(a)), p)
// span9 assembles one function: it sizes every Prog, assigns PCs, rewrites
// conditional branches whose targets are out of 16-bit displacement range,
// handles PCALIGN padding and prefixed-instruction 64B-boundary crossings,
// and finally emits the machine code into the symbol's data.
611 func span9(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
612 p := cursym.Func().Text
613 if p == nil || p.Link == nil { // handle external functions and ELF section symbols
617 if oprange[AANDN&obj.AMask] == nil {
618 ctxt.Diag("ppc64 ops not initialized, call ppc64.buildop first")
621 c := ctxt9{ctxt: ctxt, newprog: newprog, cursym: cursym, autosize: int32(p.To.Offset)}
// First pass: assign a size (and thus a PC) to every instruction.
628 for p = p.Link; p != nil; p = p.Link {
633 if p.As == obj.APCALIGN {
634 a := c.vregoff(&p.From)
635 m = addpad(pc, a, ctxt, cursym)
637 if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA {
638 ctxt.Diag("zero-width instruction\n%v", p)
649 * if any procedure is large enough to
650 * generate a large SBRA branch, then
651 * generate extra passes putting branches
652 * around jmps to fix. this is rare.
659 var falign int32 // Track increased alignment requirements for prefix.
663 falign = 0 // Note, linker bumps function symbols to funcAlign.
664 for p = c.cursym.Func().Text.Link; p != nil; p = p.Link {
668 // very large conditional branches
669 if (o.type_ == 16 || o.type_ == 17) && p.To.Target() != nil {
670 otxt = p.To.Target().Pc - pc
// Out of the signed 16-bit BD range (with slack for inserted code)?
671 if otxt < -(1<<15)+10 || otxt >= (1<<15)-10 {
672 // Assemble the instruction with a target not too far to figure out BI and BO fields.
673 // If only the CTR or BI (the CR bit) are tested, the conditional branch can be inverted,
674 // and only one extra branch is needed to reach the target.
676 p.To.SetTarget(p.Link)
677 o.asmout(&c, p, o, &out)
// Extract the BO (bits 21-25) and BI (bits 16-20) fields from the encoded BC.
680 bo := int64(out[0]>>21) & 31
681 bi := int16((out[0] >> 16) & 31)
685 // A conditional branch that is unconditionally taken. This cannot be inverted.
686 } else if bo&0x10 == 0x10 {
687 // A branch based on the value of CTR. Invert the CTR comparison against zero bit.
690 } else if bo&0x04 == 0x04 {
691 // A branch based on CR bit. Invert the BI comparison bit.
698 // BC bo,...,far_away_target
701 // BC invert(bo),next_insn
702 // JMP far_away_target
706 p.From = obj.Addr{Type: obj.TYPE_CONST, Name: obj.NAME_NONE, Offset: bo}
709 q.To.Type = obj.TYPE_BRANCH
710 q.To.SetTarget(p.To.Target())
712 p.To.SetTarget(p.Link)
714 p.Reg = REG_CRBIT0 + bi
717 // BC ...,far_away_target
723 // JMP far_away_target
730 q.To.Type = obj.TYPE_BRANCH
731 q.To.SetTarget(p.To.Target())
737 q.To.Type = obj.TYPE_BRANCH
738 q.To.SetTarget(q.Link.Link)
746 if p.As == obj.APCALIGN {
747 a := c.vregoff(&p.From)
748 m = addpad(pc, a, ctxt, cursym)
750 if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA {
751 ctxt.Diag("zero-width instruction\n%v", p)
757 // Prefixed instructions cannot be placed across a 64B boundary.
758 // Mark and adjust the PC of those which do. A nop will be
759 // inserted during final assembly.
761 mark := p.Mark &^ PFX_X64B
768 // Marks may be adjusted if a too-far conditional branch is
769 // fixed up above. Likewise, inserting a NOP may cause a
770 // branch target to become too far away. We need to run
771 // another iteration and verify no additional changes
778 // Check for 16 or 32B crossing of this prefixed insn.
779 // These do not require padding, but do require increasing
780 // the function alignment to prevent them from potentially
781 // crossing a 64B boundary when the linker assigns the final
784 case 28: // 32B crossing
786 case 12: // 16B crossing
800 c.cursym.Func().Align = falign
801 c.cursym.Grow(c.cursym.Size)
803 // lay out the code, emitting code and data relocations.
// The canonical PPC64 nop (ori 0,0,0), used for all inserted padding.
806 nop := LOP_IRR(OP_ORI, REGZERO, REGZERO, 0)
808 for p := c.cursym.Func().Text.Link; p != nil; p = p.Link {
811 if int(o.size) > 4*len(out) {
812 log.Fatalf("out array in span9 is too small, need at least %d for %v", o.size/4, p)
814 // asmout is not set up to add large amounts of padding
815 if o.type_ == 0 && p.As == obj.APCALIGN {
816 aln := c.vregoff(&p.From)
817 v := addpad(p.Pc, aln, c.ctxt, c.cursym)
819 // Same padding instruction for all
820 for i = 0; i < int32(v/4); i++ {
821 c.ctxt.Arch.ByteOrder.PutUint32(bp, nop)
// Insert the boundary-fixing nop recorded during the sizing passes.
826 if p.Mark&PFX_X64B != 0 {
827 c.ctxt.Arch.ByteOrder.PutUint32(bp, nop)
830 o.asmout(&c, p, o, &out)
831 for i = 0; i < int32(o.size/4); i++ {
832 c.ctxt.Arch.ByteOrder.PutUint32(bp, out[i])
// isint32 reports whether v fits in a signed 32-bit integer.
839 func isint32(v int64) bool {
840 return int64(int32(v)) == v
// isuint32 reports whether v fits in an unsigned 32-bit integer.
843 func isuint32(v uint64) bool {
844 return uint64(uint32(v)) == v
// aclassreg classifies a bare register operand into its operand class.
// For register files with even/odd pair classes, reg&1 selects the
// odd variant (C_*REGP + 1) for odd-numbered registers.
847 func (c *ctxt9) aclassreg(reg int16) int {
848 if REG_R0 <= reg && reg <= REG_R31 {
849 return C_REGP + int(reg&1)
851 if REG_F0 <= reg && reg <= REG_F31 {
852 return C_FREGP + int(reg&1)
854 if REG_V0 <= reg && reg <= REG_V31 {
857 if REG_VS0 <= reg && reg <= REG_VS63 {
858 return C_VSREGP + int(reg&1)
860 if REG_CR0 <= reg && reg <= REG_CR7 || reg == REG_CR {
863 if REG_CR0LT <= reg && reg <= REG_CR7SO {
866 if REG_SPR0 <= reg && reg <= REG_SPR0+1023 {
880 if REG_A0 <= reg && reg <= REG_A7 {
883 if reg == REG_FPSCR {
// aclass classifies an operand (obj.Addr) into one of the C_* operand
// classes used to match against optab entries. As a side effect it
// records the operand's resolved offset in c.instoffset.
889 func (c *ctxt9) aclass(a *obj.Addr) int {
895 return c.aclassreg(a.Reg)
899 if a.Name != obj.NAME_NONE || a.Offset != 0 {
900 c.ctxt.Logf("Unexpected Instruction operand index %d offset %d class %d \n", a.Index, a.Offset, a.Class)
906 case obj.NAME_GOTREF, obj.NAME_TOCREF:
909 case obj.NAME_EXTERN,
911 c.instoffset = a.Offset
914 } else if a.Sym.Type == objabi.STLSBSS {
915 // For PIC builds, use 12 byte got initial-exec TLS accesses.
916 if c.ctxt.Flag_shared {
919 // Otherwise, use 8 byte local-exec TLS accesses.
// Stack-relative operands: fold the frame size into the offset.
926 c.instoffset = int64(c.autosize) + a.Offset
928 if c.instoffset >= -BIG && c.instoffset < BIG {
934 c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.Arch.FixedFrameSize
935 if c.instoffset >= -BIG && c.instoffset < BIG {
941 c.instoffset = a.Offset
942 if a.Offset == 0 && a.Index == 0 {
944 } else if c.instoffset >= -BIG && c.instoffset < BIG {
953 case obj.TYPE_TEXTSIZE:
956 case obj.TYPE_FCONST:
957 // The only cases where FCONST will occur are with float64 +/- 0.
958 // All other float constants are generated in memory.
959 f64 := a.Val.(float64)
961 if math.Signbit(f64) {
966 log.Fatalf("Unexpected nonzero FCONST operand %v", a)
972 c.instoffset = a.Offset
974 if -BIG <= c.instoffset && c.instoffset < BIG {
977 if isint32(c.instoffset) {
983 case obj.NAME_EXTERN,
989 c.instoffset = a.Offset
993 c.instoffset = int64(c.autosize) + a.Offset
994 if c.instoffset >= -BIG && c.instoffset < BIG {
1000 c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.Arch.FixedFrameSize
1001 if c.instoffset >= -BIG && c.instoffset < BIG {
// Classify integer constants by the number of significant bits,
// so smaller constant classes are preferred during optab matching.
1010 if c.instoffset >= 0 {
1011 sbits := bits.Len64(uint64(c.instoffset))
1014 return C_ZCON + sbits
1022 // Special case, a positive int32 value which is a multiple of 2^16
1023 if c.instoffset&0xFFFF == 0 {
1035 sbits := bits.Len64(uint64(^c.instoffset))
1040 // Special case, a negative int32 value which is a multiple of 2^16
1041 if c.instoffset&0xFFFF == 0 {
1052 case obj.TYPE_BRANCH:
1053 if a.Sym != nil && c.ctxt.Flag_dynlink {
// prasm prints a Prog, for debugging.
1062 func prasm(p *obj.Prog) {
1063 fmt.Printf("%v\n", p)
// oplook finds the optab entry matching p's operand classes, caching the
// classification results in the operands' Class fields (stored off-by-one
// so zero means "not yet classified") and the match in p.Optab.
1066 func (c *ctxt9) oplook(p *obj.Prog) *Optab {
1071 a1 = int(p.From.Class)
1073 a1 = c.aclass(&p.From) + 1
1074 p.From.Class = int8(a1)
1078 argsv := [3]int{C_NONE + 1, C_NONE + 1, C_NONE + 1}
1079 for i, ap := range p.RestArgs {
1080 argsv[i] = int(ap.Addr.Class)
1082 argsv[i] = c.aclass(&ap.Addr) + 1
1083 ap.Addr.Class = int8(argsv[i])
1091 a6 := int(p.To.Class)
1093 a6 = c.aclass(&p.To) + 1
1094 p.To.Class = int8(a6)
1100 a2 = c.aclassreg(p.Reg)
1103 // c.ctxt.Logf("oplook %v %d %d %d %d\n", p, a1, a2, a3, a4, a5, a6)
1104 ops := oprange[p.As&obj.AMask]
// Scan this opcode's candidate entries; the cmp-derived xcmp tables
// (c1..c6) answer "is this operand class acceptable here".
1111 for i := range ops {
1113 if c1[op.a1] && c2[op.a2] && c3[op.a3] && c4[op.a4] && c5[op.a5] && c6[op.a6] {
1114 p.Optab = uint16(cap(optab) - cap(ops) + i + 1)
1119 c.ctxt.Diag("illegal combination %v %v %v %v %v %v %v", p.As, DRconv(a1), DRconv(a2), DRconv(a3), DRconv(a4), DRconv(a5), DRconv(a6))
1127 // Compare two operand types (ex C_REG, or C_SCON)
1128 // and return true if b is compatible with a.
1130 // Argument comparison isn't reflexive, so care must be taken.
1131 // a is the argument type as found in optab, b is the argument as
1132 // fitted by aclass.
1133 func cmp(a int, b int) bool {
1140 if b == C_LR || b == C_XER || b == C_CTR {
// Constant classes accept any strictly smaller constant class.
1145 return cmp(C_ZCON, b)
1147 return cmp(C_U1CON, b)
1149 return cmp(C_U2CON, b)
1151 return cmp(C_U3CON, b)
1153 return cmp(C_U4CON, b)
1155 return cmp(C_U5CON, b)
1157 return cmp(C_U8CON, b)
1159 return cmp(C_U15CON, b)
1162 return cmp(C_U15CON, b)
1164 return cmp(C_S16CON, b) || cmp(C_U16CON, b) || cmp(C_32S16CON, b)
1166 return cmp(C_32CON, b)
1168 return cmp(C_S34CON, b)
1171 return cmp(C_ZCON, b)
1174 return cmp(C_SACON, b)
1177 return cmp(C_SBRA, b)
1180 return cmp(C_ZOREG, b)
1183 return cmp(C_SOREG, b)
1186 return cmp(C_REG, b) || cmp(C_ZOREG, b)
1188 // An even/odd register input always matches the regular register types.
1190 return cmp(C_REGP, b) || (b == C_ZCON && r0iszero != 0)
1192 return cmp(C_FREGP, b)
1194 /* Allow any VR argument as a VSR operand. */
1195 return cmp(C_VSREGP, b) || cmp(C_VREG, b)
1204 // Used when sorting the optab. Sorting is
1205 // done in a way so that the best choice of
1206 // opcode/operand combination is considered first.
// Ordering: opcode first, then size (fewer instructions preferred),
// then each operand class in argument order.
1207 func optabLess(i, j int) bool {
1210 n := int(p1.as) - int(p2.as)
1215 // Consider those that generate fewer
1216 // instructions first.
1217 n = int(p1.size) - int(p2.size)
1221 // operand order should match
1222 // better choices first
1223 n = int(p1.a1) - int(p2.a1)
1227 n = int(p1.a2) - int(p2.a2)
1231 n = int(p1.a3) - int(p2.a3)
1235 n = int(p1.a4) - int(p2.a4)
1239 n = int(p1.a5) - int(p2.a5)
1243 n = int(p1.a6) - int(p2.a6)
1250 // Add an entry to the opcode table for
1251 // a new opcode b0 with the same operand combinations
// as existing opcode a (they share the same oprange slice).
1253 func opset(a, b0 obj.As) {
1254 oprange[a&obj.AMask] = oprange[b0]
1257 // Build the opcode table
// buildop sorts optab (plus the generated optabGen entries), slices it
// into per-opcode ranges in oprange, and then aliases every derived
// mnemonic to its representative entry via opset.
1258 func buildop(ctxt *obj.Link) {
1259 if oprange[AANDN&obj.AMask] != nil {
1260 // Already initialized; stop now.
1261 // This happens in the cmd/asm tests,
1262 // each of which re-initializes the arch.
// Precompute the class-compatibility matrix used by oplook.
1266 for i := 0; i < C_NCLASS; i++ {
1267 for n := 0; n < C_NCLASS; n++ {
1273 for i := range optab {
1274 // Use the legacy assembler function if none provided.
1275 if optab[i].asmout == nil {
1276 optab[i].asmout = asmout
1279 // Append the generated entries, sort, and fill out oprange.
1280 optab = append(optab, optabGen...)
1281 sort.Slice(optab, optabLess)
1282 for i := 0; i < len(optab); {
1286 for i < len(optab) && optab[i].as == r {
1289 oprange[r0] = optab[start:i]
1294 ctxt.Diag("unknown op in build: %v", r)
1295 log.Fatalf("instruction missing from switch in asm9.go:buildop: %v", r)
1298 case ADCBF: /* unary indexed: op (b+a); op (b) */
1307 case ASTDCCC: /* indexed store: op s,(b+a); op s,(b) */
1312 case AREM: /* macro */
1324 case ADIVW: /* op Rb[,Ra],Rd */
1329 opset(AMULHWUCC, r0)
1331 opset(AMULLWVCC, r0)
1339 opset(ADIVWUVCC, r0)
1356 opset(AMULHDUCC, r0)
1358 opset(AMULLDVCC, r0)
1365 opset(ADIVDEUCC, r0)
1370 opset(ADIVDUVCC, r0)
1382 case APOPCNTD: /* popcntd, popcntw, popcntb, cnttzw, cnttzd */
1386 opset(ACNTTZWCC, r0)
1388 opset(ACNTTZDCC, r0)
1390 case ACOPY: /* copy, paste. */
1393 case AMADDHD: /* maddhd, maddhdu, maddld */
1397 case AMOVBZ: /* lbz, stz, rlwm(r/r), lhz, lha, stz, and x variants */
1401 case AMOVBZU: /* lbz[x]u, stb[x]u, lhz[x]u, lha[x]u, sth[u]x, ld[x]u, std[u]x */
1410 case ALVEBX: /* lvebx, lvehx, lvewx, lvx, lvxl, lvsl, lvsr */
1418 case ASTVEBX: /* stvebx, stvehx, stvewx, stvx, stvxl */
1424 case AVAND: /* vand, vandc, vnand */
1429 case AVMRGOW: /* vmrgew, vmrgow */
1432 case AVOR: /* vor, vorc, vxor, vnor, veqv */
1439 case AVADDUM: /* vaddubm, vadduhm, vadduwm, vaddudm, vadduqm */
1446 case AVADDCU: /* vaddcuq, vaddcuw */
1450 case AVADDUS: /* vaddubs, vadduhs, vadduws */
1455 case AVADDSS: /* vaddsbs, vaddshs, vaddsws */
1460 case AVADDE: /* vaddeuqm, vaddecuq */
1461 opset(AVADDEUQM, r0)
1462 opset(AVADDECUQ, r0)
1464 case AVSUBUM: /* vsububm, vsubuhm, vsubuwm, vsubudm, vsubuqm */
1471 case AVSUBCU: /* vsubcuq, vsubcuw */
1475 case AVSUBUS: /* vsububs, vsubuhs, vsubuws */
1480 case AVSUBSS: /* vsubsbs, vsubshs, vsubsws */
1485 case AVSUBE: /* vsubeuqm, vsubecuq */
1486 opset(AVSUBEUQM, r0)
1487 opset(AVSUBECUQ, r0)
1489 case AVMULESB: /* vmulesb, vmulosb, vmuleub, vmuloub, vmulosh, vmulouh, vmulesw, vmulosw, vmuleuw, vmulouw, vmuluwm */
1502 case AVPMSUM: /* vpmsumb, vpmsumh, vpmsumw, vpmsumd */
1508 case AVR: /* vrlb, vrlh, vrlw, vrld */
1514 case AVS: /* vs[l,r], vs[l,r]o, vs[l,r]b, vs[l,r]h, vs[l,r]w, vs[l,r]d */
1528 case AVSA: /* vsrab, vsrah, vsraw, vsrad */
1534 case AVSOI: /* vsldoi */
1537 case AVCLZ: /* vclzb, vclzh, vclzw, vclzd */
1543 case AVPOPCNT: /* vpopcntb, vpopcnth, vpopcntw, vpopcntd */
1544 opset(AVPOPCNTB, r0)
1545 opset(AVPOPCNTH, r0)
1546 opset(AVPOPCNTW, r0)
1547 opset(AVPOPCNTD, r0)
1549 case AVCMPEQ: /* vcmpequb[.], vcmpequh[.], vcmpequw[.], vcmpequd[.] */
1550 opset(AVCMPEQUB, r0)
1551 opset(AVCMPEQUBCC, r0)
1552 opset(AVCMPEQUH, r0)
1553 opset(AVCMPEQUHCC, r0)
1554 opset(AVCMPEQUW, r0)
1555 opset(AVCMPEQUWCC, r0)
1556 opset(AVCMPEQUD, r0)
1557 opset(AVCMPEQUDCC, r0)
1559 case AVCMPGT: /* vcmpgt[u,s]b[.], vcmpgt[u,s]h[.], vcmpgt[u,s]w[.], vcmpgt[u,s]d[.] */
1560 opset(AVCMPGTUB, r0)
1561 opset(AVCMPGTUBCC, r0)
1562 opset(AVCMPGTUH, r0)
1563 opset(AVCMPGTUHCC, r0)
1564 opset(AVCMPGTUW, r0)
1565 opset(AVCMPGTUWCC, r0)
1566 opset(AVCMPGTUD, r0)
1567 opset(AVCMPGTUDCC, r0)
1568 opset(AVCMPGTSB, r0)
1569 opset(AVCMPGTSBCC, r0)
1570 opset(AVCMPGTSH, r0)
1571 opset(AVCMPGTSHCC, r0)
1572 opset(AVCMPGTSW, r0)
1573 opset(AVCMPGTSWCC, r0)
1574 opset(AVCMPGTSD, r0)
1575 opset(AVCMPGTSDCC, r0)
1577 case AVCMPNEZB: /* vcmpnezb[.] */
1578 opset(AVCMPNEZBCC, r0)
1580 opset(AVCMPNEBCC, r0)
1582 opset(AVCMPNEHCC, r0)
1584 opset(AVCMPNEWCC, r0)
1586 case AVPERM: /* vperm */
1587 opset(AVPERMXOR, r0)
1590 case AVBPERMQ: /* vbpermq, vbpermd */
1593 case AVSEL: /* vsel */
1596 case AVSPLTB: /* vspltb, vsplth, vspltw */
1600 case AVSPLTISB: /* vspltisb, vspltish, vspltisw */
1601 opset(AVSPLTISH, r0)
1602 opset(AVSPLTISW, r0)
1604 case AVCIPH: /* vcipher, vcipherlast */
1606 opset(AVCIPHERLAST, r0)
1608 case AVNCIPH: /* vncipher, vncipherlast */
1609 opset(AVNCIPHER, r0)
1610 opset(AVNCIPHERLAST, r0)
1612 case AVSBOX: /* vsbox */
1615 case AVSHASIGMA: /* vshasigmaw, vshasigmad */
1616 opset(AVSHASIGMAW, r0)
1617 opset(AVSHASIGMAD, r0)
1619 case ALXVD2X: /* lxvd2x, lxvdsx, lxvw4x, lxvh8x, lxvb16x */
1625 case ALXV: /* lxv */
1628 case ALXVL: /* lxvl, lxvll, lxvx */
1632 case ASTXVD2X: /* stxvd2x, stxvdsx, stxvw4x, stxvh8x, stxvb16x */
1635 opset(ASTXVB16X, r0)
1637 case ASTXV: /* stxv */
1640 case ASTXVL: /* stxvl, stxvll, stvx */
1644 case ALXSDX: /* lxsdx */
1647 case ASTXSDX: /* stxsdx */
1650 case ALXSIWAX: /* lxsiwax, lxsiwzx */
1653 case ASTXSIWX: /* stxsiwx */
1656 case AMFVSRD: /* mfvsrd, mfvsrwz (and extended mnemonics), mfvsrld */
1662 case AMTVSRD: /* mtvsrd, mtvsrwa, mtvsrwz (and extended mnemonics), mtvsrdd, mtvsrws */
1669 case AXXLAND: /* xxland, xxlandc, xxleqv, xxlnand */
1674 case AXXLOR: /* xxlorc, xxlnor, xxlor, xxlxor */
1680 case AXXSEL: /* xxsel */
1683 case AXXMRGHW: /* xxmrghw, xxmrglw */
1686 case AXXSPLTW: /* xxspltw */
1689 case AXXSPLTIB: /* xxspltib */
1690 opset(AXXSPLTIB, r0)
1692 case AXXPERM: /* xxpermdi */
1695 case AXXSLDWI: /* xxsldwi */
1696 opset(AXXPERMDI, r0)
1699 case AXXBRQ: /* xxbrq, xxbrd, xxbrw, xxbrh */
1704 case AXSCVDPSP: /* xscvdpsp, xscvspdp, xscvdpspn, xscvspdpn */
1705 opset(AXSCVSPDP, r0)
1706 opset(AXSCVDPSPN, r0)
1707 opset(AXSCVSPDPN, r0)
1709 case AXVCVDPSP: /* xvcvdpsp, xvcvspdp */
1710 opset(AXVCVSPDP, r0)
1712 case AXSCVDPSXDS: /* xscvdpsxds, xscvdpsxws, xscvdpuxds, xscvdpuxws */
1713 opset(AXSCVDPSXWS, r0)
1714 opset(AXSCVDPUXDS, r0)
1715 opset(AXSCVDPUXWS, r0)
1717 case AXSCVSXDDP: /* xscvsxddp, xscvuxddp, xscvsxdsp, xscvuxdsp */
1718 opset(AXSCVUXDDP, r0)
1719 opset(AXSCVSXDSP, r0)
1720 opset(AXSCVUXDSP, r0)
1722 case AXVCVDPSXDS: /* xvcvdpsxds, xvcvdpsxws, xvcvdpuxds, xvcvdpuxws, xvcvspsxds, xvcvspsxws, xvcvspuxds, xvcvspuxws */
1723 opset(AXVCVDPSXDS, r0)
1724 opset(AXVCVDPSXWS, r0)
1725 opset(AXVCVDPUXDS, r0)
1726 opset(AXVCVDPUXWS, r0)
1727 opset(AXVCVSPSXDS, r0)
1728 opset(AXVCVSPSXWS, r0)
1729 opset(AXVCVSPUXDS, r0)
1730 opset(AXVCVSPUXWS, r0)
1732 case AXVCVSXDDP: /* xvcvsxddp, xvcvsxwdp, xvcvuxddp, xvcvuxwdp, xvcvsxdsp, xvcvsxwsp, xvcvuxdsp, xvcvuxwsp */
1733 opset(AXVCVSXWDP, r0)
1734 opset(AXVCVUXDDP, r0)
1735 opset(AXVCVUXWDP, r0)
1736 opset(AXVCVSXDSP, r0)
1737 opset(AXVCVSXWSP, r0)
1738 opset(AXVCVUXDSP, r0)
1739 opset(AXVCVUXWSP, r0)
1741 case AAND: /* logical op Rb,Rs,Ra; no literal */
1755 case AADDME: /* op Ra, Rd */
1759 opset(AADDMEVCC, r0)
1763 opset(AADDZEVCC, r0)
1767 opset(ASUBMEVCC, r0)
1771 opset(ASUBZEVCC, r0)
1794 case AEXTSB: /* op Rs, Ra */
1800 opset(ACNTLZWCC, r0)
1804 opset(ACNTLZDCC, r0)
1806 case AFABS: /* fop [s,]d */
1818 opset(AFCTIWZCC, r0)
1822 opset(AFCTIDZCC, r0)
1826 opset(AFCFIDUCC, r0)
1828 opset(AFCFIDSCC, r0)
1840 opset(AFRSQRTECC, r0)
1844 opset(AFSQRTSCC, r0)
1851 opset(AFCPSGNCC, r0)
1864 opset(AFMADDSCC, r0)
1868 opset(AFMSUBSCC, r0)
1870 opset(AFNMADDCC, r0)
1872 opset(AFNMADDSCC, r0)
1874 opset(AFNMSUBCC, r0)
1876 opset(AFNMSUBSCC, r0)
1889 opset(AMTFSB0CC, r0)
1891 opset(AMTFSB1CC, r0)
1893 case ANEG: /* op [Ra,] Rd */
1899 case AOR: /* or/xor Rb,Rs,Ra; ori/xori $uimm,Rs,R */
1902 case AORIS: /* oris/xoris $uimm,Rs,Ra */
1917 case ASRAW: /* sraw Rb,Rs,Ra; srawi sh,Rs,Ra */
1921 opset(AEXTSWSLICC, r0)
1923 case ASRAD: /* sraw Rb,Rs,Ra; srawi sh,Rs,Ra */
1926 case ASUB: /* SUB Ra,Rb,Rd => subf Rd,ra,rb */
1954 opset(ARLDIMICC, r0)
1965 opset(ARLDICLCC, r0)
1967 opset(ARLDICRCC, r0)
1970 opset(ACLRLSLDI, r0)
1983 case ASYSCALL: /* just the op; flow of control */
2022 case AMOVW: /* load/store/move word with sign extension; move 32-bit literals */
2023 opset(AMOVWZ, r0) /* Same as above, but zero extended */
2027 AANDCC, /* and. Rb,Rs,Ra; andi. $uimm,Rs,Ra */
2032 AMOVD, /* load/store/move 64-bit values, including 32-bit literals with/without sign-extension */
2033 AMOVB, /* macro: move byte with sign extension */
2034 AMOVBU, /* macro: move byte with sign extension & update */
2036 /* op $s[,r2],r3; op r1[,r2],r3; no cc/v */
2037 ASUBC, /* op r1,$s,r3; op r1[,r2],r3 */
// OPVXX1 packs an XX1-form opcode: primary opcode o<<26, extended xo<<1, oe<<11.
2063 func OPVXX1(o uint32, xo uint32, oe uint32) uint32 {
2064 return o<<26 | xo<<1 | oe<<11
// OPVXX2 packs an XX2-form opcode: primary opcode o<<26, extended xo<<2, oe<<11.
2067 func OPVXX2(o uint32, xo uint32, oe uint32) uint32 {
2068 return o<<26 | xo<<2 | oe<<11
// OPVXX2VA packs an XX2-form opcode variant with oe at bit 16 instead of 11.
2071 func OPVXX2VA(o uint32, xo uint32, oe uint32) uint32 {
2072 return o<<26 | xo<<2 | oe<<16
// OPVXX3 packs an XX3-form opcode: primary opcode o<<26, extended xo<<3, oe<<11.
2075 func OPVXX3(o uint32, xo uint32, oe uint32) uint32 {
2076 return o<<26 | xo<<3 | oe<<11
// OPVXX4 packs an XX4-form opcode: primary opcode o<<26, extended xo<<4, oe<<11.
2079 func OPVXX4(o uint32, xo uint32, oe uint32) uint32 {
2080 return o<<26 | xo<<4 | oe<<11
// OPDQ packs a DQ-form opcode: primary opcode o<<26, xo unshifted, oe<<4.
2083 func OPDQ(o uint32, xo uint32, oe uint32) uint32 {
2084 return o<<26 | xo | oe<<4
// OPVX packs a VX-form opcode; rc occupies the low bit.
2087 func OPVX(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
2088 return o<<26 | xo | oe<<11 | rc&1
// OPVC packs a VC-form opcode; the record bit rc lands at bit 10.
2091 func OPVC(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
2092 return o<<26 | xo | oe<<11 | (rc&1)<<10
// OPVCC packs the common X/XO-style opcode: o<<26, xo<<1, oe<<10, rc in bit 0.
2095 func OPVCC(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
2096 return o<<26 | xo<<1 | oe<<10 | rc&1
// OPCC is OPVCC with oe fixed to zero.
2099 func OPCC(o uint32, xo uint32, rc uint32) uint32 {
2100 return OPVCC(o, xo, 0, rc)
2103 /* Generate MD-form opcode */
// o<<26 primary opcode, xo<<2 extended opcode, rc in the low bit.
2104 func OPMD(o, xo, rc uint32) uint32 {
2105 return o<<26 | xo<<2 | rc&1
2108 /* the order is dest, a/s, b/imm for both arithmetic and logical operations. */
// Registers are placed in the standard RT/RA/RB fields (bits 21/16/11).
2109 func AOP_RRR(op uint32, d uint32, a uint32, b uint32) uint32 {
2110 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11
2113 /* VX-form 2-register operands, r/none/r */
2114 func AOP_RR(op uint32, d uint32, a uint32) uint32 {
2115 return op | (d&31)<<21 | (a&31)<<11
2118 /* VA-form 4-register operands */
2119 func AOP_RRRR(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
2120 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c&31)<<6
// AOP_IRR packs a D-form instruction: registers d and a plus a 16-bit immediate.
2123 func AOP_IRR(op uint32, d uint32, a uint32, simm uint32) uint32 {
2124 return op | (d&31)<<21 | (a&31)<<16 | simm&0xFFFF
2127 /* VX-form 2-register + UIM operands */
2128 func AOP_VIRR(op uint32, d uint32, a uint32, simm uint32) uint32 {
2129 return op | (d&31)<<21 | (simm&0xFFFF)<<16 | (a&31)<<11
2132 /* VX-form 2-register + ST + SIX operands */
2133 func AOP_IIRR(op uint32, d uint32, a uint32, sbit uint32, simm uint32) uint32 {
2134 return op | (d&31)<<21 | (a&31)<<16 | (sbit&1)<<15 | (simm&0xF)<<11
2137 /* VA-form 3-register + SHB operands */
2138 func AOP_IRRR(op uint32, d uint32, a uint32, b uint32, simm uint32) uint32 {
2139 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (simm&0xF)<<6
2142 /* VX-form 1-register + SIM operands */
2143 func AOP_IR(op uint32, d uint32, simm uint32) uint32 {
2144 return op | (d&31)<<21 | (simm&31)<<16
2147 /* XX1-form 3-register operands, 1 VSR operand */
// The VSR's high bit (bit 5 of a 6-bit register number) goes in the low TX bit.
2148 func AOP_XX1(op uint32, r uint32, a uint32, b uint32) uint32 {
2149 return op | (r&31)<<21 | (a&31)<<16 | (b&31)<<11 | (r&32)>>5
2152 /* XX2-form 3-register operands, 2 VSR operands */
// High bits of xb and xt are relocated into the BX/TX extension bits.
2153 func AOP_XX2(op uint32, xt uint32, a uint32, xb uint32) uint32 {
2154 return op | (xt&31)<<21 | (a&3)<<16 | (xb&31)<<11 | (xb&32)>>4 | (xt&32)>>5
2157 /* XX3-form 3 VSR operands */
// High bits of xa/xb/xt are relocated into the AX/BX/TX extension bits.
2158 func AOP_XX3(op uint32, xt uint32, xa uint32, xb uint32) uint32 {
2159 return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
2162 /* XX3-form 3 VSR operands + immediate */
// Like AOP_XX3, with a 2-bit immediate c placed at bit 8.
2163 func AOP_XX3I(op uint32, xt uint32, xa uint32, xb uint32, c uint32) uint32 {
2164 return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (c&3)<<8 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
2167 /* XX4-form, 4 VSR operands */
// High bits of xc/xa/xb/xt are relocated into the CX/AX/BX/TX extension bits.
2168 func AOP_XX4(op uint32, xt uint32, xa uint32, xb uint32, xc uint32) uint32 {
2169 return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (xc&31)<<6 | (xc&32)>>2 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
2172 /* DQ-form, VSR register, register + offset operands */
2173 func AOP_DQ(op uint32, xt uint32, a uint32, b uint32) uint32 {
2174 /* The EA for this instruction form is (RA) + DQ << 4, where DQ is a 12-bit signed integer. */
2175 /* In order to match the output of the GNU objdump (and make the usage in Go asm easier), the */
2176 /* instruction is called using the sign extended value (i.e. a valid offset would be -32752 or 32752, */
2177 /* not -2047 or 2047), so 'b' needs to be adjusted to the expected 12-bit DQ value. Bear in mind that */
2178 /* bits 0 to 3 in 'dq' need to be zero, otherwise this will generate an illegal instruction. */
2179 /* If in doubt how this instruction form is encoded, refer to ISA 3.0b, pages 492 and 507. */
2181 return op | (xt&31)<<21 | (a&31)<<16 | (dq&4095)<<4 | (xt&32)>>2
2184 /* Z23-form, 3-register operands + CY field */
2185 func AOP_Z23I(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
2186 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c&3)<<9
2189 /* X-form, 3-register operands + EH field */
2190 func AOP_RRRI(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
2191 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c & 1)
// LOP_RRR packs a logical-form instruction: note RS goes at bit 21, RA at 16.
2194 func LOP_RRR(op uint32, a uint32, s uint32, b uint32) uint32 {
2195 return op | (s&31)<<21 | (a&31)<<16 | (b&31)<<11
// LOP_IRR packs a logical D-form instruction: RS<<21, RA<<16, 16-bit uimm.
2198 func LOP_IRR(op uint32, a uint32, s uint32, uimm uint32) uint32 {
2199 return op | (s&31)<<21 | (a&31)<<16 | uimm&0xFFFF
// OP_BR packs an I-form branch: 24-bit LI displacement field plus the AA bit.
2202 func OP_BR(op uint32, li uint32, aa uint32) uint32 {
2203 return op | li&0x03FFFFFC | aa<<1
// OP_BC packs a B-form conditional branch: BO<<21, BI<<16, 14-bit BD, AA bit.
2206 func OP_BC(op uint32, bo uint32, bi uint32, bd uint32, aa uint32) uint32 {
2207 return op | (bo&0x1F)<<21 | (bi&0x1F)<<16 | bd&0xFFFC | aa<<1
// OP_BCR packs a branch-to-register (XL-form) instruction: BO<<21, BI<<16.
2210 func OP_BCR(op uint32, bo uint32, bi uint32) uint32 {
2211 return op | (bo&0x1F)<<21 | (bi&0x1F)<<16
// OP_RLW packs an M-form rotate: RS<<21, RA<<16, SH<<11, MB<<6, ME<<1.
2214 func OP_RLW(op uint32, a uint32, s uint32, sh uint32, mb uint32, me uint32) uint32 {
2215 return op | (s&31)<<21 | (a&31)<<16 | (sh&31)<<11 | (mb&31)<<6 | (me&31)<<1
// AOP_RLDIC packs an MD-form 64-bit rotate; the 6th bits of sh and m are
// split out into their separate extension bit positions.
2218 func AOP_RLDIC(op uint32, a uint32, s uint32, sh uint32, m uint32) uint32 {
2219 return op | (s&31)<<21 | (a&31)<<16 | (sh&31)<<11 | ((sh&32)>>5)<<1 | (m&31)<<6 | ((m&32)>>5)<<5
// AOP_EXTSWSLI packs extswsli: RS<<21, RA<<16, 6-bit shift split across SH/sh.
2222 func AOP_EXTSWSLI(op uint32, a uint32, s uint32, sh uint32) uint32 {
2223 return op | (a&31)<<21 | (s&31)<<16 | (sh&31)<<11 | ((sh&32)>>5)<<1
// AOP_ISEL packs isel (A-form): RT<<21, RA<<16, RB<<11, condition bit BC<<6.
2226 func AOP_ISEL(op uint32, t uint32, a uint32, b uint32, bc uint32) uint32 {
2227 return op | (t&31)<<21 | (a&31)<<16 | (b&31)<<11 | (bc&0x1F)<<6
2231 /* each rhs is OPVCC(_, _, _, _) */
// Pre-assembled base opcode words, written out as the expanded OPVCC
// expression so the primary/extended opcode numbers remain visible.
2232 OP_ADD = 31<<26 | 266<<1 | 0<<10 | 0
2233 OP_ADDI = 14<<26 | 0<<1 | 0<<10 | 0
2234 OP_ADDIS = 15<<26 | 0<<1 | 0<<10 | 0
2235 OP_ANDI = 28<<26 | 0<<1 | 0<<10 | 0
2236 OP_EXTSB = 31<<26 | 954<<1 | 0<<10 | 0
2237 OP_EXTSH = 31<<26 | 922<<1 | 0<<10 | 0
2238 OP_EXTSW = 31<<26 | 986<<1 | 0<<10 | 0
2239 OP_ISEL = 31<<26 | 15<<1 | 0<<10 | 0
2240 OP_MCRF = 19<<26 | 0<<1 | 0<<10 | 0
2241 OP_MCRFS = 63<<26 | 64<<1 | 0<<10 | 0
2242 OP_MCRXR = 31<<26 | 512<<1 | 0<<10 | 0
2243 OP_MFCR = 31<<26 | 19<<1 | 0<<10 | 0
2244 OP_MFFS = 63<<26 | 583<<1 | 0<<10 | 0
2245 OP_MFSPR = 31<<26 | 339<<1 | 0<<10 | 0
2246 OP_MFSR = 31<<26 | 595<<1 | 0<<10 | 0
2247 OP_MFSRIN = 31<<26 | 659<<1 | 0<<10 | 0
2248 OP_MTCRF = 31<<26 | 144<<1 | 0<<10 | 0
2249 OP_MTFSF = 63<<26 | 711<<1 | 0<<10 | 0
2250 OP_MTFSFI = 63<<26 | 134<<1 | 0<<10 | 0
2251 OP_MTSPR = 31<<26 | 467<<1 | 0<<10 | 0
2252 OP_MTSR = 31<<26 | 210<<1 | 0<<10 | 0
2253 OP_MTSRIN = 31<<26 | 242<<1 | 0<<10 | 0
2254 OP_MULLW = 31<<26 | 235<<1 | 0<<10 | 0
2255 OP_MULLD = 31<<26 | 233<<1 | 0<<10 | 0
2256 OP_OR = 31<<26 | 444<<1 | 0<<10 | 0
2257 OP_ORI = 24<<26 | 0<<1 | 0<<10 | 0
2258 OP_ORIS = 25<<26 | 0<<1 | 0<<10 | 0
2259 OP_RLWINM = 21<<26 | 0<<1 | 0<<10 | 0
2260 OP_RLWNM = 23<<26 | 0<<1 | 0<<10 | 0
2261 OP_SUBF = 31<<26 | 40<<1 | 0<<10 | 0
2262 OP_RLDIC = 30<<26 | 4<<1 | 0<<10 | 0
2263 OP_RLDICR = 30<<26 | 2<<1 | 0<<10 | 0
2264 OP_RLDICL = 30<<26 | 0<<1 | 0<<10 | 0
2265 OP_RLDCL = 30<<26 | 8<<1 | 0<<10 | 0
2266 OP_EXTSWSLI = 31<<26 | 445<<2
// oclass returns the operand class cached in a.Class (stored off-by-one
// by oplook, so subtract 1 to recover the C_* value).
2269 func oclass(a *obj.Addr) int {
2270 return int(a.Class) - 1
2278 // This function determines when a non-indexed load or store is D or
2279 // DS form for use in finding the size of the offset field in the instruction.
2280 // The size is needed when setting the offset value in the instruction
2281 // and when generating relocation for that field.
2282 // DS form instructions include: ld, ldu, lwa, std, stdu. All other
2283 // loads and stores with an offset field are D form. This function should
2284 // only be called with the same opcodes as are handled by opstore and opload.
2285 func (c *ctxt9) opform(insn uint32) int {
2288 c.ctxt.Diag("bad insn in loadform: %x", insn)
2289 case OPVCC(58, 0, 0, 0), // ld
2290 OPVCC(58, 0, 0, 1), // ldu
2291 OPVCC(58, 0, 0, 0) | 1<<1, // lwa
2292 OPVCC(62, 0, 0, 0), // std
2293 OPVCC(62, 0, 0, 1): // stdu
2295 case OP_ADDI, // add
2296 OPVCC(32, 0, 0, 0), // lwz
2297 OPVCC(33, 0, 0, 0), // lwzu
2298 OPVCC(34, 0, 0, 0), // lbz
2299 OPVCC(35, 0, 0, 0), // lbzu
2300 OPVCC(40, 0, 0, 0), // lhz
2301 OPVCC(41, 0, 0, 0), // lhzu
2302 OPVCC(42, 0, 0, 0), // lha
2303 OPVCC(43, 0, 0, 0), // lhau
2304 OPVCC(46, 0, 0, 0), // lmw
2305 OPVCC(48, 0, 0, 0), // lfs
2306 OPVCC(49, 0, 0, 0), // lfsu
2307 OPVCC(50, 0, 0, 0), // lfd
2308 OPVCC(51, 0, 0, 0), // lfdu
2309 OPVCC(36, 0, 0, 0), // stw
2310 OPVCC(37, 0, 0, 0), // stwu
2311 OPVCC(38, 0, 0, 0), // stb
2312 OPVCC(39, 0, 0, 0), // stbu
2313 OPVCC(44, 0, 0, 0), // sth
2314 OPVCC(45, 0, 0, 0), // sthu
2315 OPVCC(47, 0, 0, 0), // stmw
2316 OPVCC(52, 0, 0, 0), // stfs
2317 OPVCC(53, 0, 0, 0), // stfsu
2318 OPVCC(54, 0, 0, 0), // stfd
2319 OPVCC(55, 0, 0, 0): // stfdu
2325 // Encode instructions and create relocation for accessing s+d according to the
2326 // instruction op with source or destination (as appropriate) register reg.
2327 func (c *ctxt9) symbolAccess(s *obj.LSym, d int64, reg int16, op uint32, reuse bool) (o1, o2 uint32) {
2328 if c.ctxt.Headtype == objabi.Haix {
2329 // Every symbol access must be made via a TOC anchor.
2330 c.ctxt.Diag("symbolAccess called for %s", s.Name)
2333 form := c.opform(op)
2334 if c.ctxt.Flag_shared {
2339 // If reg can be reused when computing the symbol address,
2340 // use it instead of REGTMP.
2342 o1 = AOP_IRR(OP_ADDIS, REGTMP, base, 0)
2343 o2 = AOP_IRR(op, uint32(reg), REGTMP, 0)
2345 o1 = AOP_IRR(OP_ADDIS, uint32(reg), base, 0)
2346 o2 = AOP_IRR(op, uint32(reg), uint32(reg), 0)
// The relocation covers the addis/op pair; its exact type depends on
// PIC mode and whether op is a D or DS form instruction.
2348 rel := obj.Addrel(c.cursym)
2349 rel.Off = int32(c.pc)
2353 if c.ctxt.Flag_shared {
2356 rel.Type = objabi.R_ADDRPOWER_TOCREL
2358 rel.Type = objabi.R_ADDRPOWER_TOCREL_DS
2364 rel.Type = objabi.R_ADDRPOWER
2366 rel.Type = objabi.R_ADDRPOWER_DS
// getmask decodes a 32-bit rotate mask value v into begin/end bit positions
// stored in m, returning false when v is not a contiguous (possibly
// wrapping) run of ones.
2375 func getmask(m []byte, v uint32) bool {
2378 if v != ^uint32(0) && v&(1<<31) != 0 && v&1 != 0 { /* MB > ME */
2389 for i := 0; i < 32; i++ {
2390 if v&(1<<uint(31-i)) != 0 {
2395 if i >= 32 || v&(1<<uint(31-i)) == 0 {
2401 if v&(1<<uint(31-i)) != 0 {
// maskgen is getmask with a diagnostic when v is not a valid mask.
2412 func (c *ctxt9) maskgen(p *obj.Prog, m []byte, v uint32) {
2414 c.ctxt.Diag("cannot generate mask #%x\n%v", v, p)
2419 * 64-bit masks (rldic etc)
// getmask64 is the 64-bit analogue of getmask: decode mask v into
// begin/end positions in m, false if not a contiguous run of ones.
2421 func getmask64(m []byte, v uint64) bool {
2424 for i := 0; i < 64; i++ {
2425 if v&(uint64(1)<<uint(63-i)) != 0 {
2430 if i >= 64 || v&(uint64(1)<<uint(63-i)) == 0 {
2436 if v&(uint64(1)<<uint(63-i)) != 0 {
// maskgen64 is getmask64 with a diagnostic when v is not a valid mask.
2447 func (c *ctxt9) maskgen64(p *obj.Prog, m []byte, v uint64) {
2448 if !getmask64(m, v) {
2449 c.ctxt.Diag("cannot generate mask #%x\n%v", v, p)
// loadu32 emits the first instruction of a 32-bit constant load into r:
// oris for values that fit in an unsigned 32-bit word, addis otherwise.
2453 func loadu32(r int, d int64) uint32 {
2455 if isuint32(uint64(d)) {
2456 return LOP_IRR(OP_ORIS, uint32(r), REGZERO, uint32(v))
2458 return AOP_IRR(OP_ADDIS, uint32(r), REGZERO, uint32(v))
// high16adjusted returns the high 16 bits of d, adjusted up by one when the
// low 16 bits will be sign-extended negatively by the following addi/load.
2461 func high16adjusted(d int32) uint16 {
2463 return uint16((d >> 16) + 1)
2465 return uint16(d >> 16)
2468 func asmout(c *ctxt9, p *obj.Prog, o *Optab, out *[5]uint32) {
2475 //print("%v => case %d\n", p, o->type);
2478 c.ctxt.Diag("unknown type %d", o.type_)
2481 case 0: /* pseudo ops */
2484 case 2: /* int/cr/fp op Rb,[Ra],Rd */
2490 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2492 case 3: /* mov $soreg/addcon/andcon/ucon, r ==> addis/oris/addi/ori $i,reg',r */
2493 d := c.vregoff(&p.From)
2496 r := int(p.From.Reg)
2498 r = c.getimpliedreg(&p.From, p)
2500 if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 && (r != 0 || v != 0) {
2501 c.ctxt.Diag("literal operation on R0\n%v", p)
2506 log.Fatalf("invalid handling of %v", p)
2508 // For UCON operands the value is right shifted 16, using ADDIS if the
2509 // value should be signed, ORIS if unsigned.
2511 if r == REGZERO && isuint32(uint64(d)) {
2512 o1 = LOP_IRR(OP_ORIS, uint32(p.To.Reg), REGZERO, uint32(v))
2517 } else if int64(int16(d)) != d {
2518 // Operand is 16 bit value with sign bit set
2519 if o.a1 == C_ANDCON {
2520 // Needs unsigned 16 bit so use ORI
2521 if r == 0 || r == REGZERO {
2522 o1 = LOP_IRR(uint32(OP_ORI), uint32(p.To.Reg), uint32(0), uint32(v))
2525 // With ADDCON, needs signed 16 bit value, fall through to use ADDI
2526 } else if o.a1 != C_ADDCON {
2527 log.Fatalf("invalid handling of %v", p)
2531 o1 = AOP_IRR(uint32(a), uint32(p.To.Reg), uint32(r), uint32(v))
2533 case 4: /* add/mul $scon,[r1],r2 */
2534 v := c.regoff(&p.From)
2540 if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 {
2541 c.ctxt.Diag("literal operation on R0\n%v", p)
2543 if int32(int16(v)) != v {
2544 log.Fatalf("mishandled instruction %v", p)
2546 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2548 case 5: /* syscall */
2551 case 6: /* logical op Rb,[Rs,]Ra; no literal */
2557 // AROTL and AROTLW are extended mnemonics, which map to RLDCL and RLWNM.
2560 o1 = AOP_RLDIC(OP_RLDCL, uint32(p.To.Reg), uint32(r), uint32(p.From.Reg), uint32(0))
2562 o1 = OP_RLW(OP_RLWNM, uint32(p.To.Reg), uint32(r), uint32(p.From.Reg), 0, 31)
2564 if p.As == AOR && p.From.Type == obj.TYPE_CONST && p.From.Offset == 0 {
2565 // Compile "OR $0, Rx, Ry" into ori. If Rx == Ry == 0, this is the preferred
2566 // hardware no-op. This happens because $0 matches C_REG before C_ZCON.
2567 o1 = LOP_IRR(OP_ORI, uint32(p.To.Reg), uint32(r), 0)
2569 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2573 case 7: /* mov r, soreg ==> stw o(r) */
2577 r = c.getimpliedreg(&p.To, p)
2579 v := c.regoff(&p.To)
2580 if int32(int16(v)) != v {
2581 log.Fatalf("mishandled instruction %v", p)
2583 // Offsets in DS form stores must be a multiple of 4
2584 inst := c.opstore(p.As)
2585 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
2586 log.Fatalf("invalid offset for DS form load/store %v", p)
2588 o1 = AOP_IRR(inst, uint32(p.From.Reg), uint32(r), uint32(v))
2590 case 8: /* mov soreg, r ==> lbz/lhz/lwz o(r), lbz o(r) + extsb r,r */
2591 r := int(p.From.Reg)
2594 r = c.getimpliedreg(&p.From, p)
2596 v := c.regoff(&p.From)
2597 if int32(int16(v)) != v {
2598 log.Fatalf("mishandled instruction %v", p)
2600 // Offsets in DS form loads must be a multiple of 4
2601 inst := c.opload(p.As)
2602 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
2603 log.Fatalf("invalid offset for DS form load/store %v", p)
2605 o1 = AOP_IRR(inst, uint32(p.To.Reg), uint32(r), uint32(v))
2607 // Sign extend MOVB operations. This is ignored for other cases (o.size == 4).
2608 o2 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
2610 case 10: /* sub Ra,[Rb],Rd => subf Rd,Ra,Rb */
2616 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(r))
2618 case 11: /* br/bl lbra */
2621 if p.To.Target() != nil {
2622 v = int32(p.To.Target().Pc - p.Pc)
2624 c.ctxt.Diag("odd branch target address\n%v", p)
2628 if v < -(1<<25) || v >= 1<<24 {
2629 c.ctxt.Diag("branch too far\n%v", p)
2633 o1 = OP_BR(c.opirr(p.As), uint32(v), 0)
2634 if p.To.Sym != nil {
2635 rel := obj.Addrel(c.cursym)
2636 rel.Off = int32(c.pc)
2639 v += int32(p.To.Offset)
2641 c.ctxt.Diag("odd branch target address\n%v", p)
2646 rel.Type = objabi.R_CALLPOWER
2648 o2 = 0x60000000 // nop, sometimes overwritten by ld r2, 24(r1) when dynamic linking
2650 case 13: /* mov[bhwd]{z,} r,r */
2651 // This needs to handle "MOV* $0, Rx". This shows up because $0 also
2652 // matches C_REG if r0iszero. This happens because C_REG sorts before C_ANDCON
2653 // TODO: fix the above behavior and cleanup this exception.
2654 if p.From.Type == obj.TYPE_CONST {
2655 o1 = LOP_IRR(OP_ADDI, REGZERO, uint32(p.To.Reg), 0)
2658 if p.To.Type == obj.TYPE_CONST {
2659 c.ctxt.Diag("cannot move into constant 0\n%v", p)
2664 o1 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2666 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.From.Reg), 0, 24, 31)
2668 o1 = LOP_RRR(OP_EXTSH, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2670 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.From.Reg), 0, 16, 31)
2672 o1 = LOP_RRR(OP_EXTSW, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2674 o1 = OP_RLW(OP_RLDIC, uint32(p.To.Reg), uint32(p.From.Reg), 0, 0, 0) | 1<<5 /* MB=32 */
2676 o1 = LOP_RRR(OP_OR, uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.From.Reg))
2678 c.ctxt.Diag("internal: bad register move/truncation\n%v", p)
2681 case 14: /* rldc[lr] Rb,Rs,$mask,Ra -- left, right give different masks */
2687 d := c.vregoff(p.GetFrom3())
2691 // These opcodes expect a mask operand that has to be converted into the
2692 // appropriate operand. The way these were defined, not all valid masks are possible.
2693 // Left here for compatibility in case they were used or generated.
2694 case ARLDCL, ARLDCLCC:
2696 c.maskgen64(p, mask[:], uint64(d))
2698 a = int(mask[0]) /* MB */
2700 c.ctxt.Diag("invalid mask for rotate: %x (end != bit 63)\n%v", uint64(d), p)
2702 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2703 o1 |= (uint32(a) & 31) << 6
2705 o1 |= 1 << 5 /* mb[5] is top bit */
2708 case ARLDCR, ARLDCRCC:
2710 c.maskgen64(p, mask[:], uint64(d))
2712 a = int(mask[1]) /* ME */
2714 c.ctxt.Diag("invalid mask for rotate: %x %x (start != 0)\n%v", uint64(d), mask[0], p)
2716 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2717 o1 |= (uint32(a) & 31) << 6
2719 o1 |= 1 << 5 /* mb[5] is top bit */
2722 // These opcodes use a shift count like the ppc64 asm, no mask conversion done
2723 case ARLDICR, ARLDICRCC:
2725 sh := c.regoff(&p.From)
2726 if me < 0 || me > 63 || sh > 63 {
2727 c.ctxt.Diag("Invalid me or sh for RLDICR: %x %x\n%v", int(d), sh, p)
2729 o1 = AOP_RLDIC(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(sh), uint32(me))
2731 case ARLDICL, ARLDICLCC, ARLDIC, ARLDICCC:
2733 sh := c.regoff(&p.From)
2734 if mb < 0 || mb > 63 || sh > 63 {
2735 c.ctxt.Diag("Invalid mb or sh for RLDIC, RLDICL: %x %x\n%v", mb, sh, p)
2737 o1 = AOP_RLDIC(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(sh), uint32(mb))
2740 // This is an extended mnemonic defined in the ISA section C.8.1
2741 // clrlsldi ra,rs,b,n --> rldic ra,rs,n,b-n
2742 // It maps onto RLDIC so is directly generated here based on the operands from
2745 b := c.regoff(&p.From)
2746 if n > b || b > 63 {
2747 c.ctxt.Diag("Invalid n or b for CLRLSLDI: %x %x\n%v", n, b, p)
2749 o1 = AOP_RLDIC(OP_RLDIC, uint32(p.To.Reg), uint32(r), uint32(n), uint32(b)-uint32(n))
2752 c.ctxt.Diag("unexpected op in rldc case\n%v", p)
2756 case 17, /* bc bo,bi,lbra (same for now) */
2757 16: /* bc bo,bi,sbra */
2762 if p.From.Type == obj.TYPE_CONST {
2763 a = int(c.regoff(&p.From))
2764 } else if p.From.Type == obj.TYPE_REG {
2766 c.ctxt.Diag("unexpected register setting for branch with CR: %d\n", r)
2768 // BI values for the CR
2787 c.ctxt.Diag("unrecognized register: expecting CR\n")
2791 if p.To.Target() != nil {
2792 v = int32(p.To.Target().Pc - p.Pc)
2795 c.ctxt.Diag("odd branch target address\n%v", p)
2799 if v < -(1<<16) || v >= 1<<15 {
2800 c.ctxt.Diag("branch too far\n%v", p)
2802 o1 = OP_BC(c.opirr(p.As), uint32(a), uint32(r), uint32(v), 0)
2804 case 18: /* br/bl (lr/ctr); bc/bcl bo,bi,(lr/ctr) */
2807 if p.As == ABC || p.As == ABCL {
2808 v = c.regoff(&p.From) & 31
2810 v = 20 /* unconditional */
2816 switch oclass(&p.To) {
2818 o1 = OPVCC(19, 528, 0, 0)
2821 o1 = OPVCC(19, 16, 0, 0)
2824 c.ctxt.Diag("bad optab entry (18): %d\n%v", p.To.Class, p)
2828 // Insert optional branch hint for bclr[l]/bcctr[l]
2829 if p.From3Type() != obj.TYPE_NONE {
2830 bh = uint32(p.GetFrom3().Offset)
2831 if bh == 2 || bh > 3 {
2832 log.Fatalf("BH must be 0,1,3 for %v", p)
2837 if p.As == ABL || p.As == ABCL {
2840 o1 = OP_BCR(o1, uint32(v), uint32(r))
2842 case 19: /* mov $lcon,r ==> cau+or */
2843 d := c.vregoff(&p.From)
2844 o1 = loadu32(int(p.To.Reg), d)
2845 o2 = LOP_IRR(OP_ORI, uint32(p.To.Reg), uint32(p.To.Reg), uint32(int32(d)))
2847 case 20: /* add $ucon,,r | addis $addcon,r,r */
2848 v := c.regoff(&p.From)
2854 if p.As == AADD && (r0iszero == 0 /*TypeKind(100016)*/ && p.Reg == 0 || r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0) {
2855 c.ctxt.Diag("literal operation on R0\n%v", p)
2858 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2860 o1 = AOP_IRR(c.opirr(AADDIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
2863 case 22: /* add $lcon/$andcon,r1,r2 ==> oris+ori+add/ori+add */
2864 if p.To.Reg == REGTMP || p.Reg == REGTMP {
2865 c.ctxt.Diag("can't synthesize large constant\n%v", p)
2867 d := c.vregoff(&p.From)
2872 if p.From.Sym != nil {
2873 c.ctxt.Diag("%v is not supported", p)
2875 // If operand is ANDCON, generate 2 instructions using
2876 // ORI for unsigned value; with LCON 3 instructions.
2878 o1 = LOP_IRR(OP_ORI, REGTMP, REGZERO, uint32(int32(d)))
2879 o2 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2881 o1 = loadu32(REGTMP, d)
2882 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(int32(d)))
2883 o3 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2886 case 23: /* and $lcon/$addcon,r1,r2 ==> oris+ori+and/addi+and */
2887 if p.To.Reg == REGTMP || p.Reg == REGTMP {
2888 c.ctxt.Diag("can't synthesize large constant\n%v", p)
2890 d := c.vregoff(&p.From)
2896 // With ADDCON operand, generate 2 instructions using ADDI for signed value,
2897 // with LCON operand generate 3 instructions.
2899 o1 = LOP_IRR(OP_ADDI, REGZERO, REGTMP, uint32(int32(d)))
2900 o2 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2902 o1 = loadu32(REGTMP, d)
2903 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(int32(d)))
2904 o3 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2906 if p.From.Sym != nil {
2907 c.ctxt.Diag("%v is not supported", p)
2910 case 24: /* lfd fA,float64(0) -> xxlxor xsA,xsaA,xsaA + fneg for -0 */
2911 o1 = AOP_XX3I(c.oprrr(AXXLXOR), uint32(p.To.Reg), uint32(p.To.Reg), uint32(p.To.Reg), uint32(0))
2912 // This is needed for -0.
2914 o2 = AOP_RRR(c.oprrr(AFNEG), uint32(p.To.Reg), 0, uint32(p.To.Reg))
2918 /* sld[.] $sh,rS,rA -> rldicr[.] $sh,rS,mask(0,63-sh),rA; srd[.] -> rldicl */
2919 v := c.regoff(&p.From)
2944 case AEXTSWSLI, AEXTSWSLICC:
2947 c.ctxt.Diag("unexpected op in sldi case\n%v", p)
2952 if p.As == AEXTSWSLI || p.As == AEXTSWSLICC {
2953 o1 = AOP_EXTSWSLI(OP_EXTSWSLI, uint32(r), uint32(p.To.Reg), uint32(v))
2956 o1 = AOP_RLDIC(op, uint32(p.To.Reg), uint32(r), uint32(v), uint32(a))
2958 if p.As == ASLDCC || p.As == ASRDCC || p.As == AEXTSWSLICC {
2959 o1 |= 1 // Set the condition code bit
2962 case 26: /* mov $lsext/auto/oreg,,r2 ==> addis+addi */
2963 v := c.vregoff(&p.From)
2964 r := int(p.From.Reg)
2966 switch p.From.Name {
2967 case obj.NAME_EXTERN, obj.NAME_STATIC:
2968 // Load a 32 bit constant, or relocation depending on if a symbol is attached
2969 o1, o2 = c.symbolAccess(p.From.Sym, v, p.To.Reg, OP_ADDI, true)
2972 r = c.getimpliedreg(&p.From, p)
2974 // Add a 32 bit offset to a register.
2975 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), uint32(r), uint32(high16adjusted(int32(v))))
2976 o2 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), uint32(p.To.Reg), uint32(v))
2979 case 27: /* subc ra,$simm,rd => subfic rd,ra,$simm */
2980 v := c.regoff(p.GetFrom3())
2982 r := int(p.From.Reg)
2983 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2985 case 28: /* subc r1,$lcon,r2 ==> cau+or+subfc */
2986 if p.To.Reg == REGTMP || p.From.Reg == REGTMP {
2987 c.ctxt.Diag("can't synthesize large constant\n%v", p)
2989 v := c.regoff(p.GetFrom3())
2990 o1 = AOP_IRR(OP_ADDIS, REGTMP, REGZERO, uint32(v)>>16)
2991 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(v))
2992 o3 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), REGTMP)
2993 if p.From.Sym != nil {
2994 c.ctxt.Diag("%v is not supported", p)
// case 29: immediate-shift doubleword rotates written with a $mask operand.
// maskgen64 converts the mask into MB/ME bounds; each opcode variant then
// validates the bound it cannot encode before emitting the MD-form word.
2997 case 29: /* rldic[lr]? $sh,s,$mask,a -- left, right, plain give different masks */
2998 v := c.regoff(&p.From)
3000 d := c.vregoff(p.GetFrom3())
3002 c.maskgen64(p, mask[:], uint64(d))
3005 case ARLDC, ARLDCCC:
3006 a = int(mask[0]) /* MB */
3007 if int32(mask[1]) != (63 - v) {
3008 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[1], v, p)
3011 case ARLDCL, ARLDCLCC:
3012 a = int(mask[0]) /* MB */
// NOTE(review): the "%s" verb below is applied to mask[1], a numeric byte;
// the sibling Diag calls above and below use "%x". This looks like a wrong
// format verb — confirm and fix in a code change (comments-only pass here).
3014 c.ctxt.Diag("invalid mask for shift: %x %s (shift %d)\n%v", uint64(d), mask[1], v, p)
3017 case ARLDCR, ARLDCRCC:
3018 a = int(mask[1]) /* ME */
3020 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[0], v, p)
3024 c.ctxt.Diag("unexpected op in rldic case\n%v", p)
// Encode: SH goes in the low shift field, the 5 low bits of the mask bound
// go in bits 6..10, and (below) the 6th mask bit is set separately.
3028 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
3029 o1 |= (uint32(a) & 31) << 6
3034 o1 |= 1 << 5 /* mb[5] is top bit */
3037 case 30: /* rldimi $sh,s,$mask,a */
3038 v := c.regoff(&p.From)
3040 d := c.vregoff(p.GetFrom3())
3042 // Original opcodes had mask operands which had to be converted to a shift count as expected by
3045 case ARLDMI, ARLDMICC:
3047 c.maskgen64(p, mask[:], uint64(d))
3048 if int32(mask[1]) != (63 - v) {
3049 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[1], v, p)
3051 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
3052 o1 |= (uint32(mask[0]) & 31) << 6
3056 if mask[0]&0x20 != 0 {
3057 o1 |= 1 << 5 /* mb[5] is top bit */
3060 // Opcodes with shift count operands.
3061 case ARLDIMI, ARLDIMICC:
3062 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
3063 o1 |= (uint32(d) & 31) << 6
3072 case 31: /* dword */
3073 d := c.vregoff(&p.From)
3075 if c.ctxt.Arch.ByteOrder == binary.BigEndian {
3076 o1 = uint32(d >> 32)
3080 o2 = uint32(d >> 32)
3083 if p.From.Sym != nil {
3084 rel := obj.Addrel(c.cursym)
3085 rel.Off = int32(c.pc)
3087 rel.Sym = p.From.Sym
3088 rel.Add = p.From.Offset
3089 rel.Type = objabi.R_ADDR
3094 case 32: /* fmul frc,fra,frd */
3100 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0) | (uint32(p.From.Reg)&31)<<6
3102 case 33: /* fabs [frb,]frd; fmr. frb,frd */
3103 r := int(p.From.Reg)
3105 if oclass(&p.From) == C_NONE {
3108 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), 0, uint32(r))
3110 case 34: /* FMADDx fra,frb,frc,frt (t=a*c±b) */
3111 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg)) | (uint32(p.GetFrom3().Reg)&31)<<6
3113 case 35: /* mov r,lext/lauto/loreg ==> cau $(v>>16),sb,r'; store o(r') */
3114 v := c.regoff(&p.To)
3118 r = c.getimpliedreg(&p.To, p)
3120 // Offsets in DS form stores must be a multiple of 4
3121 inst := c.opstore(p.As)
3122 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3123 log.Fatalf("invalid offset for DS form load/store %v", p)
3125 o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v)))
3126 o2 = AOP_IRR(inst, uint32(p.From.Reg), REGTMP, uint32(v))
3128 case 36: /* mov b/bz/h/hz lext/lauto/lreg,r ==> lbz+extsb/lbz/lha/lhz etc */
3129 v := c.regoff(&p.From)
3131 r := int(p.From.Reg)
3133 r = c.getimpliedreg(&p.From, p)
3135 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), uint32(r), uint32(high16adjusted(v)))
3136 o2 = AOP_IRR(c.opload(p.As), uint32(p.To.Reg), uint32(p.To.Reg), uint32(v))
3138 // Sign extend MOVB if needed
3139 o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3142 o1 = uint32(c.regoff(&p.From))
3144 case 41: /* stswi */
3145 if p.To.Type == obj.TYPE_MEM && p.To.Index == 0 && p.To.Offset != 0 {
3146 c.ctxt.Diag("Invalid addressing mode used in index type instruction: %v", p.As)
3149 o1 = AOP_RRR(c.opirr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), 0) | (uint32(c.regoff(p.GetFrom3()))&0x7F)<<11
3152 if p.From.Type == obj.TYPE_MEM && p.From.Index == 0 && p.From.Offset != 0 {
3153 c.ctxt.Diag("Invalid addressing mode used in index type instruction: %v", p.As)
3155 o1 = AOP_RRR(c.opirr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), 0) | (uint32(c.regoff(p.GetFrom3()))&0x7F)<<11
3157 case 43: /* data cache instructions: op (Ra+[Rb]), [th|l] */
3158 /* TH field for dcbt/dcbtst: */
3159 /* 0 = Block access - program will soon access EA. */
3160 /* 8-15 = Stream access - sequence of access (data stream). See section 4.3.2 of the ISA for details. */
3161 /* 16 = Block access - program will soon make a transient access to EA. */
3162 /* 17 = Block access - program will not access EA for a long time. */
3164 /* L field for dcbf: */
3165 /* 0 = invalidates the block containing EA in all processors. */
3166 /* 1 = same as 0, but with limited scope (i.e. block in the current processor will not be reused soon). */
3167 /* 3 = same as 1, but with even more limited scope (i.e. block in the current processor primary cache will not be reused soon). */
3168 if p.To.Type == obj.TYPE_NONE {
3169 o1 = AOP_RRR(c.oprrr(p.As), 0, uint32(p.From.Index), uint32(p.From.Reg))
3171 th := c.regoff(&p.To)
3172 o1 = AOP_RRR(c.oprrr(p.As), uint32(th), uint32(p.From.Index), uint32(p.From.Reg))
3175 case 44: /* indexed store */
3176 o1 = AOP_RRR(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(p.To.Reg))
3178 case 45: /* indexed load */
3180 /* The assembler accepts a 4-operand l*arx instruction. The fourth operand is an Exclusive Access Hint (EH) */
3181 /* The EH field can be used as a lock acquire/release hint as follows: */
3182 /* 0 = Atomic Update (fetch-and-operate or similar algorithm) */
3183 /* 1 = Exclusive Access (lock acquire and release) */
3184 case ALBAR, ALHAR, ALWAR, ALDAR:
3185 if p.From3Type() != obj.TYPE_NONE {
3186 eh := int(c.regoff(p.GetFrom3()))
3188 c.ctxt.Diag("illegal EH field\n%v", p)
3190 o1 = AOP_RRRI(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg), uint32(eh))
3192 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3195 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3197 case 46: /* plain op */
3200 case 47: /* op Ra, Rd; also op [Ra,] Rd */
3201 r := int(p.From.Reg)
3206 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0)
3208 case 48: /* op Rs, Ra */
3209 r := int(p.From.Reg)
3214 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0)
3216 case 49: /* op Rb; op $n, Rb */
3217 if p.From.Type != obj.TYPE_REG { /* tlbie $L, rB */
3218 v := c.regoff(&p.From) & 1
3219 o1 = AOP_RRR(c.oprrr(p.As), 0, 0, uint32(p.To.Reg)) | uint32(v)<<21
3221 o1 = AOP_RRR(c.oprrr(p.As), 0, 0, uint32(p.From.Reg))
3224 case 50: /* rem[u] r1[,r2],r3 */
3231 t := v & (1<<10 | 1) /* OE|Rc */
3232 o1 = AOP_RRR(v&^t, REGTMP, uint32(r), uint32(p.From.Reg))
3233 o2 = AOP_RRR(OP_MULLW, REGTMP, REGTMP, uint32(p.From.Reg))
3234 o3 = AOP_RRR(OP_SUBF|t, uint32(p.To.Reg), REGTMP, uint32(r))
3238 /* Clear top 32 bits */
3239 o3 = OP_RLW(OP_RLDIC, REGTMP, REGTMP, 0, 0, 0) | 1<<5
3242 case 51: /* remd[u] r1[,r2],r3 */
3249 t := v & (1<<10 | 1) /* OE|Rc */
3250 o1 = AOP_RRR(v&^t, REGTMP, uint32(r), uint32(p.From.Reg))
3251 o2 = AOP_RRR(OP_MULLD, REGTMP, REGTMP, uint32(p.From.Reg))
3252 o3 = AOP_RRR(OP_SUBF|t, uint32(p.To.Reg), REGTMP, uint32(r))
3253 /* cases 50,51: removed; can be reused. */
3255 /* cases 50,51: removed; can be reused. */
3257 case 52: /* mtfsbNx cr(n) */
3258 v := c.regoff(&p.From) & 31
3260 o1 = AOP_RRR(c.oprrr(p.As), uint32(v), 0, 0)
3262 case 53: /* mffsX ,fr1 */
3263 o1 = AOP_RRR(OP_MFFS, uint32(p.To.Reg), 0, 0)
3265 case 55: /* op Rb, Rd */
3266 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), 0, uint32(p.From.Reg))
3268 case 56: /* sra $sh,[s,]a; srd $sh,[s,]a */
3269 v := c.regoff(&p.From)
3275 o1 = AOP_RRR(c.opirr(p.As), uint32(r), uint32(p.To.Reg), uint32(v)&31)
3276 if (p.As == ASRAD || p.As == ASRADCC) && (v&0x20 != 0) {
3277 o1 |= 1 << 1 /* mb[5] */
3280 case 57: /* slw $sh,[s,]a -> rlwinm ... */
3281 v := c.regoff(&p.From)
3289 * Let user (gs) shoot himself in the foot.
3290 * qc has already complained.
3293 ctxt->diag("illegal shift %ld\n%v", v, p);
3303 mask[0], mask[1] = 0, 31
3305 mask[0], mask[1] = uint8(v), 31
3308 mask[0], mask[1] = 0, uint8(31-v)
3310 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(r), uint32(v), uint32(mask[0]), uint32(mask[1]))
3311 if p.As == ASLWCC || p.As == ASRWCC {
3312 o1 |= 1 // set the condition code
3315 case 58: /* logical $andcon,[s],a */
3316 v := c.regoff(&p.From)
3322 o1 = LOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
3324 case 59: /* or/xor/and $ucon,,r | oris/xoris/andis $addcon,r,r */
3325 v := c.regoff(&p.From)
3333 o1 = LOP_IRR(c.opirr(AORIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16) /* oris, xoris, andis. */
3335 o1 = LOP_IRR(c.opirr(AXORIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
3337 o1 = LOP_IRR(c.opirr(AANDISCC), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
3339 o1 = LOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
3342 case 60: /* tw to,a,b */
3343 r := int(c.regoff(&p.From) & 31)
3345 o1 = AOP_RRR(c.oprrr(p.As), uint32(r), uint32(p.Reg), uint32(p.To.Reg))
3347 case 61: /* tw to,a,$simm */
3348 r := int(c.regoff(&p.From) & 31)
3350 v := c.regoff(&p.To)
3351 o1 = AOP_IRR(c.opirr(p.As), uint32(r), uint32(p.Reg), uint32(v))
3353 case 62: /* rlwmi $sh,s,$mask,a */
3354 v := c.regoff(&p.From)
3357 n := c.regoff(p.GetFrom3())
3358 // This is an extended mnemonic described in the ISA C.8.2
3359 // clrlslwi ra,rs,b,n -> rlwinm ra,rs,n,b-n,31-n
3360 // It maps onto rlwinm which is directly generated here.
3361 if n > v || v >= 32 {
3362 c.ctxt.Diag("Invalid n or b for CLRLSLWI: %x %x\n%v", v, n, p)
3365 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.Reg), uint32(n), uint32(v-n), uint32(31-n))
3368 c.maskgen(p, mask[:], uint32(c.regoff(p.GetFrom3())))
3369 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), uint32(v))
3370 o1 |= (uint32(mask[0])&31)<<6 | (uint32(mask[1])&31)<<1
3373 case 63: /* rlwmi b,s,$mask,a */
3375 c.maskgen(p, mask[:], uint32(c.regoff(p.GetFrom3())))
3376 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.Reg), uint32(p.To.Reg), uint32(p.From.Reg))
3377 o1 |= (uint32(mask[0])&31)<<6 | (uint32(mask[1])&31)<<1
3379 case 64: /* mtfsf fr[, $m] {,fpcsr} */
3381 if p.From3Type() != obj.TYPE_NONE {
3382 v = c.regoff(p.GetFrom3()) & 255
3386 o1 = OP_MTFSF | uint32(v)<<17 | uint32(p.From.Reg)<<11
3388 case 65: /* MOVFL $imm,FPSCR(n) => mtfsfi crfd,imm */
3390 c.ctxt.Diag("must specify FPSCR(n)\n%v", p)
3392 o1 = OP_MTFSFI | (uint32(p.To.Reg)&15)<<23 | (uint32(c.regoff(&p.From))&31)<<12
3394 case 66: /* mov spr,r1; mov r1,spr */
3397 if REG_R0 <= p.From.Reg && p.From.Reg <= REG_R31 {
3400 o1 = OPVCC(31, 467, 0, 0) /* mtspr */
3403 v = int32(p.From.Reg)
3404 o1 = OPVCC(31, 339, 0, 0) /* mfspr */
3407 o1 = AOP_RRR(o1, uint32(r), 0, 0) | (uint32(v)&0x1f)<<16 | ((uint32(v)>>5)&0x1f)<<11
3409 case 67: /* mcrf crfD,crfS */
3410 if p.From.Reg == REG_CR || p.To.Reg == REG_CR {
3411 c.ctxt.Diag("CR argument must be a conditional register field (CR0-CR7)\n%v", p)
3413 o1 = AOP_RRR(OP_MCRF, ((uint32(p.To.Reg) & 7) << 2), ((uint32(p.From.Reg) & 7) << 2), 0)
3415 case 68: /* mfcr rD; mfocrf CRM,rD */
3416 o1 = AOP_RRR(OP_MFCR, uint32(p.To.Reg), 0, 0) /* form, whole register */
3417 if p.From.Reg != REG_CR {
3418 v := uint32(1) << uint(7-(p.From.Reg&7)) /* CR(n) */
3419 o1 |= 1<<20 | v<<12 /* new form, mfocrf */
3422 case 69: /* mtcrf CRM,rS, mtocrf CRx,rS */
3424 if p.To.Reg == REG_CR {
3426 } else if p.To.Offset != 0 { // MOVFL gpr, constant
3427 v = uint32(p.To.Offset)
3428 } else { // p.To.Reg == REG_CRx
3429 v = 1 << uint(7-(p.To.Reg&7))
3431 // Use mtocrf form if only one CR field moved.
3432 if bits.OnesCount32(v) == 1 {
3436 o1 = AOP_RRR(OP_MTCRF, uint32(p.From.Reg), 0, 0) | uint32(v)<<12
3438 case 70: /* [f]cmp r,r,cr*/
3443 r = (int(p.Reg) & 7) << 2
3445 o1 = AOP_RRR(c.oprrr(p.As), uint32(r), uint32(p.From.Reg), uint32(p.To.Reg))
3447 case 71: /* cmp[l] r,i,cr*/
3452 r = (int(p.Reg) & 7) << 2
3454 o1 = AOP_RRR(c.opirr(p.As), uint32(r), uint32(p.From.Reg), 0) | uint32(c.regoff(&p.To))&0xffff
3456 case 72: /* slbmte (Rb+Rs -> slb[Rb]) -> Rs, Rb */
3457 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), 0, uint32(p.To.Reg))
3459 case 73: /* mcrfs crfD,crfS */
3460 if p.From.Type != obj.TYPE_REG || p.From.Reg != REG_FPSCR || p.To.Type != obj.TYPE_REG || p.To.Reg < REG_CR0 || REG_CR7 < p.To.Reg {
3461 c.ctxt.Diag("illegal FPSCR/CR field number\n%v", p)
3463 o1 = AOP_RRR(OP_MCRFS, ((uint32(p.To.Reg) & 7) << 2), ((0 & 7) << 2), 0)
// case 77: SYSCALL. A constant system-call number is loaded into R0 via
// ADDI R0,R0,n (range-checked against BIG); a register operand is copied
// into R0 via OR. Anything else is diagnosed and an unconditional trap
// word is emitted so the bad instruction cannot be executed silently.
3465 case 77: /* syscall $scon, syscall Rx */
3466 if p.From.Type == obj.TYPE_CONST {
3467 if p.From.Offset > BIG || p.From.Offset < -BIG {
3468 c.ctxt.Diag("illegal syscall, sysnum too large: %v", p)
3470 o1 = AOP_IRR(OP_ADDI, REGZERO, REGZERO, uint32(p.From.Offset))
3471 } else if p.From.Type == obj.TYPE_REG {
3472 o1 = LOP_RRR(OP_OR, REGZERO, uint32(p.From.Reg), uint32(p.From.Reg))
3474 c.ctxt.Diag("illegal syscall: %v", p)
3475 o1 = 0x7fe00008 // trap always
3479 o3 = AOP_RRR(c.oprrr(AXOR), REGZERO, REGZERO, REGZERO) // XOR R0, R0
// case 78: deliberately undefined instruction: an all-zero word, which the
// ISA guarantees is illegal (quoted below).
3481 case 78: /* undef */
3482 o1 = 0 /* "An instruction consisting entirely of binary 0s is guaranteed
3483 always to be an illegal instruction." */
3485 /* relocation operations */
3487 v := c.vregoff(&p.To)
3488 // Offsets in DS form stores must be a multiple of 4
3489 inst := c.opstore(p.As)
3490 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3491 log.Fatalf("invalid offset for DS form load/store %v", p)
3493 // Can't reuse base for store instructions.
3494 o1, o2 = c.symbolAccess(p.To.Sym, v, p.From.Reg, inst, false)
3496 case 75: // 32 bit offset symbol loads (got/toc/addr)
3499 // Offsets in DS form loads must be a multiple of 4
3500 inst := c.opload(p.As)
3501 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3502 log.Fatalf("invalid offset for DS form load/store %v", p)
3504 switch p.From.Name {
3505 case obj.NAME_GOTREF, obj.NAME_TOCREF:
3507 c.ctxt.Diag("invalid offset for GOT/TOC access %v", p)
3509 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0)
3510 o2 = AOP_IRR(inst, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3511 rel := obj.Addrel(c.cursym)
3512 rel.Off = int32(c.pc)
3514 rel.Sym = p.From.Sym
3515 switch p.From.Name {
3516 case obj.NAME_GOTREF:
3517 rel.Type = objabi.R_ADDRPOWER_GOT
3518 case obj.NAME_TOCREF:
3519 rel.Type = objabi.R_ADDRPOWER_TOCREL_DS
3522 reuseBaseReg := p.As != AFMOVD && p.As != AFMOVS
3523 // Reuse To.Reg as base register if not FP move.
3524 o1, o2 = c.symbolAccess(p.From.Sym, v, p.To.Reg, inst, reuseBaseReg)
3527 o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3530 if p.From.Offset != 0 {
3531 c.ctxt.Diag("invalid offset against tls var %v", p)
3533 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R13, 0)
3534 o2 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3535 rel := obj.Addrel(c.cursym)
3536 rel.Off = int32(c.pc)
3538 rel.Sym = p.From.Sym
3539 rel.Type = objabi.R_POWER_TLS_LE
3542 if p.From.Offset != 0 {
3543 c.ctxt.Diag("invalid offset against tls var %v", p)
3545 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0)
3546 o2 = AOP_IRR(c.opload(AMOVD), uint32(p.To.Reg), uint32(p.To.Reg), 0)
3547 o3 = AOP_RRR(OP_ADD, uint32(p.To.Reg), uint32(p.To.Reg), REG_R13)
3548 rel := obj.Addrel(c.cursym)
3549 rel.Off = int32(c.pc)
3551 rel.Sym = p.From.Sym
3552 rel.Type = objabi.R_POWER_TLS_IE
3553 rel = obj.Addrel(c.cursym)
3554 rel.Off = int32(c.pc) + 8
3556 rel.Sym = p.From.Sym
3557 rel.Type = objabi.R_POWER_TLS
3559 case 82: /* vector instructions, VX-form and VC-form */
3560 if p.From.Type == obj.TYPE_REG {
3561 /* reg reg none OR reg reg reg */
3562 /* 3-register operand order: VRA, VRB, VRT */
3563 /* 2-register operand order: VRA, VRT */
3564 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3565 } else if p.From3Type() == obj.TYPE_CONST {
3566 /* imm imm reg reg */
3567 /* operand order: SIX, VRA, ST, VRT */
3568 six := int(c.regoff(&p.From))
3569 st := int(c.regoff(p.GetFrom3()))
3570 o1 = AOP_IIRR(c.opiirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(st), uint32(six))
3571 } else if p.From3Type() == obj.TYPE_NONE && p.Reg != 0 {
3573 /* operand order: UIM, VRB, VRT */
3574 uim := int(c.regoff(&p.From))
3575 o1 = AOP_VIRR(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(uim))
3578 /* operand order: SIM, VRT */
3579 sim := int(c.regoff(&p.From))
3580 o1 = AOP_IR(c.opirr(p.As), uint32(p.To.Reg), uint32(sim))
3583 case 83: /* vector instructions, VA-form */
3584 if p.From.Type == obj.TYPE_REG {
3585 /* reg reg reg reg */
3586 /* 4-register operand order: VRA, VRB, VRC, VRT */
3587 o1 = AOP_RRRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg))
3588 } else if p.From.Type == obj.TYPE_CONST {
3589 /* imm reg reg reg */
3590 /* operand order: SHB, VRA, VRB, VRT */
3591 shb := int(c.regoff(&p.From))
3592 o1 = AOP_IRRR(c.opirrr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg), uint32(shb))
3595 case 84: // ISEL BC,RA,RB,RT -> isel rt,ra,rb,bc
3596 bc := c.vregoff(&p.From)
3597 if o.a1 == C_CRBIT {
3598 // CR bit is encoded as a register, not a constant.
3599 bc = int64(p.From.Reg)
3602 // rt = To.Reg, ra = p.Reg, rb = p.From3.Reg
3603 o1 = AOP_ISEL(OP_ISEL, uint32(p.To.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg), uint32(bc))
3605 case 85: /* vector instructions, VX-form */
3607 /* 2-register operand order: VRB, VRT */
3608 o1 = AOP_RR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg))
3610 case 86: /* VSX indexed store, XX1-form */
3612 /* 3-register operand order: XT, (RB)(RA*1) */
3613 o1 = AOP_XX1(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(p.To.Reg))
3615 case 87: /* VSX indexed load, XX1-form */
3617 /* 3-register operand order: (RB)(RA*1), XT */
3618 o1 = AOP_XX1(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3620 case 88: /* VSX mfvsr* instructions, XX1-form XS,RA */
3621 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3623 case 89: /* VSX instructions, XX2-form */
3624 /* reg none reg OR reg imm reg */
3625 /* 2-register operand order: XB, XT or XB, UIM, XT*/
3626 uim := int(c.regoff(p.GetFrom3()))
3627 o1 = AOP_XX2(c.oprrr(p.As), uint32(p.To.Reg), uint32(uim), uint32(p.From.Reg))
3629 case 90: /* VSX instructions, XX3-form */
3630 if p.From3Type() == obj.TYPE_NONE {
3632 /* 3-register operand order: XA, XB, XT */
3633 o1 = AOP_XX3(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3634 } else if p.From3Type() == obj.TYPE_CONST {
3635 /* reg reg reg imm */
3636 /* operand order: XA, XB, DM, XT */
3637 dm := int(c.regoff(p.GetFrom3()))
3638 o1 = AOP_XX3I(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(dm))
3641 case 91: /* VSX instructions, XX4-form */
3642 /* reg reg reg reg */
3643 /* 3-register operand order: XA, XB, XC, XT */
3644 o1 = AOP_XX4(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg))
3646 case 92: /* X-form instructions, 3-operands */
3647 if p.To.Type == obj.TYPE_CONST {
3649 xf := int32(p.From.Reg)
3650 if REG_F0 <= xf && xf <= REG_F31 {
3651 /* operand order: FRA, FRB, BF */
3652 bf := int(c.regoff(&p.To)) << 2
3653 o1 = AOP_RRR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg), uint32(p.Reg))
3655 /* operand order: RA, RB, L */
3656 l := int(c.regoff(&p.To))
3657 o1 = AOP_RRR(c.opirr(p.As), uint32(l), uint32(p.From.Reg), uint32(p.Reg))
3659 } else if p.From3Type() == obj.TYPE_CONST {
3661 /* operand order: RB, L, RA */
3662 l := int(c.regoff(p.GetFrom3()))
3663 o1 = AOP_RRR(c.opirr(p.As), uint32(l), uint32(p.To.Reg), uint32(p.From.Reg))
3664 } else if p.To.Type == obj.TYPE_REG {
3665 cr := int32(p.To.Reg)
3666 if REG_CR0 <= cr && cr <= REG_CR7 {
3668 /* operand order: RA, RB, BF */
3669 bf := (int(p.To.Reg) & 7) << 2
3670 o1 = AOP_RRR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg), uint32(p.Reg))
3671 } else if p.From.Type == obj.TYPE_CONST {
3673 /* operand order: L, RT */
3674 l := int(c.regoff(&p.From))
3675 o1 = AOP_RRR(c.opirr(p.As), uint32(p.To.Reg), uint32(l), uint32(p.Reg))
3678 case ACOPY, APASTECC:
3679 o1 = AOP_RRR(c.opirr(p.As), uint32(1), uint32(p.From.Reg), uint32(p.To.Reg))
3682 /* operand order: RS, RB, RA */
3683 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3688 case 93: /* X-form instructions, 2-operands */
3689 if p.To.Type == obj.TYPE_CONST {
3691 /* operand order: FRB, BF */
3692 bf := int(c.regoff(&p.To)) << 2
3693 o1 = AOP_RR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg))
3694 } else if p.Reg == 0 {
3695 /* popcnt* r,r, X-form */
3696 /* operand order: RS, RA */
3697 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3700 case 94: /* Z23-form instructions, 4-operands */
3701 /* reg reg reg imm */
3702 /* operand order: RA, RB, CY, RT */
3703 cy := int(c.regoff(p.GetFrom3()))
3704 o1 = AOP_Z23I(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(cy))
3706 case 96: /* VSX load, DQ-form */
3708 /* operand order: (RA)(DQ), XT */
3709 dq := int16(c.regoff(&p.From))
3711 c.ctxt.Diag("invalid offset for DQ form load/store %v", dq)
3713 o1 = AOP_DQ(c.opload(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(dq))
3715 case 97: /* VSX store, DQ-form */
3717 /* operand order: XT, (RA)(DQ) */
3718 dq := int16(c.regoff(&p.To))
3720 c.ctxt.Diag("invalid offset for DQ form load/store %v", dq)
3722 o1 = AOP_DQ(c.opstore(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(dq))
3723 case 98: /* VSX indexed load or load with length (also left-justified), x-form */
3724 /* vsreg, reg, reg */
3725 o1 = AOP_XX1(c.opload(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3726 case 99: /* VSX store with length (also left-justified) x-form */
3727 /* reg, reg, vsreg */
3728 o1 = AOP_XX1(c.opstore(p.As), uint32(p.From.Reg), uint32(p.Reg), uint32(p.To.Reg))
3729 case 100: /* VSX X-form XXSPLTIB */
3730 if p.From.Type == obj.TYPE_CONST {
3732 uim := int(c.regoff(&p.From))
3734 /* Use AOP_XX1 form with 0 for one of the registers. */
3735 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.To.Reg), uint32(0), uint32(uim))
3737 c.ctxt.Diag("invalid ops for %v", p.As)
3740 o1 = AOP_XX2(c.oprrr(p.As), uint32(p.To.Reg), uint32(0), uint32(p.From.Reg))
3742 case 102: /* RLWMI $sh,rs,$mb,$me,rt (M-form opcode)*/
3743 mb := uint32(c.regoff(&p.RestArgs[0].Addr))
3744 me := uint32(c.regoff(&p.RestArgs[1].Addr))
3745 sh := uint32(c.regoff(&p.From))
3746 o1 = OP_RLW(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), sh, mb, me)
3748 case 103: /* RLWMI rb,rs,$mb,$me,rt (M-form opcode)*/
3749 mb := uint32(c.regoff(&p.RestArgs[0].Addr))
3750 me := uint32(c.regoff(&p.RestArgs[1].Addr))
3751 o1 = OP_RLW(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.From.Reg), mb, me)
3753 case 104: /* VSX mtvsr* instructions, XX1-form RA,RB,XT */
3754 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3756 case 106: /* MOVD spr, soreg */
3757 v := int32(p.From.Reg)
3758 o1 = OPVCC(31, 339, 0, 0) /* mfspr */
3759 o1 = AOP_RRR(o1, uint32(REGTMP), 0, 0) | (uint32(v)&0x1f)<<16 | ((uint32(v)>>5)&0x1f)<<11
3760 so := c.regoff(&p.To)
3761 o2 = AOP_IRR(c.opstore(AMOVD), uint32(REGTMP), uint32(p.To.Reg), uint32(so))
3763 log.Fatalf("invalid offset for DS form load/store %v", p)
3765 if p.To.Reg == REGTMP {
3766 log.Fatalf("SPR move to memory will clobber R31 %v", p)
3769 case 107: /* MOVD soreg, spr */
3770 v := int32(p.From.Reg)
3771 so := c.regoff(&p.From)
3772 o1 = AOP_IRR(c.opload(AMOVD), uint32(REGTMP), uint32(v), uint32(so))
3773 o2 = OPVCC(31, 467, 0, 0) /* mtspr */
3775 o2 = AOP_RRR(o2, uint32(REGTMP), 0, 0) | (uint32(v)&0x1f)<<16 | ((uint32(v)>>5)&0x1f)<<11
3777 log.Fatalf("invalid offset for DS form load/store %v", p)
3780 case 108: /* mov r, xoreg ==> stwx rx,ry */
3782 o1 = AOP_RRR(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(r))
3784 case 109: /* mov xoreg, r ==> lbzx/lhzx/lwzx rx,ry, lbzx rx,ry + extsb r,r */
3785 r := int(p.From.Reg)
3787 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(r))
3788 // Sign extend MOVB operations. This is ignored for other cases (o.size == 4).
3789 o2 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3799 func (c *ctxt9) vregoff(a *obj.Addr) int64 {
3807 func (c *ctxt9) regoff(a *obj.Addr) int32 {
3808 return int32(c.vregoff(a))
3811 func (c *ctxt9) oprrr(a obj.As) uint32 {
3814 return OPVCC(31, 266, 0, 0)
3816 return OPVCC(31, 266, 0, 1)
3818 return OPVCC(31, 266, 1, 0)
3820 return OPVCC(31, 266, 1, 1)
3822 return OPVCC(31, 10, 0, 0)
3824 return OPVCC(31, 10, 0, 1)
3826 return OPVCC(31, 10, 1, 0)
3828 return OPVCC(31, 10, 1, 1)
3830 return OPVCC(31, 138, 0, 0)
3832 return OPVCC(31, 138, 0, 1)
3834 return OPVCC(31, 138, 1, 0)
3836 return OPVCC(31, 138, 1, 1)
3838 return OPVCC(31, 234, 0, 0)
3840 return OPVCC(31, 234, 0, 1)
3842 return OPVCC(31, 234, 1, 0)
3844 return OPVCC(31, 234, 1, 1)
3846 return OPVCC(31, 202, 0, 0)
3848 return OPVCC(31, 202, 0, 1)
3850 return OPVCC(31, 202, 1, 0)
3852 return OPVCC(31, 202, 1, 1)
3854 return OPVCC(31, 170, 0, 0) /* addex - v3.0b */
3857 return OPVCC(31, 28, 0, 0)
3859 return OPVCC(31, 28, 0, 1)
3861 return OPVCC(31, 60, 0, 0)
3863 return OPVCC(31, 60, 0, 1)
3866 return OPVCC(31, 0, 0, 0) | 1<<21 /* L=1 */
3868 return OPVCC(31, 32, 0, 0) | 1<<21
3870 return OPVCC(31, 0, 0, 0) /* L=0 */
3872 return OPVCC(31, 32, 0, 0)
3874 return OPVCC(31, 508, 0, 0) /* cmpb - v2.05 */
3876 return OPVCC(31, 224, 0, 0) /* cmpeqb - v3.00 */
3879 return OPVCC(31, 26, 0, 0)
3881 return OPVCC(31, 26, 0, 1)
3883 return OPVCC(31, 58, 0, 0)
3885 return OPVCC(31, 58, 0, 1)
3888 return OPVCC(19, 257, 0, 0)
3890 return OPVCC(19, 129, 0, 0)
3892 return OPVCC(19, 289, 0, 0)
3894 return OPVCC(19, 225, 0, 0)
3896 return OPVCC(19, 33, 0, 0)
3898 return OPVCC(19, 449, 0, 0)
3900 return OPVCC(19, 417, 0, 0)
3902 return OPVCC(19, 193, 0, 0)
3905 return OPVCC(31, 86, 0, 0)
3907 return OPVCC(31, 470, 0, 0)
3909 return OPVCC(31, 54, 0, 0)
3911 return OPVCC(31, 278, 0, 0)
3913 return OPVCC(31, 246, 0, 0)
3915 return OPVCC(31, 1014, 0, 0)
3918 return OPVCC(31, 265, 0, 0) /* modud - v3.0 */
3920 return OPVCC(31, 267, 0, 0) /* moduw - v3.0 */
3922 return OPVCC(31, 777, 0, 0) /* modsd - v3.0 */
3924 return OPVCC(31, 779, 0, 0) /* modsw - v3.0 */
3927 return OPVCC(31, 491, 0, 0)
3930 return OPVCC(31, 491, 0, 1)
3933 return OPVCC(31, 491, 1, 0)
3936 return OPVCC(31, 491, 1, 1)
3939 return OPVCC(31, 459, 0, 0)
3942 return OPVCC(31, 459, 0, 1)
3945 return OPVCC(31, 459, 1, 0)
3948 return OPVCC(31, 459, 1, 1)
3951 return OPVCC(31, 489, 0, 0)
3954 return OPVCC(31, 489, 0, 1)
3957 return OPVCC(31, 425, 0, 0)
3960 return OPVCC(31, 425, 0, 1)
3963 return OPVCC(31, 393, 0, 0)
3966 return OPVCC(31, 393, 0, 1)
3969 return OPVCC(31, 489, 1, 0)
3972 return OPVCC(31, 489, 1, 1)
3974 case ADIVDU, AREMDU:
3975 return OPVCC(31, 457, 0, 0)
3978 return OPVCC(31, 457, 0, 1)
3981 return OPVCC(31, 457, 1, 0)
3984 return OPVCC(31, 457, 1, 1)
3987 return OPVCC(31, 854, 0, 0)
3990 return OPVCC(31, 284, 0, 0)
3992 return OPVCC(31, 284, 0, 1)
3995 return OPVCC(31, 954, 0, 0)
3997 return OPVCC(31, 954, 0, 1)
3999 return OPVCC(31, 922, 0, 0)
4001 return OPVCC(31, 922, 0, 1)
4003 return OPVCC(31, 986, 0, 0)
4005 return OPVCC(31, 986, 0, 1)
4008 return OPVCC(63, 264, 0, 0)
4010 return OPVCC(63, 264, 0, 1)
4012 return OPVCC(63, 21, 0, 0)
4014 return OPVCC(63, 21, 0, 1)
4016 return OPVCC(59, 21, 0, 0)
4018 return OPVCC(59, 21, 0, 1)
4020 return OPVCC(63, 32, 0, 0)
4022 return OPVCC(63, 0, 0, 0)
4024 return OPVCC(63, 846, 0, 0)
4026 return OPVCC(63, 846, 0, 1)
4028 return OPVCC(63, 974, 0, 0)
4030 return OPVCC(63, 974, 0, 1)
4032 return OPVCC(59, 846, 0, 0)
4034 return OPVCC(59, 846, 0, 1)
4036 return OPVCC(63, 14, 0, 0)
4038 return OPVCC(63, 14, 0, 1)
4040 return OPVCC(63, 15, 0, 0)
4042 return OPVCC(63, 15, 0, 1)
4044 return OPVCC(63, 814, 0, 0)
4046 return OPVCC(63, 814, 0, 1)
4048 return OPVCC(63, 815, 0, 0)
4050 return OPVCC(63, 815, 0, 1)
4052 return OPVCC(63, 18, 0, 0)
4054 return OPVCC(63, 18, 0, 1)
4056 return OPVCC(59, 18, 0, 0)
4058 return OPVCC(59, 18, 0, 1)
4060 return OPVCC(63, 29, 0, 0)
4062 return OPVCC(63, 29, 0, 1)
4064 return OPVCC(59, 29, 0, 0)
4066 return OPVCC(59, 29, 0, 1)
4068 case AFMOVS, AFMOVD:
4069 return OPVCC(63, 72, 0, 0) /* load */
4071 return OPVCC(63, 72, 0, 1)
4073 return OPVCC(63, 28, 0, 0)
4075 return OPVCC(63, 28, 0, 1)
4077 return OPVCC(59, 28, 0, 0)
4079 return OPVCC(59, 28, 0, 1)
4081 return OPVCC(63, 25, 0, 0)
4083 return OPVCC(63, 25, 0, 1)
4085 return OPVCC(59, 25, 0, 0)
4087 return OPVCC(59, 25, 0, 1)
4089 return OPVCC(63, 136, 0, 0)
4091 return OPVCC(63, 136, 0, 1)
4093 return OPVCC(63, 40, 0, 0)
4095 return OPVCC(63, 40, 0, 1)
4097 return OPVCC(63, 31, 0, 0)
4099 return OPVCC(63, 31, 0, 1)
4101 return OPVCC(59, 31, 0, 0)
4103 return OPVCC(59, 31, 0, 1)
4105 return OPVCC(63, 30, 0, 0)
4107 return OPVCC(63, 30, 0, 1)
4109 return OPVCC(59, 30, 0, 0)
4111 return OPVCC(59, 30, 0, 1)
4113 return OPVCC(63, 8, 0, 0)
4115 return OPVCC(63, 8, 0, 1)
4117 return OPVCC(59, 24, 0, 0)
4119 return OPVCC(59, 24, 0, 1)
4121 return OPVCC(63, 488, 0, 0)
4123 return OPVCC(63, 488, 0, 1)
4125 return OPVCC(63, 456, 0, 0)
4127 return OPVCC(63, 456, 0, 1)
4129 return OPVCC(63, 424, 0, 0)
4131 return OPVCC(63, 424, 0, 1)
4133 return OPVCC(63, 392, 0, 0)
4135 return OPVCC(63, 392, 0, 1)
4137 return OPVCC(63, 12, 0, 0)
4139 return OPVCC(63, 12, 0, 1)
4141 return OPVCC(63, 26, 0, 0)
4143 return OPVCC(63, 26, 0, 1)
4145 return OPVCC(63, 23, 0, 0)
4147 return OPVCC(63, 23, 0, 1)
4149 return OPVCC(63, 22, 0, 0)
4151 return OPVCC(63, 22, 0, 1)
4153 return OPVCC(59, 22, 0, 0)
4155 return OPVCC(59, 22, 0, 1)
4157 return OPVCC(63, 20, 0, 0)
4159 return OPVCC(63, 20, 0, 1)
4161 return OPVCC(59, 20, 0, 0)
4163 return OPVCC(59, 20, 0, 1)
4166 return OPVCC(31, 982, 0, 0)
4168 return OPVCC(19, 150, 0, 0)
4171 return OPVCC(63, 70, 0, 0)
4173 return OPVCC(63, 70, 0, 1)
4175 return OPVCC(63, 38, 0, 0)
4177 return OPVCC(63, 38, 0, 1)
4180 return OPVCC(31, 75, 0, 0)
4182 return OPVCC(31, 75, 0, 1)
4184 return OPVCC(31, 11, 0, 0)
4186 return OPVCC(31, 11, 0, 1)
4188 return OPVCC(31, 235, 0, 0)
4190 return OPVCC(31, 235, 0, 1)
4192 return OPVCC(31, 235, 1, 0)
4194 return OPVCC(31, 235, 1, 1)
4197 return OPVCC(31, 73, 0, 0)
4199 return OPVCC(31, 73, 0, 1)
4201 return OPVCC(31, 9, 0, 0)
4203 return OPVCC(31, 9, 0, 1)
4205 return OPVCC(31, 233, 0, 0)
4207 return OPVCC(31, 233, 0, 1)
4209 return OPVCC(31, 233, 1, 0)
4211 return OPVCC(31, 233, 1, 1)
4214 return OPVCC(31, 476, 0, 0)
4216 return OPVCC(31, 476, 0, 1)
4218 return OPVCC(31, 104, 0, 0)
4220 return OPVCC(31, 104, 0, 1)
4222 return OPVCC(31, 104, 1, 0)
4224 return OPVCC(31, 104, 1, 1)
4226 return OPVCC(31, 124, 0, 0)
4228 return OPVCC(31, 124, 0, 1)
4230 return OPVCC(31, 444, 0, 0)
4232 return OPVCC(31, 444, 0, 1)
4234 return OPVCC(31, 412, 0, 0)
4236 return OPVCC(31, 412, 0, 1)
4239 return OPVCC(31, 506, 0, 0) /* popcntd - v2.06 */
4241 return OPVCC(31, 378, 0, 0) /* popcntw - v2.06 */
4243 return OPVCC(31, 122, 0, 0) /* popcntb - v2.02 */
4245 return OPVCC(31, 538, 0, 0) /* cnttzw - v3.00 */
4247 return OPVCC(31, 538, 0, 1) /* cnttzw. - v3.00 */
4249 return OPVCC(31, 570, 0, 0) /* cnttzd - v3.00 */
4251 return OPVCC(31, 570, 0, 1) /* cnttzd. - v3.00 */
4254 return OPVCC(19, 50, 0, 0)
4256 return OPVCC(19, 51, 0, 0)
4258 return OPVCC(19, 18, 0, 0)
4260 return OPVCC(19, 274, 0, 0)
4263 return OPVCC(20, 0, 0, 0)
4265 return OPVCC(20, 0, 0, 1)
4267 return OPVCC(23, 0, 0, 0)
4269 return OPVCC(23, 0, 0, 1)
4272 return OPVCC(30, 8, 0, 0)
4274 return OPVCC(30, 0, 0, 1)
4277 return OPVCC(30, 9, 0, 0)
4279 return OPVCC(30, 9, 0, 1)
4282 return OPVCC(30, 0, 0, 0)
4284 return OPVCC(30, 0, 0, 1)
4286 return OPMD(30, 1, 0) // rldicr
4288 return OPMD(30, 1, 1) // rldicr.
4291 return OPMD(30, 2, 0) // rldic
4293 return OPMD(30, 2, 1) // rldic.
4296 return OPVCC(17, 1, 0, 0)
4299 return OPVCC(31, 24, 0, 0)
4301 return OPVCC(31, 24, 0, 1)
4303 return OPVCC(31, 27, 0, 0)
4305 return OPVCC(31, 27, 0, 1)
4308 return OPVCC(31, 792, 0, 0)
4310 return OPVCC(31, 792, 0, 1)
4312 return OPVCC(31, 794, 0, 0)
4314 return OPVCC(31, 794, 0, 1)
4317 return OPVCC(31, 445, 0, 0)
4319 return OPVCC(31, 445, 0, 1)
4322 return OPVCC(31, 536, 0, 0)
4324 return OPVCC(31, 536, 0, 1)
4326 return OPVCC(31, 539, 0, 0)
4328 return OPVCC(31, 539, 0, 1)
4331 return OPVCC(31, 40, 0, 0)
4333 return OPVCC(31, 40, 0, 1)
4335 return OPVCC(31, 40, 1, 0)
4337 return OPVCC(31, 40, 1, 1)
4339 return OPVCC(31, 8, 0, 0)
4341 return OPVCC(31, 8, 0, 1)
4343 return OPVCC(31, 8, 1, 0)
4345 return OPVCC(31, 8, 1, 1)
4347 return OPVCC(31, 136, 0, 0)
4349 return OPVCC(31, 136, 0, 1)
4351 return OPVCC(31, 136, 1, 0)
4353 return OPVCC(31, 136, 1, 1)
4355 return OPVCC(31, 232, 0, 0)
4357 return OPVCC(31, 232, 0, 1)
4359 return OPVCC(31, 232, 1, 0)
4361 return OPVCC(31, 232, 1, 1)
4363 return OPVCC(31, 200, 0, 0)
4365 return OPVCC(31, 200, 0, 1)
4367 return OPVCC(31, 200, 1, 0)
4369 return OPVCC(31, 200, 1, 1)
4372 return OPVCC(31, 598, 0, 0)
4374 return OPVCC(31, 598, 0, 0) | 1<<21
4377 return OPVCC(31, 598, 0, 0) | 2<<21
4380 return OPVCC(31, 306, 0, 0)
4382 return OPVCC(31, 274, 0, 0)
4384 return OPVCC(31, 566, 0, 0)
4386 return OPVCC(31, 498, 0, 0)
4388 return OPVCC(31, 434, 0, 0)
4390 return OPVCC(31, 915, 0, 0)
4392 return OPVCC(31, 851, 0, 0)
4394 return OPVCC(31, 402, 0, 0)
4397 return OPVCC(31, 4, 0, 0)
4399 return OPVCC(31, 68, 0, 0)
4401 /* Vector (VMX/Altivec) instructions */
4402 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
4403 /* are enabled starting at POWER6 (ISA 2.05). */
4405 return OPVX(4, 1028, 0, 0) /* vand - v2.03 */
4407 return OPVX(4, 1092, 0, 0) /* vandc - v2.03 */
4409 return OPVX(4, 1412, 0, 0) /* vnand - v2.07 */
4412 return OPVX(4, 1156, 0, 0) /* vor - v2.03 */
4414 return OPVX(4, 1348, 0, 0) /* vorc - v2.07 */
4416 return OPVX(4, 1284, 0, 0) /* vnor - v2.03 */
4418 return OPVX(4, 1220, 0, 0) /* vxor - v2.03 */
4420 return OPVX(4, 1668, 0, 0) /* veqv - v2.07 */
4423 return OPVX(4, 0, 0, 0) /* vaddubm - v2.03 */
4425 return OPVX(4, 64, 0, 0) /* vadduhm - v2.03 */
4427 return OPVX(4, 128, 0, 0) /* vadduwm - v2.03 */
4429 return OPVX(4, 192, 0, 0) /* vaddudm - v2.07 */
4431 return OPVX(4, 256, 0, 0) /* vadduqm - v2.07 */
4434 return OPVX(4, 320, 0, 0) /* vaddcuq - v2.07 */
4436 return OPVX(4, 384, 0, 0) /* vaddcuw - v2.03 */
4439 return OPVX(4, 512, 0, 0) /* vaddubs - v2.03 */
4441 return OPVX(4, 576, 0, 0) /* vadduhs - v2.03 */
4443 return OPVX(4, 640, 0, 0) /* vadduws - v2.03 */
4446 return OPVX(4, 768, 0, 0) /* vaddsbs - v2.03 */
4448 return OPVX(4, 832, 0, 0) /* vaddshs - v2.03 */
4450 return OPVX(4, 896, 0, 0) /* vaddsws - v2.03 */
4453 return OPVX(4, 60, 0, 0) /* vaddeuqm - v2.07 */
4455 return OPVX(4, 61, 0, 0) /* vaddecuq - v2.07 */
4458 return OPVX(4, 776, 0, 0) /* vmulesb - v2.03 */
4460 return OPVX(4, 264, 0, 0) /* vmulosb - v2.03 */
4462 return OPVX(4, 520, 0, 0) /* vmuleub - v2.03 */
4464 return OPVX(4, 8, 0, 0) /* vmuloub - v2.03 */
4466 return OPVX(4, 840, 0, 0) /* vmulesh - v2.03 */
4468 return OPVX(4, 328, 0, 0) /* vmulosh - v2.03 */
4470 return OPVX(4, 584, 0, 0) /* vmuleuh - v2.03 */
4472 return OPVX(4, 72, 0, 0) /* vmulouh - v2.03 */
4474 return OPVX(4, 904, 0, 0) /* vmulesw - v2.07 */
4476 return OPVX(4, 392, 0, 0) /* vmulosw - v2.07 */
4478 return OPVX(4, 648, 0, 0) /* vmuleuw - v2.07 */
4480 return OPVX(4, 136, 0, 0) /* vmulouw - v2.07 */
4482 return OPVX(4, 137, 0, 0) /* vmuluwm - v2.07 */
4485 return OPVX(4, 1032, 0, 0) /* vpmsumb - v2.07 */
4487 return OPVX(4, 1096, 0, 0) /* vpmsumh - v2.07 */
4489 return OPVX(4, 1160, 0, 0) /* vpmsumw - v2.07 */
4491 return OPVX(4, 1224, 0, 0) /* vpmsumd - v2.07 */
4494 return OPVX(4, 35, 0, 0) /* vmsumudm - v3.00b */
4497 return OPVX(4, 1024, 0, 0) /* vsububm - v2.03 */
4499 return OPVX(4, 1088, 0, 0) /* vsubuhm - v2.03 */
4501 return OPVX(4, 1152, 0, 0) /* vsubuwm - v2.03 */
4503 return OPVX(4, 1216, 0, 0) /* vsubudm - v2.07 */
4505 return OPVX(4, 1280, 0, 0) /* vsubuqm - v2.07 */
4508 return OPVX(4, 1344, 0, 0) /* vsubcuq - v2.07 */
4510 return OPVX(4, 1408, 0, 0) /* vsubcuw - v2.03 */
4513 return OPVX(4, 1536, 0, 0) /* vsububs - v2.03 */
4515 return OPVX(4, 1600, 0, 0) /* vsubuhs - v2.03 */
4517 return OPVX(4, 1664, 0, 0) /* vsubuws - v2.03 */
4520 return OPVX(4, 1792, 0, 0) /* vsubsbs - v2.03 */
4522 return OPVX(4, 1856, 0, 0) /* vsubshs - v2.03 */
4524 return OPVX(4, 1920, 0, 0) /* vsubsws - v2.03 */
4527 return OPVX(4, 62, 0, 0) /* vsubeuqm - v2.07 */
4529 return OPVX(4, 63, 0, 0) /* vsubecuq - v2.07 */
4532 return OPVX(4, 4, 0, 0) /* vrlb - v2.03 */
4534 return OPVX(4, 68, 0, 0) /* vrlh - v2.03 */
4536 return OPVX(4, 132, 0, 0) /* vrlw - v2.03 */
4538 return OPVX(4, 196, 0, 0) /* vrld - v2.07 */
4541 return OPVX(4, 1676, 0, 0) /* vmrgow - v2.07 */
4543 return OPVX(4, 1932, 0, 0) /* vmrgew - v2.07 */
4546 return OPVX(4, 260, 0, 0) /* vslb - v2.03 */
4548 return OPVX(4, 324, 0, 0) /* vslh - v2.03 */
4550 return OPVX(4, 388, 0, 0) /* vslw - v2.03 */
4552 return OPVX(4, 452, 0, 0) /* vsl - v2.03 */
4554 return OPVX(4, 1036, 0, 0) /* vslo - v2.03 */
4556 return OPVX(4, 516, 0, 0) /* vsrb - v2.03 */
4558 return OPVX(4, 580, 0, 0) /* vsrh - v2.03 */
4560 return OPVX(4, 644, 0, 0) /* vsrw - v2.03 */
4562 return OPVX(4, 708, 0, 0) /* vsr - v2.03 */
4564 return OPVX(4, 1100, 0, 0) /* vsro - v2.03 */
4566 return OPVX(4, 1476, 0, 0) /* vsld - v2.07 */
4568 return OPVX(4, 1732, 0, 0) /* vsrd - v2.07 */
4571 return OPVX(4, 772, 0, 0) /* vsrab - v2.03 */
4573 return OPVX(4, 836, 0, 0) /* vsrah - v2.03 */
4575 return OPVX(4, 900, 0, 0) /* vsraw - v2.03 */
4577 return OPVX(4, 964, 0, 0) /* vsrad - v2.07 */
4580 return OPVC(4, 1356, 0, 0) /* vbpermq - v2.07 */
4582 return OPVC(4, 1484, 0, 0) /* vbpermd - v3.00 */
4585 return OPVX(4, 1794, 0, 0) /* vclzb - v2.07 */
4587 return OPVX(4, 1858, 0, 0) /* vclzh - v2.07 */
4589 return OPVX(4, 1922, 0, 0) /* vclzw - v2.07 */
4591 return OPVX(4, 1986, 0, 0) /* vclzd - v2.07 */
4594 return OPVX(4, 1795, 0, 0) /* vpopcntb - v2.07 */
4596 return OPVX(4, 1859, 0, 0) /* vpopcnth - v2.07 */
4598 return OPVX(4, 1923, 0, 0) /* vpopcntw - v2.07 */
4600 return OPVX(4, 1987, 0, 0) /* vpopcntd - v2.07 */
4603 return OPVC(4, 6, 0, 0) /* vcmpequb - v2.03 */
4605 return OPVC(4, 6, 0, 1) /* vcmpequb. - v2.03 */
4607 return OPVC(4, 70, 0, 0) /* vcmpequh - v2.03 */
4609 return OPVC(4, 70, 0, 1) /* vcmpequh. - v2.03 */
4611 return OPVC(4, 134, 0, 0) /* vcmpequw - v2.03 */
4613 return OPVC(4, 134, 0, 1) /* vcmpequw. - v2.03 */
4615 return OPVC(4, 199, 0, 0) /* vcmpequd - v2.07 */
4617 return OPVC(4, 199, 0, 1) /* vcmpequd. - v2.07 */
4620 return OPVC(4, 518, 0, 0) /* vcmpgtub - v2.03 */
4622 return OPVC(4, 518, 0, 1) /* vcmpgtub. - v2.03 */
4624 return OPVC(4, 582, 0, 0) /* vcmpgtuh - v2.03 */
4626 return OPVC(4, 582, 0, 1) /* vcmpgtuh. - v2.03 */
4628 return OPVC(4, 646, 0, 0) /* vcmpgtuw - v2.03 */
4630 return OPVC(4, 646, 0, 1) /* vcmpgtuw. - v2.03 */
4632 return OPVC(4, 711, 0, 0) /* vcmpgtud - v2.07 */
4634 return OPVC(4, 711, 0, 1) /* vcmpgtud. - v2.07 */
4636 return OPVC(4, 774, 0, 0) /* vcmpgtsb - v2.03 */
4638 return OPVC(4, 774, 0, 1) /* vcmpgtsb. - v2.03 */
4640 return OPVC(4, 838, 0, 0) /* vcmpgtsh - v2.03 */
4642 return OPVC(4, 838, 0, 1) /* vcmpgtsh. - v2.03 */
4644 return OPVC(4, 902, 0, 0) /* vcmpgtsw - v2.03 */
4646 return OPVC(4, 902, 0, 1) /* vcmpgtsw. - v2.03 */
4648 return OPVC(4, 967, 0, 0) /* vcmpgtsd - v2.07 */
4650 return OPVC(4, 967, 0, 1) /* vcmpgtsd. - v2.07 */
4653 return OPVC(4, 263, 0, 0) /* vcmpnezb - v3.00 */
4655 return OPVC(4, 263, 0, 1) /* vcmpnezb. - v3.00 */
4657 return OPVC(4, 7, 0, 0) /* vcmpneb - v3.00 */
4659 return OPVC(4, 7, 0, 1) /* vcmpneb. - v3.00 */
4661 return OPVC(4, 71, 0, 0) /* vcmpneh - v3.00 */
4663 return OPVC(4, 71, 0, 1) /* vcmpneh. - v3.00 */
4665 return OPVC(4, 135, 0, 0) /* vcmpnew - v3.00 */
4667 return OPVC(4, 135, 0, 1) /* vcmpnew. - v3.00 */
4670 return OPVX(4, 43, 0, 0) /* vperm - v2.03 */
4672 return OPVX(4, 45, 0, 0) /* vpermxor - v2.03 */
4674 return OPVX(4, 59, 0, 0) /* vpermr - v3.0 */
4677 return OPVX(4, 42, 0, 0) /* vsel - v2.03 */
4680 return OPVX(4, 1288, 0, 0) /* vcipher - v2.07 */
4682 return OPVX(4, 1289, 0, 0) /* vcipherlast - v2.07 */
4684 return OPVX(4, 1352, 0, 0) /* vncipher - v2.07 */
4686 return OPVX(4, 1353, 0, 0) /* vncipherlast - v2.07 */
4688 return OPVX(4, 1480, 0, 0) /* vsbox - v2.07 */
4689 /* End of vector instructions */
4691 /* Vector scalar (VSX) instructions */
4692 /* ISA 2.06 enables these for POWER7. */
4693 case AMFVSRD, AMFVRD, AMFFPRD:
4694 return OPVXX1(31, 51, 0) /* mfvsrd - v2.07 */
4696 return OPVXX1(31, 115, 0) /* mfvsrwz - v2.07 */
4698 return OPVXX1(31, 307, 0) /* mfvsrld - v3.00 */
4700 case AMTVSRD, AMTFPRD, AMTVRD:
4701 return OPVXX1(31, 179, 0) /* mtvsrd - v2.07 */
4703 return OPVXX1(31, 211, 0) /* mtvsrwa - v2.07 */
4705 return OPVXX1(31, 243, 0) /* mtvsrwz - v2.07 */
4707 return OPVXX1(31, 435, 0) /* mtvsrdd - v3.00 */
4709 return OPVXX1(31, 403, 0) /* mtvsrws - v3.00 */
4712 return OPVXX3(60, 130, 0) /* xxland - v2.06 */
4714 return OPVXX3(60, 138, 0) /* xxlandc - v2.06 */
4716 return OPVXX3(60, 186, 0) /* xxleqv - v2.07 */
4718 return OPVXX3(60, 178, 0) /* xxlnand - v2.07 */
4721 return OPVXX3(60, 170, 0) /* xxlorc - v2.07 */
4723 return OPVXX3(60, 162, 0) /* xxlnor - v2.06 */
4724 case AXXLOR, AXXLORQ:
4725 return OPVXX3(60, 146, 0) /* xxlor - v2.06 */
4727 return OPVXX3(60, 154, 0) /* xxlxor - v2.06 */
4730 return OPVXX4(60, 3, 0) /* xxsel - v2.06 */
4733 return OPVXX3(60, 18, 0) /* xxmrghw - v2.06 */
4735 return OPVXX3(60, 50, 0) /* xxmrglw - v2.06 */
4738 return OPVXX2(60, 164, 0) /* xxspltw - v2.06 */
4741 return OPVCC(60, 360, 0, 0) /* xxspltib - v3.0 */
4744 return OPVXX3(60, 26, 0) /* xxperm - v2.06 */
4746 return OPVXX3(60, 10, 0) /* xxpermdi - v2.06 */
4749 return OPVXX3(60, 2, 0) /* xxsldwi - v2.06 */
4752 return OPVXX2VA(60, 475, 31) /* xxbrq - v3.0 */
4754 return OPVXX2VA(60, 475, 23) /* xxbrd - v3.0 */
4756 return OPVXX2VA(60, 475, 15) /* xxbrw - v3.0 */
4758 return OPVXX2VA(60, 475, 7) /* xxbrh - v3.0 */
4761 return OPVXX2(60, 265, 0) /* xscvdpsp - v2.06 */
4763 return OPVXX2(60, 329, 0) /* xscvspdp - v2.06 */
4765 return OPVXX2(60, 267, 0) /* xscvdpspn - v2.07 */
4767 return OPVXX2(60, 331, 0) /* xscvspdpn - v2.07 */
4770 return OPVXX2(60, 393, 0) /* xvcvdpsp - v2.06 */
4772 return OPVXX2(60, 457, 0) /* xvcvspdp - v2.06 */
4775 return OPVXX2(60, 344, 0) /* xscvdpsxds - v2.06 */
4777 return OPVXX2(60, 88, 0) /* xscvdpsxws - v2.06 */
4779 return OPVXX2(60, 328, 0) /* xscvdpuxds - v2.06 */
4781 return OPVXX2(60, 72, 0) /* xscvdpuxws - v2.06 */
4784 return OPVXX2(60, 376, 0) /* xscvsxddp - v2.06 */
4786 return OPVXX2(60, 360, 0) /* xscvuxddp - v2.06 */
4788 return OPVXX2(60, 312, 0) /* xscvsxdsp - v2.06 */
4790 return OPVXX2(60, 296, 0) /* xscvuxdsp - v2.06 */
4793 return OPVXX2(60, 472, 0) /* xvcvdpsxds - v2.06 */
4795 return OPVXX2(60, 216, 0) /* xvcvdpsxws - v2.06 */
4797 return OPVXX2(60, 456, 0) /* xvcvdpuxds - v2.06 */
4799 return OPVXX2(60, 200, 0) /* xvcvdpuxws - v2.06 */
4801 return OPVXX2(60, 408, 0) /* xvcvspsxds - v2.07 */
4803 return OPVXX2(60, 152, 0) /* xvcvspsxws - v2.07 */
4805 return OPVXX2(60, 392, 0) /* xvcvspuxds - v2.07 */
4807 return OPVXX2(60, 136, 0) /* xvcvspuxws - v2.07 */
4810 return OPVXX2(60, 504, 0) /* xvcvsxddp - v2.06 */
4812 return OPVXX2(60, 248, 0) /* xvcvsxwdp - v2.06 */
4814 return OPVXX2(60, 488, 0) /* xvcvuxddp - v2.06 */
4816 return OPVXX2(60, 232, 0) /* xvcvuxwdp - v2.06 */
4818 return OPVXX2(60, 440, 0) /* xvcvsxdsp - v2.06 */
4820 return OPVXX2(60, 184, 0) /* xvcvsxwsp - v2.06 */
4822 return OPVXX2(60, 424, 0) /* xvcvuxdsp - v2.06 */
4824 return OPVXX2(60, 168, 0) /* xvcvuxwsp - v2.06 */
4825 /* End of VSX instructions */
4828 return OPVX(4, 48, 0, 0) /* maddhd - v3.00 */
4830 return OPVX(4, 49, 0, 0) /* maddhdu - v3.00 */
4832 return OPVX(4, 51, 0, 0) /* maddld - v3.00 */
4835 return OPVCC(31, 316, 0, 0)
4837 return OPVCC(31, 316, 0, 1)
4840 c.ctxt.Diag("bad r/r, r/r/r or r/r/r/r opcode %v", a)
4844 func (c *ctxt9) opirrr(a obj.As) uint32 {
4846 /* Vector (VMX/Altivec) instructions */
4847 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
4848 /* are enabled starting at POWER6 (ISA 2.05). */
4850 return OPVX(4, 44, 0, 0) /* vsldoi - v2.03 */
4853 c.ctxt.Diag("bad i/r/r/r opcode %v", a)
4857 func (c *ctxt9) opiirr(a obj.As) uint32 {
4859 /* Vector (VMX/Altivec) instructions */
4860 /* ISA 2.07 enables these for POWER8 and beyond. */
4862 return OPVX(4, 1666, 0, 0) /* vshasigmaw - v2.07 */
4864 return OPVX(4, 1730, 0, 0) /* vshasigmad - v2.07 */
4867 c.ctxt.Diag("bad i/i/r/r opcode %v", a)
4871 func (c *ctxt9) opirr(a obj.As) uint32 {
4874 return OPVCC(14, 0, 0, 0)
4876 return OPVCC(12, 0, 0, 0)
4878 return OPVCC(13, 0, 0, 0)
4880 return OPVCC(15, 0, 0, 0) /* ADDIS */
4883 return OPVCC(28, 0, 0, 0)
4885 return OPVCC(29, 0, 0, 0) /* ANDIS. */
4888 return OPVCC(18, 0, 0, 0)
4890 return OPVCC(18, 0, 0, 0) | 1
4892 return OPVCC(18, 0, 0, 0) | 1
4894 return OPVCC(18, 0, 0, 0) | 1
4896 return OPVCC(16, 0, 0, 0)
4898 return OPVCC(16, 0, 0, 0) | 1
4901 return AOP_RRR(16<<26, BO_BCR, BI_EQ, 0)
4903 return AOP_RRR(16<<26, BO_NOTBCR, BI_LT, 0)
4905 return AOP_RRR(16<<26, BO_BCR, BI_GT, 0)
4907 return AOP_RRR(16<<26, BO_NOTBCR, BI_GT, 0)
4909 return AOP_RRR(16<<26, BO_BCR, BI_LT, 0)
4911 return AOP_RRR(16<<26, BO_NOTBCR, BI_EQ, 0)
4913 return AOP_RRR(16<<26, BO_NOTBCR, BI_FU, 0)
4915 return AOP_RRR(16<<26, BO_BCR, BI_FU, 0)
4917 return AOP_RRR(16<<26, BO_NOTBCTR, 0, 0)
4919 return AOP_RRR(16<<26, BO_BCTR, 0, 0)
4922 return OPVCC(11, 0, 0, 0) | 1<<21 /* L=1 */
4924 return OPVCC(10, 0, 0, 0) | 1<<21
4926 return OPVCC(11, 0, 0, 0) /* L=0 */
4928 return OPVCC(10, 0, 0, 0)
4930 return OPVCC(31, 224, 0, 0) /* cmpeqb - v3.00 */
4933 return OPVCC(31, 597, 0, 0)
4936 return OPVCC(31, 774, 0, 0) /* copy - v3.00 */
4938 return OPVCC(31, 902, 0, 1) /* paste. - v3.00 */
4940 return OPVCC(31, 755, 0, 0) /* darn - v3.00 */
4942 case AMULLW, AMULLD:
4943 return OPVCC(7, 0, 0, 0) /* mulli works with MULLW or MULLD */
4946 return OPVCC(24, 0, 0, 0)
4948 return OPVCC(25, 0, 0, 0) /* ORIS */
4951 return OPVCC(20, 0, 0, 0) /* rlwimi */
4953 return OPVCC(20, 0, 0, 1)
4955 return OPMD(30, 3, 0) /* rldimi */
4957 return OPMD(30, 3, 1) /* rldimi. */
4959 return OPMD(30, 3, 0) /* rldimi */
4961 return OPMD(30, 3, 1) /* rldimi. */
4963 return OPVCC(21, 0, 0, 0) /* rlwinm */
4965 return OPVCC(21, 0, 0, 1)
4968 return OPMD(30, 0, 0) /* rldicl */
4970 return OPMD(30, 0, 1) /* rldicl. */
4972 return OPMD(30, 1, 0) /* rldicr */
4974 return OPMD(30, 1, 1) /* rldicr. */
4976 return OPMD(30, 2, 0) /* rldic */
4978 return OPMD(30, 2, 1) /* rldic. */
4981 return OPVCC(31, 824, 0, 0)
4983 return OPVCC(31, 824, 0, 1)
4985 return OPVCC(31, (413 << 1), 0, 0)
4987 return OPVCC(31, (413 << 1), 0, 1)
4989 return OPVCC(31, 445, 0, 0)
4991 return OPVCC(31, 445, 0, 1)
4994 return OPVCC(31, 725, 0, 0)
4997 return OPVCC(8, 0, 0, 0)
5000 return OPVCC(3, 0, 0, 0)
5002 return OPVCC(2, 0, 0, 0)
5004 /* Vector (VMX/Altivec) instructions */
5005 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
5006 /* are enabled starting at POWER6 (ISA 2.05). */
5008 return OPVX(4, 524, 0, 0) /* vspltb - v2.03 */
5010 return OPVX(4, 588, 0, 0) /* vsplth - v2.03 */
5012 return OPVX(4, 652, 0, 0) /* vspltw - v2.03 */
5015 return OPVX(4, 780, 0, 0) /* vspltisb - v2.03 */
5017 return OPVX(4, 844, 0, 0) /* vspltish - v2.03 */
5019 return OPVX(4, 908, 0, 0) /* vspltisw - v2.03 */
5020 /* End of vector instructions */
5023 return OPVCC(63, 128, 0, 0) /* ftdiv - v2.06 */
5025 return OPVCC(63, 160, 0, 0) /* ftsqrt - v2.06 */
5028 return OPVCC(26, 0, 0, 0) /* XORIL */
5030 return OPVCC(27, 0, 0, 0) /* XORIS */
5033 c.ctxt.Diag("bad opcode i/r or i/r/r %v", a)
5040 func (c *ctxt9) opload(a obj.As) uint32 {
5043 return OPVCC(58, 0, 0, 0) /* ld */
5045 return OPVCC(58, 0, 0, 1) /* ldu */
5047 return OPVCC(32, 0, 0, 0) /* lwz */
5049 return OPVCC(33, 0, 0, 0) /* lwzu */
5051 return OPVCC(58, 0, 0, 0) | 1<<1 /* lwa */
5053 return OPDQ(61, 1, 0) /* lxv - ISA v3.0 */
5055 return OPVXX1(31, 269, 0) /* lxvl - ISA v3.0 */
5057 return OPVXX1(31, 301, 0) /* lxvll - ISA v3.0 */
5059 return OPVXX1(31, 268, 0) /* lxvx - ISA v3.0 */
5063 return OPVCC(34, 0, 0, 0)
5066 case AMOVBU, AMOVBZU:
5067 return OPVCC(35, 0, 0, 0)
5069 return OPVCC(50, 0, 0, 0)
5071 return OPVCC(51, 0, 0, 0)
5073 return OPVCC(48, 0, 0, 0)
5075 return OPVCC(49, 0, 0, 0)
5077 return OPVCC(42, 0, 0, 0)
5079 return OPVCC(43, 0, 0, 0)
5081 return OPVCC(40, 0, 0, 0)
5083 return OPVCC(41, 0, 0, 0)
5085 return OPVCC(46, 0, 0, 0) /* lmw */
5088 c.ctxt.Diag("bad load opcode %v", a)
5093 * indexed load a(b),d
5095 func (c *ctxt9) oploadx(a obj.As) uint32 {
5098 return OPVCC(31, 23, 0, 0) /* lwzx */
5100 return OPVCC(31, 55, 0, 0) /* lwzux */
5102 return OPVCC(31, 341, 0, 0) /* lwax */
5104 return OPVCC(31, 373, 0, 0) /* lwaux */
5107 return OPVCC(31, 87, 0, 0) /* lbzx */
5109 case AMOVBU, AMOVBZU:
5110 return OPVCC(31, 119, 0, 0) /* lbzux */
5112 return OPVCC(31, 599, 0, 0) /* lfdx */
5114 return OPVCC(31, 631, 0, 0) /* lfdux */
5116 return OPVCC(31, 535, 0, 0) /* lfsx */
5118 return OPVCC(31, 567, 0, 0) /* lfsux */
5120 return OPVCC(31, 855, 0, 0) /* lfiwax - power6, isa 2.05 */
5122 return OPVCC(31, 887, 0, 0) /* lfiwzx - power7, isa 2.06 */
5124 return OPVCC(31, 343, 0, 0) /* lhax */
5126 return OPVCC(31, 375, 0, 0) /* lhaux */
5128 return OPVCC(31, 790, 0, 0) /* lhbrx */
5130 return OPVCC(31, 534, 0, 0) /* lwbrx */
5132 return OPVCC(31, 532, 0, 0) /* ldbrx */
5134 return OPVCC(31, 279, 0, 0) /* lhzx */
5136 return OPVCC(31, 311, 0, 0) /* lhzux */
5138 return OPVCC(31, 52, 0, 0) /* lbarx */
5140 return OPVCC(31, 116, 0, 0) /* lharx */
5142 return OPVCC(31, 20, 0, 0) /* lwarx */
5144 return OPVCC(31, 84, 0, 0) /* ldarx */
5146 return OPVCC(31, 533, 0, 0) /* lswx */
5148 return OPVCC(31, 21, 0, 0) /* ldx */
5150 return OPVCC(31, 53, 0, 0) /* ldux */
5152 /* Vector (VMX/Altivec) instructions */
5154 return OPVCC(31, 7, 0, 0) /* lvebx - v2.03 */
5156 return OPVCC(31, 39, 0, 0) /* lvehx - v2.03 */
5158 return OPVCC(31, 71, 0, 0) /* lvewx - v2.03 */
5160 return OPVCC(31, 103, 0, 0) /* lvx - v2.03 */
5162 return OPVCC(31, 359, 0, 0) /* lvxl - v2.03 */
5164 return OPVCC(31, 6, 0, 0) /* lvsl - v2.03 */
5166 return OPVCC(31, 38, 0, 0) /* lvsr - v2.03 */
5167 /* End of vector instructions */
5169 /* Vector scalar (VSX) instructions */
5171 return OPVXX1(31, 268, 0) /* lxvx - ISA v3.0 */
5173 return OPVXX1(31, 844, 0) /* lxvd2x - v2.06 */
5175 return OPVXX1(31, 780, 0) /* lxvw4x - v2.06 */
5177 return OPVXX1(31, 812, 0) /* lxvh8x - v3.00 */
5179 return OPVXX1(31, 876, 0) /* lxvb16x - v3.00 */
5181 return OPVXX1(31, 332, 0) /* lxvdsx - v2.06 */
5183 return OPVXX1(31, 588, 0) /* lxsdx - v2.06 */
5185 return OPVXX1(31, 76, 0) /* lxsiwax - v2.07 */
5187 return OPVXX1(31, 12, 0) /* lxsiwzx - v2.07 */
5190 c.ctxt.Diag("bad loadx opcode %v", a)
5197 func (c *ctxt9) opstore(a obj.As) uint32 {
5200 return OPVCC(38, 0, 0, 0) /* stb */
5202 case AMOVBU, AMOVBZU:
5203 return OPVCC(39, 0, 0, 0) /* stbu */
5205 return OPVCC(54, 0, 0, 0) /* stfd */
5207 return OPVCC(55, 0, 0, 0) /* stfdu */
5209 return OPVCC(52, 0, 0, 0) /* stfs */
5211 return OPVCC(53, 0, 0, 0) /* stfsu */
5214 return OPVCC(44, 0, 0, 0) /* sth */
5216 case AMOVHZU, AMOVHU:
5217 return OPVCC(45, 0, 0, 0) /* sthu */
5219 return OPVCC(47, 0, 0, 0) /* stmw */
5221 return OPVCC(31, 725, 0, 0) /* stswi */
5224 return OPVCC(36, 0, 0, 0) /* stw */
5226 case AMOVWZU, AMOVWU:
5227 return OPVCC(37, 0, 0, 0) /* stwu */
5229 return OPVCC(62, 0, 0, 0) /* std */
5231 return OPVCC(62, 0, 0, 1) /* stdu */
5233 return OPDQ(61, 5, 0) /* stxv ISA 3.0 */
5235 return OPVXX1(31, 397, 0) /* stxvl ISA 3.0 */
5237 return OPVXX1(31, 429, 0) /* stxvll ISA 3.0 */
5239 return OPVXX1(31, 396, 0) /* stxvx - ISA v3.0 */
5243 c.ctxt.Diag("unknown store opcode %v", a)
5248 * indexed store s,a(b)
5250 func (c *ctxt9) opstorex(a obj.As) uint32 {
5253 return OPVCC(31, 215, 0, 0) /* stbx */
5255 case AMOVBU, AMOVBZU:
5256 return OPVCC(31, 247, 0, 0) /* stbux */
5258 return OPVCC(31, 727, 0, 0) /* stfdx */
5260 return OPVCC(31, 759, 0, 0) /* stfdux */
5262 return OPVCC(31, 663, 0, 0) /* stfsx */
5264 return OPVCC(31, 695, 0, 0) /* stfsux */
5266 return OPVCC(31, 983, 0, 0) /* stfiwx */
5269 return OPVCC(31, 407, 0, 0) /* sthx */
5271 return OPVCC(31, 918, 0, 0) /* sthbrx */
5273 case AMOVHZU, AMOVHU:
5274 return OPVCC(31, 439, 0, 0) /* sthux */
5277 return OPVCC(31, 151, 0, 0) /* stwx */
5279 case AMOVWZU, AMOVWU:
5280 return OPVCC(31, 183, 0, 0) /* stwux */
5282 return OPVCC(31, 661, 0, 0) /* stswx */
5284 return OPVCC(31, 662, 0, 0) /* stwbrx */
5286 return OPVCC(31, 660, 0, 0) /* stdbrx */
5288 return OPVCC(31, 694, 0, 1) /* stbcx. */
5290 return OPVCC(31, 726, 0, 1) /* sthcx. */
5292 return OPVCC(31, 150, 0, 1) /* stwcx. */
5294 return OPVCC(31, 214, 0, 1) /* stdcx. */
5296 return OPVCC(31, 149, 0, 0) /* stdx */
5298 return OPVCC(31, 181, 0, 0) /* stdux */
5300 /* Vector (VMX/Altivec) instructions */
5302 return OPVCC(31, 135, 0, 0) /* stvebx - v2.03 */
5304 return OPVCC(31, 167, 0, 0) /* stvehx - v2.03 */
5306 return OPVCC(31, 199, 0, 0) /* stvewx - v2.03 */
5308 return OPVCC(31, 231, 0, 0) /* stvx - v2.03 */
5310 return OPVCC(31, 487, 0, 0) /* stvxl - v2.03 */
5311 /* End of vector instructions */
5313 /* Vector scalar (VSX) instructions */
5315 return OPVXX1(31, 396, 0) /* stxvx - v3.0 */
5317 return OPVXX1(31, 972, 0) /* stxvd2x - v2.06 */
5319 return OPVXX1(31, 908, 0) /* stxvw4x - v2.06 */
5321 return OPVXX1(31, 940, 0) /* stxvh8x - v3.0 */
5323 return OPVXX1(31, 1004, 0) /* stxvb16x - v3.0 */
5326 return OPVXX1(31, 716, 0) /* stxsdx - v2.06 */
5329 return OPVXX1(31, 140, 0) /* stxsiwx - v2.07 */
5331 /* End of vector scalar instructions */
5335 c.ctxt.Diag("unknown storex opcode %v", a)