1 // cmd/9l/optab.c, cmd/9l/asmout.c from Vita Nuova.
3 // Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
4 // Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
5 // Portions Copyright © 1997-1999 Vita Nuova Limited
6 // Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com)
7 // Portions Copyright © 2004,2006 Bruce Ellis
8 // Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
9 // Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others
10 // Portions Copyright © 2009 The Go Authors. All rights reserved.
12 // Permission is hereby granted, free of charge, to any person obtaining a copy
13 // of this software and associated documentation files (the "Software"), to deal
14 // in the Software without restriction, including without limitation the rights
15 // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
16 // copies of the Software, and to permit persons to whom the Software is
17 // furnished to do so, subject to the following conditions:
19 // The above copyright notice and this permission notice shall be included in
20 // all copies or substantial portions of the Software.
22 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
23 // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
24 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
25 // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
26 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
27 // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
44 // ctxt9 holds state while assembling a single function.
45 // Each function gets a fresh ctxt9.
46 // This allows for multiple functions to be safely concurrently assembled.
56 // Instruction layout.
63 // R bit option in prefixed load/store/add D-form operations
64 PFX_R_ABS = 0 // Offset is absolute
65 PFX_R_PCREL = 1 // Offset is relative to PC, RA should be 0
70 a1 uint8 // p.From argument (obj.Addr). p is of type obj.Prog.
71 a2 uint8 // p.Reg argument (int16 Register)
72 a3 uint8 // p.RestArgs[0] (obj.AddrPos)
73 a4 uint8 // p.RestArgs[1]
74 a5 uint8 // p.RestArgs[2]
75 a6 uint8 // p.To (obj.Addr)
76 type_ int8 // cases in asmout below. E.g., 44 = st r,(ra+rb); 45 = ld (ra+rb), r
77 size int8 // Text space in bytes to lay operation
79 // A prefixed instruction is generated by this opcode. This cannot be placed
80 // across a 64B PC address. Opcodes should not translate to more than one
81 // prefixed instruction. The prefixed instruction should be written first
82 // (e.g. when Optab.size > 8).
85 asmout func(*ctxt9, *obj.Prog, *Optab, *[5]uint32)
88 // optab contains an array to be sliced of accepted operand combinations for an
89 // instruction. Unused arguments and fields are not explicitly enumerated, and
90 // should not be listed for clarity. Unused arguments and values should always
91 // assume the default value for the given type.
93 // optab does not list every valid ppc64 opcode, it enumerates representative
94 // operand combinations for a class of instruction. The variable oprange indexes
95 // all valid ppc64 opcodes.
97 // oprange is initialized to point a slice within optab which contains the valid
98 // operand combinations for a given instruction. This is initialized from buildop.
100 // Likewise, each slice of optab is dynamically sorted using the ocmp Sort interface
101 // to arrange entries to minimize text size of each opcode.
103 {as: obj.ATEXT, a1: C_LOREG, a6: C_TEXTSIZE, type_: 0, size: 0},
104 {as: obj.ATEXT, a1: C_LOREG, a3: C_LCON, a6: C_TEXTSIZE, type_: 0, size: 0},
105 {as: obj.ATEXT, a1: C_ADDR, a6: C_TEXTSIZE, type_: 0, size: 0},
106 {as: obj.ATEXT, a1: C_ADDR, a3: C_LCON, a6: C_TEXTSIZE, type_: 0, size: 0},
108 {as: AADD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
109 {as: AADD, a1: C_REG, a6: C_REG, type_: 2, size: 4},
110 {as: AADD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
111 {as: AADD, a1: C_SCON, a6: C_REG, type_: 4, size: 4},
112 {as: AADD, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
113 {as: AADD, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4},
114 {as: AADD, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 20, size: 4},
115 {as: AADD, a1: C_UCON, a6: C_REG, type_: 20, size: 4},
116 {as: AADD, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 22, size: 8},
117 {as: AADD, a1: C_ANDCON, a6: C_REG, type_: 22, size: 8},
118 {as: AADDIS, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 20, size: 4},
119 {as: AADDIS, a1: C_ADDCON, a6: C_REG, type_: 20, size: 4},
120 {as: AADDC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
121 {as: AADDC, a1: C_REG, a6: C_REG, type_: 2, size: 4},
122 {as: AADDC, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
123 {as: AADDC, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4},
124 {as: AADDC, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12},
125 {as: AADDC, a1: C_LCON, a6: C_REG, type_: 22, size: 12},
126 {as: AAND, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, /* logical, no literal */
127 {as: AAND, a1: C_REG, a6: C_REG, type_: 6, size: 4},
128 {as: AANDCC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
129 {as: AANDCC, a1: C_REG, a6: C_REG, type_: 6, size: 4},
130 {as: AANDCC, a1: C_ANDCON, a6: C_REG, type_: 58, size: 4},
131 {as: AANDCC, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 58, size: 4},
132 {as: AANDCC, a1: C_UCON, a6: C_REG, type_: 59, size: 4},
133 {as: AANDCC, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
134 {as: AANDCC, a1: C_ADDCON, a6: C_REG, type_: 23, size: 8},
135 {as: AANDCC, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 23, size: 8},
136 {as: AANDCC, a1: C_LCON, a6: C_REG, type_: 23, size: 12},
137 {as: AANDCC, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 23, size: 12},
138 {as: AANDISCC, a1: C_ANDCON, a6: C_REG, type_: 59, size: 4},
139 {as: AANDISCC, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
140 {as: AMULLW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
141 {as: AMULLW, a1: C_REG, a6: C_REG, type_: 2, size: 4},
142 {as: AMULLW, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
143 {as: AMULLW, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4},
144 {as: AMULLW, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
145 {as: AMULLW, a1: C_ANDCON, a6: C_REG, type_: 4, size: 4},
146 {as: AMULLW, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12},
147 {as: AMULLW, a1: C_LCON, a6: C_REG, type_: 22, size: 12},
148 {as: ASUBC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 10, size: 4},
149 {as: ASUBC, a1: C_REG, a6: C_REG, type_: 10, size: 4},
150 {as: ASUBC, a1: C_REG, a3: C_ADDCON, a6: C_REG, type_: 27, size: 4},
151 {as: ASUBC, a1: C_REG, a3: C_LCON, a6: C_REG, type_: 28, size: 12},
152 {as: AOR, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, /* logical, literal not cc (or/xor) */
153 {as: AOR, a1: C_REG, a6: C_REG, type_: 6, size: 4},
154 {as: AOR, a1: C_ANDCON, a6: C_REG, type_: 58, size: 4},
155 {as: AOR, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 58, size: 4},
156 {as: AOR, a1: C_UCON, a6: C_REG, type_: 59, size: 4},
157 {as: AOR, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
158 {as: AOR, a1: C_ADDCON, a6: C_REG, type_: 23, size: 8},
159 {as: AOR, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 23, size: 8},
160 {as: AOR, a1: C_LCON, a6: C_REG, type_: 23, size: 12},
161 {as: AOR, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 23, size: 12},
162 {as: AORIS, a1: C_ANDCON, a6: C_REG, type_: 59, size: 4},
163 {as: AORIS, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
164 {as: ADIVW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4}, /* op r1[,r2],r3 */
165 {as: ADIVW, a1: C_REG, a6: C_REG, type_: 2, size: 4},
166 {as: ASUB, a1: C_REG, a2: C_REG, a6: C_REG, type_: 10, size: 4}, /* op r2[,r1],r3 */
167 {as: ASUB, a1: C_REG, a6: C_REG, type_: 10, size: 4},
168 {as: ASLW, a1: C_REG, a6: C_REG, type_: 6, size: 4},
169 {as: ASLW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
170 {as: ASLD, a1: C_REG, a6: C_REG, type_: 6, size: 4},
171 {as: ASLD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
172 {as: ASLD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 25, size: 4},
173 {as: ASLD, a1: C_SCON, a6: C_REG, type_: 25, size: 4},
174 {as: AEXTSWSLI, a1: C_SCON, a6: C_REG, type_: 25, size: 4},
175 {as: AEXTSWSLI, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 25, size: 4},
176 {as: ASLW, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 57, size: 4},
177 {as: ASLW, a1: C_SCON, a6: C_REG, type_: 57, size: 4},
178 {as: ASRAW, a1: C_REG, a6: C_REG, type_: 6, size: 4},
179 {as: ASRAW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
180 {as: ASRAW, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 56, size: 4},
181 {as: ASRAW, a1: C_SCON, a6: C_REG, type_: 56, size: 4},
182 {as: ASRAD, a1: C_REG, a6: C_REG, type_: 6, size: 4},
183 {as: ASRAD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
184 {as: ASRAD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 56, size: 4},
185 {as: ASRAD, a1: C_SCON, a6: C_REG, type_: 56, size: 4},
186 {as: ARLWMI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 62, size: 4},
187 {as: ARLWMI, a1: C_SCON, a2: C_REG, a3: C_SCON, a4: C_SCON, a6: C_REG, type_: 102, size: 4},
188 {as: ARLWNM, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 62, size: 4},
189 {as: ARLWNM, a1: C_SCON, a2: C_REG, a3: C_SCON, a4: C_SCON, a6: C_REG, type_: 102, size: 4},
190 {as: ARLWNM, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 63, size: 4},
191 {as: ARLWNM, a1: C_REG, a2: C_REG, a3: C_SCON, a4: C_SCON, a6: C_REG, type_: 103, size: 4},
192 {as: ACLRLSLWI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 62, size: 4},
193 {as: ARLDMI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 30, size: 4},
194 {as: ARLDC, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 29, size: 4},
195 {as: ARLDCL, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 29, size: 4},
196 {as: ARLDCL, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
197 {as: ARLDICL, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
198 {as: ARLDICL, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
199 {as: ARLDCL, a1: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
200 {as: AFADD, a1: C_FREG, a6: C_FREG, type_: 2, size: 4},
201 {as: AFADD, a1: C_FREG, a2: C_FREG, a6: C_FREG, type_: 2, size: 4},
202 {as: AFABS, a1: C_FREG, a6: C_FREG, type_: 33, size: 4},
203 {as: AFABS, a6: C_FREG, type_: 33, size: 4},
204 {as: AFMADD, a1: C_FREG, a2: C_FREG, a3: C_FREG, a6: C_FREG, type_: 34, size: 4},
205 {as: AFMUL, a1: C_FREG, a6: C_FREG, type_: 32, size: 4},
206 {as: AFMUL, a1: C_FREG, a2: C_FREG, a6: C_FREG, type_: 32, size: 4},
208 {as: AMOVBU, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
209 {as: AMOVBU, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
210 {as: AMOVBU, a1: C_SOREG, a6: C_REG, type_: 8, size: 8},
211 {as: AMOVBU, a1: C_XOREG, a6: C_REG, type_: 109, size: 8},
213 {as: AMOVBZU, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
214 {as: AMOVBZU, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
215 {as: AMOVBZU, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
216 {as: AMOVBZU, a1: C_XOREG, a6: C_REG, type_: 109, size: 4},
218 {as: AMOVHBR, a1: C_REG, a6: C_XOREG, type_: 44, size: 4},
219 {as: AMOVHBR, a1: C_XOREG, a6: C_REG, type_: 45, size: 4},
221 {as: AMOVB, a1: C_SOREG, a6: C_REG, type_: 8, size: 8},
222 {as: AMOVB, a1: C_XOREG, a6: C_REG, type_: 109, size: 8},
223 {as: AMOVB, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
224 {as: AMOVB, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
225 {as: AMOVB, a1: C_REG, a6: C_REG, type_: 13, size: 4},
227 {as: AMOVBZ, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
228 {as: AMOVBZ, a1: C_XOREG, a6: C_REG, type_: 109, size: 4},
229 {as: AMOVBZ, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
230 {as: AMOVBZ, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
231 {as: AMOVBZ, a1: C_REG, a6: C_REG, type_: 13, size: 4},
233 {as: AMOVD, a1: C_ADDCON, a6: C_REG, type_: 3, size: 4},
234 {as: AMOVD, a1: C_ANDCON, a6: C_REG, type_: 3, size: 4},
235 {as: AMOVD, a1: C_UCON, a6: C_REG, type_: 3, size: 4},
236 {as: AMOVD, a1: C_SACON, a6: C_REG, type_: 3, size: 4},
237 {as: AMOVD, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
238 {as: AMOVD, a1: C_XOREG, a6: C_REG, type_: 109, size: 4},
239 {as: AMOVD, a1: C_SOREG, a6: C_SPR, type_: 107, size: 8},
240 {as: AMOVD, a1: C_SPR, a6: C_REG, type_: 66, size: 4},
241 {as: AMOVD, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
242 {as: AMOVD, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
243 {as: AMOVD, a1: C_SPR, a6: C_SOREG, type_: 106, size: 8},
244 {as: AMOVD, a1: C_REG, a6: C_SPR, type_: 66, size: 4},
245 {as: AMOVD, a1: C_REG, a6: C_REG, type_: 13, size: 4},
247 {as: AMOVW, a1: C_ADDCON, a6: C_REG, type_: 3, size: 4},
248 {as: AMOVW, a1: C_ANDCON, a6: C_REG, type_: 3, size: 4},
249 {as: AMOVW, a1: C_UCON, a6: C_REG, type_: 3, size: 4},
250 {as: AMOVW, a1: C_SACON, a6: C_REG, type_: 3, size: 4},
251 {as: AMOVW, a1: C_CREG, a6: C_REG, type_: 68, size: 4},
252 {as: AMOVW, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
253 {as: AMOVW, a1: C_XOREG, a6: C_REG, type_: 109, size: 4},
254 {as: AMOVW, a1: C_SPR, a6: C_REG, type_: 66, size: 4},
255 {as: AMOVW, a1: C_REG, a6: C_CREG, type_: 69, size: 4},
256 {as: AMOVW, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
257 {as: AMOVW, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
258 {as: AMOVW, a1: C_REG, a6: C_SPR, type_: 66, size: 4},
259 {as: AMOVW, a1: C_REG, a6: C_REG, type_: 13, size: 4},
261 {as: AFMOVD, a1: C_ADDCON, a6: C_FREG, type_: 24, size: 8},
262 {as: AFMOVD, a1: C_SOREG, a6: C_FREG, type_: 8, size: 4},
263 {as: AFMOVD, a1: C_XOREG, a6: C_FREG, type_: 109, size: 4},
264 {as: AFMOVD, a1: C_ZCON, a6: C_FREG, type_: 24, size: 4},
265 {as: AFMOVD, a1: C_FREG, a6: C_FREG, type_: 33, size: 4},
266 {as: AFMOVD, a1: C_FREG, a6: C_SOREG, type_: 7, size: 4},
267 {as: AFMOVD, a1: C_FREG, a6: C_XOREG, type_: 108, size: 4},
269 {as: AFMOVSX, a1: C_XOREG, a6: C_FREG, type_: 45, size: 4},
270 {as: AFMOVSX, a1: C_FREG, a6: C_XOREG, type_: 44, size: 4},
272 {as: AFMOVSZ, a1: C_ZOREG, a6: C_FREG, type_: 45, size: 4},
273 {as: AFMOVSZ, a1: C_XOREG, a6: C_FREG, type_: 45, size: 4},
275 {as: AMOVFL, a1: C_CREG, a6: C_CREG, type_: 67, size: 4},
276 {as: AMOVFL, a1: C_FPSCR, a6: C_CREG, type_: 73, size: 4},
277 {as: AMOVFL, a1: C_FPSCR, a6: C_FREG, type_: 53, size: 4},
278 {as: AMOVFL, a1: C_FREG, a3: C_LCON, a6: C_FPSCR, type_: 64, size: 4},
279 {as: AMOVFL, a1: C_FREG, a6: C_FPSCR, type_: 64, size: 4},
280 {as: AMOVFL, a1: C_LCON, a6: C_FPSCR, type_: 65, size: 4},
281 {as: AMOVFL, a1: C_REG, a6: C_CREG, type_: 69, size: 4},
282 {as: AMOVFL, a1: C_REG, a6: C_LCON, type_: 69, size: 4},
284 {as: ASYSCALL, type_: 5, size: 4},
285 {as: ASYSCALL, a1: C_REG, type_: 77, size: 12},
286 {as: ASYSCALL, a1: C_SCON, type_: 77, size: 12},
287 {as: ABEQ, a6: C_SBRA, type_: 16, size: 4},
288 {as: ABEQ, a1: C_CREG, a6: C_SBRA, type_: 16, size: 4},
289 {as: ABR, a6: C_LBRA, type_: 11, size: 4}, // b label
290 {as: ABR, a6: C_LBRAPIC, type_: 11, size: 8}, // b label; nop
291 {as: ABR, a6: C_LR, type_: 18, size: 4}, // blr
292 {as: ABR, a6: C_CTR, type_: 18, size: 4}, // bctr
293 {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_SBRA, type_: 16, size: 4}, // bc bo, bi, label
294 {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_LBRA, type_: 17, size: 4}, // bc bo, bi, label
295 {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_LR, type_: 18, size: 4}, // bclr bo, bi
296 {as: ABC, a1: C_SCON, a2: C_CRBIT, a3: C_SCON, a6: C_LR, type_: 18, size: 4}, // bclr bo, bi, bh
297 {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_CTR, type_: 18, size: 4}, // bcctr bo, bi
298 {as: ABDNZ, a6: C_SBRA, type_: 16, size: 4},
299 {as: ASYNC, type_: 46, size: 4},
300 {as: AWORD, a1: C_LCON, type_: 40, size: 4},
301 {as: ADWORD, a1: C_64CON, type_: 31, size: 8},
302 {as: ADWORD, a1: C_LACON, type_: 31, size: 8},
303 {as: AADDME, a1: C_REG, a6: C_REG, type_: 47, size: 4},
304 {as: AEXTSB, a1: C_REG, a6: C_REG, type_: 48, size: 4},
305 {as: AEXTSB, a6: C_REG, type_: 48, size: 4},
306 {as: AISEL, a1: C_U5CON, a2: C_REG, a3: C_REG, a6: C_REG, type_: 84, size: 4},
307 {as: AISEL, a1: C_CRBIT, a2: C_REG, a3: C_REG, a6: C_REG, type_: 84, size: 4},
308 {as: ANEG, a1: C_REG, a6: C_REG, type_: 47, size: 4},
309 {as: ANEG, a6: C_REG, type_: 47, size: 4},
310 {as: AREM, a1: C_REG, a6: C_REG, type_: 50, size: 12},
311 {as: AREM, a1: C_REG, a2: C_REG, a6: C_REG, type_: 50, size: 12},
312 {as: AREMU, a1: C_REG, a6: C_REG, type_: 50, size: 16},
313 {as: AREMU, a1: C_REG, a2: C_REG, a6: C_REG, type_: 50, size: 16},
314 {as: AREMD, a1: C_REG, a6: C_REG, type_: 51, size: 12},
315 {as: AREMD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 51, size: 12},
316 {as: AMTFSB0, a1: C_SCON, type_: 52, size: 4},
317 /* Other ISA 2.05+ instructions */
318 {as: APOPCNTD, a1: C_REG, a6: C_REG, type_: 93, size: 4}, /* population count, x-form */
319 {as: ACMPB, a1: C_REG, a2: C_REG, a6: C_REG, type_: 92, size: 4}, /* compare byte, x-form */
320 {as: ACMPEQB, a1: C_REG, a2: C_REG, a6: C_CREG, type_: 92, size: 4}, /* compare equal byte, x-form, ISA 3.0 */
321 {as: ACMPEQB, a1: C_REG, a6: C_REG, type_: 70, size: 4},
322 {as: AFTDIV, a1: C_FREG, a2: C_FREG, a6: C_SCON, type_: 92, size: 4}, /* floating test for sw divide, x-form */
323 {as: AFTSQRT, a1: C_FREG, a6: C_SCON, type_: 93, size: 4}, /* floating test for sw square root, x-form */
324 {as: ACOPY, a1: C_REG, a6: C_REG, type_: 92, size: 4}, /* copy/paste facility, x-form */
325 {as: ADARN, a1: C_SCON, a6: C_REG, type_: 92, size: 4}, /* deliver random number, x-form */
326 {as: AMADDHD, a1: C_REG, a2: C_REG, a3: C_REG, a6: C_REG, type_: 83, size: 4}, /* multiply-add high/low doubleword, va-form */
327 {as: AADDEX, a1: C_REG, a2: C_REG, a3: C_SCON, a6: C_REG, type_: 94, size: 4}, /* add extended using alternate carry, z23-form */
328 {as: ACRAND, a1: C_CRBIT, a2: C_CRBIT, a6: C_CRBIT, type_: 2, size: 4}, /* logical ops for condition register bits xl-form */
330 /* Misc ISA 3.0 instructions */
331 {as: ASETB, a1: C_CREG, a6: C_REG, type_: 110, size: 4},
332 {as: AVCLZLSBB, a1: C_VREG, a6: C_REG, type_: 85, size: 4},
334 /* Vector instructions */
337 {as: ALVEBX, a1: C_XOREG, a6: C_VREG, type_: 45, size: 4}, /* vector load, x-form */
340 {as: ASTVEBX, a1: C_VREG, a6: C_XOREG, type_: 44, size: 4}, /* vector store, x-form */
343 {as: AVAND, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector and, vx-form */
344 {as: AVOR, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector or, vx-form */
347 {as: AVADDUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add unsigned modulo, vx-form */
348 {as: AVADDCU, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add & write carry unsigned, vx-form */
349 {as: AVADDUS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add unsigned saturate, vx-form */
350 {as: AVADDSS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add signed saturate, vx-form */
351 {as: AVADDE, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector add extended, va-form */
353 /* Vector subtract */
354 {as: AVSUBUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract unsigned modulo, vx-form */
355 {as: AVSUBCU, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract & write carry unsigned, vx-form */
356 {as: AVSUBUS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract unsigned saturate, vx-form */
357 {as: AVSUBSS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract signed saturate, vx-form */
358 {as: AVSUBE, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector subtract extended, va-form */
360 /* Vector multiply */
361 {as: AVMULESB, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector multiply, vx-form */
362 {as: AVPMSUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector polynomial multiply & sum, vx-form */
363 {as: AVMSUMUDM, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector multiply-sum, va-form */
366 {as: AVR, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector rotate, vx-form */
369 {as: AVS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector shift, vx-form */
370 {as: AVSA, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector shift algebraic, vx-form */
371 {as: AVSOI, a1: C_ANDCON, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector shift by octet immediate, va-form */
374 {as: AVCLZ, a1: C_VREG, a6: C_VREG, type_: 85, size: 4}, /* vector count leading zeros, vx-form */
375 {as: AVPOPCNT, a1: C_VREG, a6: C_VREG, type_: 85, size: 4}, /* vector population count, vx-form */
378 {as: AVCMPEQ, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare equal, vc-form */
379 {as: AVCMPGT, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare greater than, vc-form */
380 {as: AVCMPNEZB, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare not equal, vx-form */
383 {as: AVMRGOW, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector merge odd word, vx-form */
386 {as: AVPERM, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector permute, va-form */
388 /* Vector bit permute */
389 {as: AVBPERMQ, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector bit permute, vx-form */
392 {as: AVSEL, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector select, va-form */
395 {as: AVSPLTB, a1: C_SCON, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector splat, vx-form */
396 {as: AVSPLTB, a1: C_ADDCON, a2: C_VREG, a6: C_VREG, type_: 82, size: 4},
397 {as: AVSPLTISB, a1: C_SCON, a6: C_VREG, type_: 82, size: 4}, /* vector splat immediate, vx-form */
398 {as: AVSPLTISB, a1: C_ADDCON, a6: C_VREG, type_: 82, size: 4},
401 {as: AVCIPH, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES cipher, vx-form */
402 {as: AVNCIPH, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES inverse cipher, vx-form */
403 {as: AVSBOX, a1: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES subbytes, vx-form */
406 {as: AVSHASIGMA, a1: C_ANDCON, a2: C_VREG, a3: C_ANDCON, a6: C_VREG, type_: 82, size: 4}, /* vector SHA sigma, vx-form */
408 /* VSX vector load */
409 {as: ALXVD2X, a1: C_XOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx vector load, xx1-form */
410 {as: ALXV, a1: C_SOREG, a6: C_VSREG, type_: 96, size: 4}, /* vsx vector load, dq-form */
411 {as: ALXVL, a1: C_REG, a2: C_REG, a6: C_VSREG, type_: 98, size: 4}, /* vsx vector load length */
413 /* VSX vector store */
414 {as: ASTXVD2X, a1: C_VSREG, a6: C_XOREG, type_: 86, size: 4}, /* vsx vector store, xx1-form */
415 {as: ASTXV, a1: C_VSREG, a6: C_SOREG, type_: 97, size: 4}, /* vsx vector store, dq-form */
416 {as: ASTXVL, a1: C_VSREG, a2: C_REG, a6: C_REG, type_: 99, size: 4}, /* vsx vector store with length x-form */
418 /* VSX scalar load */
419 {as: ALXSDX, a1: C_XOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx scalar load, xx1-form */
421 /* VSX scalar store */
422 {as: ASTXSDX, a1: C_VSREG, a6: C_XOREG, type_: 86, size: 4}, /* vsx scalar store, xx1-form */
424 /* VSX scalar as integer load */
425 {as: ALXSIWAX, a1: C_XOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx scalar as integer load, xx1-form */
427 /* VSX scalar store as integer */
428 {as: ASTXSIWX, a1: C_VSREG, a6: C_XOREG, type_: 86, size: 4}, /* vsx scalar as integer store, xx1-form */
430 /* VSX move from VSR */
431 {as: AMFVSRD, a1: C_VSREG, a6: C_REG, type_: 88, size: 4},
432 {as: AMFVSRD, a1: C_FREG, a6: C_REG, type_: 88, size: 4},
434 /* VSX move to VSR */
435 {as: AMTVSRD, a1: C_REG, a6: C_VSREG, type_: 104, size: 4},
436 {as: AMTVSRD, a1: C_REG, a6: C_FREG, type_: 104, size: 4},
437 {as: AMTVSRDD, a1: C_REG, a2: C_REG, a6: C_VSREG, type_: 104, size: 4},
440 {as: AXXLAND, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx and, xx3-form */
441 {as: AXXLOR, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx or, xx3-form */
444 {as: AXXSEL, a1: C_VSREG, a2: C_VSREG, a3: C_VSREG, a6: C_VSREG, type_: 91, size: 4}, /* vsx select, xx4-form */
447 {as: AXXMRGHW, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx merge, xx3-form */
450 {as: AXXSPLTW, a1: C_VSREG, a3: C_SCON, a6: C_VSREG, type_: 89, size: 4}, /* vsx splat, xx2-form */
451 {as: AXXSPLTIB, a1: C_SCON, a6: C_VSREG, type_: 100, size: 4}, /* vsx splat, xx2-form */
454 {as: AXXPERM, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx permute, xx3-form */
457 {as: AXXSLDWI, a1: C_VSREG, a2: C_VSREG, a3: C_SCON, a6: C_VSREG, type_: 90, size: 4}, /* vsx shift immediate, xx3-form */
459 /* VSX reverse bytes */
460 {as: AXXBRQ, a1: C_VSREG, a6: C_VSREG, type_: 101, size: 4}, /* vsx reverse bytes */
462 /* VSX scalar FP-FP conversion */
463 {as: AXSCVDPSP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar fp-fp conversion, xx2-form */
465 /* VSX vector FP-FP conversion */
466 {as: AXVCVDPSP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector fp-fp conversion, xx2-form */
468 /* VSX scalar FP-integer conversion */
469 {as: AXSCVDPSXDS, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar fp-integer conversion, xx2-form */
471 /* VSX scalar integer-FP conversion */
472 {as: AXSCVSXDDP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar integer-fp conversion, xx2-form */
474 /* VSX vector FP-integer conversion */
475 {as: AXVCVDPSXDS, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector fp-integer conversion, xx2-form */
477 /* VSX vector integer-FP conversion */
478 {as: AXVCVSXDDP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector integer-fp conversion, xx2-form */
480 {as: ACMP, a1: C_REG, a6: C_REG, type_: 70, size: 4},
481 {as: ACMP, a1: C_REG, a2: C_CREG, a6: C_REG, type_: 70, size: 4},
482 {as: ACMP, a1: C_REG, a6: C_ADDCON, type_: 71, size: 4},
483 {as: ACMP, a1: C_REG, a2: C_CREG, a6: C_ADDCON, type_: 71, size: 4},
484 {as: ACMPU, a1: C_REG, a6: C_REG, type_: 70, size: 4},
485 {as: ACMPU, a1: C_REG, a2: C_CREG, a6: C_REG, type_: 70, size: 4},
486 {as: ACMPU, a1: C_REG, a6: C_ANDCON, type_: 71, size: 4},
487 {as: ACMPU, a1: C_REG, a2: C_CREG, a6: C_ANDCON, type_: 71, size: 4},
488 {as: AFCMPO, a1: C_FREG, a6: C_FREG, type_: 70, size: 4},
489 {as: AFCMPO, a1: C_FREG, a2: C_CREG, a6: C_FREG, type_: 70, size: 4},
490 {as: ATW, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 60, size: 4},
491 {as: ATW, a1: C_LCON, a2: C_REG, a6: C_ADDCON, type_: 61, size: 4},
492 {as: ADCBF, a1: C_SOREG, type_: 43, size: 4},
493 {as: ADCBF, a1: C_XOREG, type_: 43, size: 4},
494 {as: ADCBF, a1: C_XOREG, a2: C_REG, a6: C_SCON, type_: 43, size: 4},
495 {as: ADCBF, a1: C_SOREG, a6: C_SCON, type_: 43, size: 4},
496 {as: ADCBF, a1: C_XOREG, a6: C_SCON, type_: 43, size: 4},
497 {as: ASTDCCC, a1: C_REG, a2: C_REG, a6: C_XOREG, type_: 44, size: 4},
498 {as: ASTDCCC, a1: C_REG, a6: C_XOREG, type_: 44, size: 4},
499 {as: ALDAR, a1: C_XOREG, a6: C_REG, type_: 45, size: 4},
500 {as: ALDAR, a1: C_XOREG, a3: C_ANDCON, a6: C_REG, type_: 45, size: 4},
501 {as: AEIEIO, type_: 46, size: 4},
502 {as: ATLBIE, a1: C_REG, type_: 49, size: 4},
503 {as: ATLBIE, a1: C_SCON, a6: C_REG, type_: 49, size: 4},
504 {as: ASLBMFEE, a1: C_REG, a6: C_REG, type_: 55, size: 4},
505 {as: ASLBMTE, a1: C_REG, a6: C_REG, type_: 55, size: 4},
506 {as: ASTSW, a1: C_REG, a6: C_XOREG, type_: 44, size: 4},
507 {as: ASTSW, a1: C_REG, a3: C_LCON, a6: C_ZOREG, type_: 41, size: 4},
508 {as: ALSW, a1: C_XOREG, a6: C_REG, type_: 45, size: 4},
509 {as: ALSW, a1: C_ZOREG, a3: C_LCON, a6: C_REG, type_: 42, size: 4},
511 {as: obj.AUNDEF, type_: 78, size: 4},
512 {as: obj.APCDATA, a1: C_LCON, a6: C_LCON, type_: 0, size: 0},
513 {as: obj.AFUNCDATA, a1: C_SCON, a6: C_ADDR, type_: 0, size: 0},
514 {as: obj.ANOP, type_: 0, size: 0},
515 {as: obj.ANOP, a1: C_LCON, type_: 0, size: 0}, // NOP operand variations added for #40689
516 {as: obj.ANOP, a1: C_REG, type_: 0, size: 0}, // to preserve previous behavior
517 {as: obj.ANOP, a1: C_FREG, type_: 0, size: 0},
518 {as: obj.ADUFFZERO, a6: C_LBRA, type_: 11, size: 4}, // same as ABR/ABL
519 {as: obj.ADUFFCOPY, a6: C_LBRA, type_: 11, size: 4}, // same as ABR/ABL
520 {as: obj.APCALIGN, a1: C_LCON, type_: 0, size: 0}, // align code
523 // These are opcodes above which may generate different sequences depending on whether prefix opcode support
525 type PrefixableOptab struct {
527 minGOPPC64 int // Minimum GOPPC64 required to support this.
528 pfxsize int8 // Instruction sequence size when prefixed opcodes are used
531 // The prefixable optab entry contains the pseudo-opcodes which generate relocations, or may generate
532 // a more efficient sequence of instructions if a prefixed version exists (ex. paddi instead of oris/ori/add).
534 // This table is meant to transform all sequences which might be TOC-relative into an equivalent PC-relative
535 // sequence. It also encompasses several transformations which do not involve relocations, those could be
536 // separated and applied to AIX and other non-ELF targets. Likewise, the prefixed forms do not have encoding
537 // restrictions on the offset, so they are also used for static binaries to allow better code generation. E.g.
539 // MOVD something-byte-aligned(Rx), Ry
542 // is allowed when the prefixed forms are used.
544 // This requires an ISA 3.1 compatible cpu (e.g. Power10), and when linking externally, an ELFv2 1.5 compliant linker.
545 var prefixableOptab = []PrefixableOptab{
546 {Optab: Optab{as: AMOVD, a1: C_LCON, a6: C_REG, type_: 19, size: 8}, minGOPPC64: 10, pfxsize: 8},
547 {Optab: Optab{as: AMOVD, a1: C_ADDR, a6: C_REG, type_: 75, size: 8}, minGOPPC64: 10, pfxsize: 8},
548 {Optab: Optab{as: AMOVD, a1: C_TLS_LE, a6: C_REG, type_: 79, size: 8}, minGOPPC64: 10, pfxsize: 8},
549 {Optab: Optab{as: AMOVD, a1: C_TLS_IE, a6: C_REG, type_: 80, size: 12}, minGOPPC64: 10, pfxsize: 12},
550 {Optab: Optab{as: AMOVD, a1: C_LACON, a6: C_REG, type_: 26, size: 8}, minGOPPC64: 10, pfxsize: 8},
551 {Optab: Optab{as: AMOVD, a1: C_LOREG, a6: C_REG, type_: 36, size: 8}, minGOPPC64: 10, pfxsize: 8},
552 {Optab: Optab{as: AMOVD, a1: C_REG, a6: C_LOREG, type_: 35, size: 8}, minGOPPC64: 10, pfxsize: 8},
553 {Optab: Optab{as: AMOVD, a1: C_REG, a6: C_ADDR, type_: 74, size: 8}, minGOPPC64: 10, pfxsize: 8},
555 {Optab: Optab{as: AMOVW, a1: C_LCON, a6: C_REG, type_: 19, size: 8}, minGOPPC64: 10, pfxsize: 8},
556 {Optab: Optab{as: AMOVW, a1: C_LACON, a6: C_REG, type_: 26, size: 8}, minGOPPC64: 10, pfxsize: 8},
557 {Optab: Optab{as: AMOVW, a1: C_LOREG, a6: C_REG, type_: 36, size: 8}, minGOPPC64: 10, pfxsize: 8},
558 {Optab: Optab{as: AMOVW, a1: C_ADDR, a6: C_REG, type_: 75, size: 8}, minGOPPC64: 10, pfxsize: 8},
559 {Optab: Optab{as: AMOVW, a1: C_REG, a6: C_LOREG, type_: 35, size: 8}, minGOPPC64: 10, pfxsize: 8},
560 {Optab: Optab{as: AMOVW, a1: C_REG, a6: C_ADDR, type_: 74, size: 8}, minGOPPC64: 10, pfxsize: 8},
562 {Optab: Optab{as: AMOVB, a1: C_REG, a6: C_LOREG, type_: 35, size: 8}, minGOPPC64: 10, pfxsize: 8},
563 {Optab: Optab{as: AMOVB, a1: C_LOREG, a6: C_REG, type_: 36, size: 12}, minGOPPC64: 10, pfxsize: 12},
564 {Optab: Optab{as: AMOVB, a1: C_ADDR, a6: C_REG, type_: 75, size: 12}, minGOPPC64: 10, pfxsize: 12},
565 {Optab: Optab{as: AMOVB, a1: C_REG, a6: C_ADDR, type_: 74, size: 8}, minGOPPC64: 10, pfxsize: 8},
567 {Optab: Optab{as: AMOVBZ, a1: C_LOREG, a6: C_REG, type_: 36, size: 8}, minGOPPC64: 10, pfxsize: 8},
568 {Optab: Optab{as: AMOVBZ, a1: C_ADDR, a6: C_REG, type_: 75, size: 8}, minGOPPC64: 10, pfxsize: 8},
569 {Optab: Optab{as: AMOVBZ, a1: C_REG, a6: C_LOREG, type_: 35, size: 8}, minGOPPC64: 10, pfxsize: 8},
570 {Optab: Optab{as: AMOVBZ, a1: C_REG, a6: C_ADDR, type_: 74, size: 8}, minGOPPC64: 10, pfxsize: 8},
572 {Optab: Optab{as: AFMOVD, a1: C_LOREG, a6: C_FREG, type_: 36, size: 8}, minGOPPC64: 10, pfxsize: 8},
573 {Optab: Optab{as: AFMOVD, a1: C_ADDR, a6: C_FREG, type_: 75, size: 8}, minGOPPC64: 10, pfxsize: 8},
574 {Optab: Optab{as: AFMOVD, a1: C_FREG, a6: C_LOREG, type_: 35, size: 8}, minGOPPC64: 10, pfxsize: 8},
575 {Optab: Optab{as: AFMOVD, a1: C_FREG, a6: C_ADDR, type_: 74, size: 8}, minGOPPC64: 10, pfxsize: 8},
577 {Optab: Optab{as: AADD, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12}, minGOPPC64: 10, pfxsize: 8},
578 {Optab: Optab{as: AADD, a1: C_LCON, a6: C_REG, type_: 22, size: 12}, minGOPPC64: 10, pfxsize: 8},
// oprange maps each opcode (masked with obj.AMask) to the slice of Optab
// entries that can encode it. Populated by buildop, consumed by oplook.
581 var oprange [ALAST & obj.AMask][]Optab
// xcmp[a][b] records whether an operand fitted as class b is acceptable
// where the optab entry expects class a (see cmp). Populated by buildop.
583 var xcmp [C_NCLASS][C_NCLASS]bool
585 var pfxEnabled = false // ISA 3.1 prefixed instructions are supported.
586 var buildOpCfg = "" // Save the os/cpu/arch tuple used to configure the assembler in buildop
588 // padding bytes to add to align code as requested.
// addpad returns how many bytes of padding are needed to align pc to the
// alignment a requested by a PCALIGN directive, promoting the function's
// own alignment when necessary. NOTE(review): parts of this function are
// elided in this view; confirm the invalid-alignment path against the
// full source.
589 func addpad(pc, a int64, ctxt *obj.Link, cursym *obj.LSym) int {
592 // By default function alignment is 16. If an alignment > 16 is
593 // requested then the function alignment must also be promoted.
594 // The function alignment is not promoted on AIX at this time.
595 // TODO: Investigate AIX function alignment.
596 if ctxt.Headtype != objabi.Haix && cursym.Func().Align < int32(a) {
597 cursym.Func().Align = int32(a)
// Pad up to the next multiple of a; a is expected to be a power of two here.
600 return int(a - (pc & (a - 1)))
603 ctxt.Diag("Unexpected alignment: %d for PCALIGN directive\n", a)
608 // Get the implied register of an operand which doesn't specify one. These show up
609 // in handwritten asm like "MOVD R5, foosymbol" where a base register is not supplied,
610 // or "MOVD R5, foo+10(SP) or pseudo-register is used. The other common case is when
611 // generating constants in register like "MOVD $constant, Rx".
612 func (c *ctxt9) getimpliedreg(a *obj.Addr, p *obj.Prog) int {
// Constant classes carry no memory operand; the implied base is R0/ZERO.
614 if class >= C_ZCON && class <= C_64CON {
618 case C_SACON, C_LACON:
// Memory operand classes: pick the base from the operand's name kind.
620 case C_LOREG, C_SOREG, C_ZOREG, C_XOREG:
622 case obj.NAME_EXTERN, obj.NAME_STATIC:
624 case obj.NAME_AUTO, obj.NAME_PARAM:
// No rule matched: this operand/class combination is unsupported.
630 c.ctxt.Diag("failed to determine implied reg for class %v (%v)", DRconv(oclass(a)), p)
// span9 lays out the instructions of a single function: it assigns PCs,
// expands out-of-range conditional branches, keeps prefixed instructions
// from crossing 64-byte boundaries, and finally emits the machine code
// into the symbol. NOTE(review): this view of the function is elided;
// inline comments below describe only what the visible lines establish.
634 func span9(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
635 p := cursym.Func().Text
636 if p == nil || p.Link == nil { // handle external functions and ELF section symbols
// buildop must have populated oprange before assembly can proceed.
640 if oprange[AANDN&obj.AMask] == nil {
641 ctxt.Diag("ppc64 ops not initialized, call ppc64.buildop first")
644 c := ctxt9{ctxt: ctxt, newprog: newprog, cursym: cursym, autosize: int32(p.To.Offset)}
// First pass: assign a tentative PC to every instruction.
651 for p = p.Link; p != nil; p = p.Link {
656 if p.As == obj.APCALIGN {
657 a := c.vregoff(&p.From)
658 m = addpad(pc, a, ctxt, cursym)
660 if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA {
661 ctxt.Diag("zero-width instruction\n%v", p)
672 * if any procedure is large enough to
673 * generate a large SBRA branch, then
674 * generate extra passes putting branches
675 * around jmps to fix. this is rare.
682 var falign int32 // Track increased alignment requirements for prefix.
686 falign = 0 // Note, linker bumps function symbols to funcAlign.
687 for p = c.cursym.Func().Text.Link; p != nil; p = p.Link {
691 // very large conditional branches
692 if (o.type_ == 16 || o.type_ == 17) && p.To.Target() != nil {
693 otxt = p.To.Target().Pc - pc
// The BC displacement field is 16 bits; leave a 10-byte safety margin.
694 if otxt < -(1<<15)+10 || otxt >= (1<<15)-10 {
695 // Assemble the instruction with a target not too far to figure out BI and BO fields.
696 // If only the CTR or BI (the CR bit) are tested, the conditional branch can be inverted,
697 // and only one extra branch is needed to reach the target.
699 p.To.SetTarget(p.Link)
700 o.asmout(&c, p, o, &out)
// Extract the BO (branch options) and BI (CR bit) fields of the BC word.
703 bo := int64(out[0]>>21) & 31
704 bi := int16((out[0] >> 16) & 31)
708 // A conditional branch that is unconditionally taken. This cannot be inverted.
709 } else if bo&0x10 == 0x10 {
710 // A branch based on the value of CTR. Invert the CTR comparison against zero bit.
713 } else if bo&0x04 == 0x04 {
714 // A branch based on CR bit. Invert the BI comparison bit.
721 // BC bo,...,far_away_target
724 // BC invert(bo),next_insn
725 // JMP far_away_target
729 p.From = obj.Addr{Type: obj.TYPE_CONST, Name: obj.NAME_NONE, Offset: bo}
732 q.To.Type = obj.TYPE_BRANCH
733 q.To.SetTarget(p.To.Target())
735 p.To.SetTarget(p.Link)
737 p.Reg = REG_CRBIT0 + bi
740 // BC ...,far_away_target
746 // JMP far_away_target
753 q.To.Type = obj.TYPE_BRANCH
754 q.To.SetTarget(p.To.Target())
760 q.To.Type = obj.TYPE_BRANCH
761 q.To.SetTarget(q.Link.Link)
// Recompute PCs after any branch expansion above.
769 if p.As == obj.APCALIGN {
770 a := c.vregoff(&p.From)
771 m = addpad(pc, a, ctxt, cursym)
773 if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA {
774 ctxt.Diag("zero-width instruction\n%v", p)
780 // Prefixed instructions cannot be placed across a 64B boundary.
781 // Mark and adjust the PC of those which do. A nop will be
782 // inserted during final assembly.
784 mark := p.Mark &^ PFX_X64B
791 // Marks may be adjusted if a too-far conditional branch is
792 // fixed up above. Likewise, inserting a NOP may cause a
793 // branch target to become too far away. We need to run
794 // another iteration and verify no additional changes
801 // Check for 16 or 32B crossing of this prefixed insn.
802 // These do no require padding, but do require increasing
803 // the function alignment to prevent them from potentially
804 // crossing a 64B boundary when the linker assigns the final
807 case 28: // 32B crossing
809 case 12: // 16B crossing
823 c.cursym.Func().Align = falign
824 c.cursym.Grow(c.cursym.Size)
826 // lay out the code, emitting code and data relocations.
// ori 0,0,0 is the canonical ppc64 nop used for all padding below.
829 nop := LOP_IRR(OP_ORI, REGZERO, REGZERO, 0)
831 for p := c.cursym.Func().Text.Link; p != nil; p = p.Link {
834 if int(o.size) > 4*len(out) {
835 log.Fatalf("out array in span9 is too small, need at least %d for %v", o.size/4, p)
837 // asmout is not set up to add large amounts of padding
838 if o.type_ == 0 && p.As == obj.APCALIGN {
839 aln := c.vregoff(&p.From)
840 v := addpad(p.Pc, aln, c.ctxt, c.cursym)
842 // Same padding instruction for all
843 for i = 0; i < int32(v/4); i++ {
844 c.ctxt.Arch.ByteOrder.PutUint32(bp, nop)
// Emit the boundary-avoiding nop recorded during the layout pass.
849 if p.Mark&PFX_X64B != 0 {
850 c.ctxt.Arch.ByteOrder.PutUint32(bp, nop)
853 o.asmout(&c, p, o, &out)
854 for i = 0; i < int32(o.size/4); i++ {
855 c.ctxt.Arch.ByteOrder.PutUint32(bp, out[i])
// isint32 reports whether v can be represented exactly as a signed
// 32-bit integer.
func isint32(v int64) bool {
	return v >= math.MinInt32 && v <= math.MaxInt32
}
// isuint32 reports whether v can be represented exactly as an unsigned
// 32-bit integer.
func isuint32(v uint64) bool {
	return v <= math.MaxUint32
}
// aclassreg returns the operand class (C_REG, C_FREG, ...) for a bare
// register operand. Register-pair classes (C_REGP etc.) encode the
// even/odd position of the register in their low bit.
// NOTE(review): several return statements are elided in this view.
870 func (c *ctxt9) aclassreg(reg int16) int {
871 if REG_R0 <= reg && reg <= REG_R31 {
// reg&1 distinguishes the even (pair-capable) from the odd register.
872 return C_REGP + int(reg&1)
874 if REG_F0 <= reg && reg <= REG_F31 {
875 return C_FREGP + int(reg&1)
877 if REG_V0 <= reg && reg <= REG_V31 {
880 if REG_VS0 <= reg && reg <= REG_VS63 {
881 return C_VSREGP + int(reg&1)
// CR as a whole, or an individual CR field.
883 if REG_CR0 <= reg && reg <= REG_CR7 || reg == REG_CR {
// An individual CR bit (e.g. CR0LT).
886 if REG_CR0LT <= reg && reg <= REG_CR7SO {
889 if REG_SPR0 <= reg && reg <= REG_SPR0+1023 {
903 if REG_A0 <= reg && reg <= REG_A7 {
906 if reg == REG_FPSCR {
// aclass classifies an operand into one of the C_* operand classes used
// to match optab entries, setting c.instoffset to any constant/offset
// the operand carries. NOTE(review): this view is elided; the comments
// below describe only the visible lines.
912 func (c *ctxt9) aclass(a *obj.Addr) int {
918 return c.aclassreg(a.Reg)
922 if a.Name != obj.NAME_NONE || a.Offset != 0 {
923 c.ctxt.Logf("Unexpected Instruction operand index %d offset %d class %d \n", a.Index, a.Offset, a.Class)
929 case obj.NAME_GOTREF, obj.NAME_TOCREF:
932 case obj.NAME_EXTERN,
934 c.instoffset = a.Offset
937 } else if a.Sym.Type == objabi.STLSBSS {
938 // For PIC builds, use 12 byte got initial-exec TLS accesses.
939 if c.ctxt.Flag_shared {
942 // Otherwise, use 8 byte local-exec TLS accesses.
// Stack-relative memory operands: fold the frame size into the offset.
949 c.instoffset = int64(c.autosize) + a.Offset
// BIG bounds whether the offset fits the "short" operand classes.
951 if c.instoffset >= -BIG && c.instoffset < BIG {
957 c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.Arch.FixedFrameSize
958 if c.instoffset >= -BIG && c.instoffset < BIG {
964 c.instoffset = a.Offset
965 if a.Offset == 0 && a.Index == 0 {
967 } else if c.instoffset >= -BIG && c.instoffset < BIG {
976 case obj.TYPE_TEXTSIZE:
979 case obj.TYPE_FCONST:
980 // The only cases where FCONST will occur are with float64 +/- 0.
981 // All other float constants are generated in memory.
982 f64 := a.Val.(float64)
984 if math.Signbit(f64) {
989 log.Fatalf("Unexpected nonzero FCONST operand %v", a)
// Constant operands: classify by how many bits the value needs.
995 c.instoffset = a.Offset
997 if -BIG <= c.instoffset && c.instoffset < BIG {
1000 if isint32(c.instoffset) {
1006 case obj.NAME_EXTERN,
1012 c.instoffset = a.Offset
1016 c.instoffset = int64(c.autosize) + a.Offset
1017 if c.instoffset >= -BIG && c.instoffset < BIG {
1022 case obj.NAME_PARAM:
1023 c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.Arch.FixedFrameSize
1024 if c.instoffset >= -BIG && c.instoffset < BIG {
// Non-negative constants: smallest unsigned class that holds the value.
1033 if c.instoffset >= 0 {
1034 sbits := bits.Len64(uint64(c.instoffset))
1037 return C_ZCON + sbits
1045 // Special case, a positive int32 value which is a multiple of 2^16
1046 if c.instoffset&0xFFFF == 0 {
// Negative constants: size by the bit length of the complement.
1058 sbits := bits.Len64(uint64(^c.instoffset))
1063 // Special case, a negative int32 value which is a multiple of 2^16
1064 if c.instoffset&0xFFFF == 0 {
1075 case obj.TYPE_BRANCH:
1076 if a.Sym != nil && c.ctxt.Flag_dynlink && !pfxEnabled {
1085 func prasm(p *obj.Prog) {
1086 fmt.Printf("%v\n", p)
// oplook classifies all operands of p (caching the classes in each
// operand's Class field, stored as class+1 so zero means "unset") and
// returns the matching Optab entry, or diagnoses an illegal combination.
// NOTE(review): this view is elided; comments cover visible lines only.
1089 func (c *ctxt9) oplook(p *obj.Prog) *Optab {
// Class is cached as class+1; zero indicates it has not been computed.
1094 a1 = int(p.From.Class)
1096 a1 = c.aclass(&p.From) + 1
1097 p.From.Class = int8(a1)
// Up to three middle arguments, each defaulting to C_NONE.
1101 argsv := [3]int{C_NONE + 1, C_NONE + 1, C_NONE + 1}
1102 for i, ap := range p.RestArgs {
1103 argsv[i] = int(ap.Addr.Class)
1105 argsv[i] = c.aclass(&ap.Addr) + 1
1106 ap.Addr.Class = int8(argsv[i])
1114 a6 := int(p.To.Class)
1116 a6 = c.aclass(&p.To) + 1
1117 p.To.Class = int8(a6)
1123 a2 = c.aclassreg(p.Reg)
1126 // c.ctxt.Logf("oplook %v %d %d %d %d\n", p, a1, a2, a3, a4, a5, a6)
// Scan the candidate entries for this opcode using the xcmp
// compatibility rows for each operand position.
1127 ops := oprange[p.As&obj.AMask]
1134 for i := range ops {
1136 if c1[op.a1] && c2[op.a2] && c3[op.a3] && c4[op.a4] && c5[op.a5] && c6[op.a6] {
// Cache the optab index (plus one) on the Prog for reuse.
1137 p.Optab = uint16(cap(optab) - cap(ops) + i + 1)
1142 c.ctxt.Diag("illegal combination %v %v %v %v %v %v %v", p.As, DRconv(a1), DRconv(a2), DRconv(a3), DRconv(a4), DRconv(a5), DRconv(a6))
1150 // Compare two operand types (ex C_REG, or C_SCON)
1151 // and return true if b is compatible with a.
// Argument comparison isn't reflexive, so care must be taken.
1154 // a is the argument type as found in optab, b is the argument as
1155 // fitted by aclass.
1156 func cmp(a int, b int) bool {
// Special registers are also accepted where a generic SPR is expected.
1163 if b == C_LR || b == C_XER || b == C_CTR {
// Constant classes nest: each unsigned width accepts all smaller ones.
1168 return cmp(C_ZCON, b)
1170 return cmp(C_U1CON, b)
1172 return cmp(C_U2CON, b)
1174 return cmp(C_U3CON, b)
1176 return cmp(C_U4CON, b)
1178 return cmp(C_U5CON, b)
1180 return cmp(C_U8CON, b)
1182 return cmp(C_U15CON, b)
1185 return cmp(C_U15CON, b)
1187 return cmp(C_S16CON, b) || cmp(C_U16CON, b) || cmp(C_32S16CON, b)
1189 return cmp(C_32CON, b)
1191 return cmp(C_S34CON, b)
1194 return cmp(C_ZCON, b)
1197 return cmp(C_SACON, b)
1200 return cmp(C_SBRA, b)
1203 return cmp(C_ZOREG, b)
1206 return cmp(C_SOREG, b)
1209 return cmp(C_REG, b) || cmp(C_ZOREG, b)
1211 // An even/odd register input always matches the regular register types.
1213 return cmp(C_REGP, b) || (b == C_ZCON && r0iszero != 0)
1215 return cmp(C_FREGP, b)
1217 /* Allow any VR argument as a VSR operand. */
1218 return cmp(C_VSREGP, b) || cmp(C_VREG, b)
1227 // Used when sorting the optab. Sorting is
1228 // done in a way so that the best choice of
1229 // opcode/operand combination is considered first.
// optabLess orders entries by opcode first, then smaller encodings,
// then operand classes in argument order.
1230 func optabLess(i, j int) bool {
1233 n := int(p1.as) - int(p2.as)
1238 // Consider those that generate fewer
1239 // instructions first.
1240 n = int(p1.size) - int(p2.size)
1244 // operand order should match
1245 // better choices first
1246 n = int(p1.a1) - int(p2.a1)
1250 n = int(p1.a2) - int(p2.a2)
1254 n = int(p1.a3) - int(p2.a3)
1258 n = int(p1.a4) - int(p2.a4)
1262 n = int(p1.a5) - int(p2.a5)
1266 n = int(p1.a6) - int(p2.a6)
1273 // Add an entry to the opcode table for
1274 // a new opcode b0 with the same operand combinations
// opset aliases opcode a to the already-built entries of b0
// (b0 is expected to be pre-masked by the caller).
1276 func opset(a, b0 obj.As) {
1277 oprange[a&obj.AMask] = oprange[b0]
1280 // Determine if the build configuration requires a TOC pointer.
1281 // It is assumed this always called after buildop.
1282 func NeedTOCpointer(ctxt *obj.Link) bool {
1283 return !pfxEnabled && ctxt.Flag_shared
1286 // Build the opcode table
// buildop finalizes the optab for the configured GOPPC64/GOOS/GOARCH:
// it enables prefixed-instruction entries where supported, fills the
// xcmp compatibility matrix, sorts optab, slices it into oprange, and
// registers every derived opcode via opset.
// NOTE(review): large parts of this function are elided in this view.
1287 func buildop(ctxt *obj.Link) {
1288 // Limit PC-relative prefix instruction usage to supported and tested targets.
1289 pfxEnabled = buildcfg.GOPPC64 >= 10 && buildcfg.GOOS == "linux"
1290 cfg := fmt.Sprintf("power%d/%s/%s", buildcfg.GOPPC64, buildcfg.GOARCH, buildcfg.GOOS)
1291 if cfg == buildOpCfg {
1292 // Already initialized to correct OS/cpu; stop now.
1293 // This happens in the cmd/asm tests,
1294 // each of which re-initializes the arch.
1299 // Configure the optab entries which may generate prefix opcodes.
1300 prefixOptab := make([]Optab, 0, len(prefixableOptab))
1301 for _, entry := range prefixableOptab {
1303 if pfxEnabled && buildcfg.GOPPC64 >= entry.minGOPPC64 {
1304 // Enable prefix opcode generation and resize.
1306 entry.size = entry.pfxsize
1308 // Use the legacy assembler function if none provided.
1309 if entry.asmout == nil {
1310 entry.asmout = asmout
1312 prefixOptab = append(prefixOptab, entry.Optab)
// Fill the operand-class compatibility matrix used by oplook.
1316 for i := 0; i < C_NCLASS; i++ {
1317 for n := 0; n < C_NCLASS; n++ {
1323 for i := range optab {
1324 // Use the legacy assembler function if none provided.
1325 if optab[i].asmout == nil {
1326 optab[i].asmout = asmout
1329 // Append the generated entries, sort, and fill out oprange.
1330 optab = append(optab, optabGen...)
1331 optab = append(optab, prefixOptab...)
1332 sort.Slice(optab, optabLess)
// Slice the sorted optab into per-opcode runs, then alias every
// derived opcode onto its base entry with opset.
1334 for i := 0; i < len(optab); {
1338 for i < len(optab) && optab[i].as == r {
1341 oprange[r0] = optab[start:i]
1346 ctxt.Diag("unknown op in build: %v", r)
1347 log.Fatalf("instruction missing from switch in asm9.go:buildop: %v", r)
1350 case ADCBF: /* unary indexed: op (b+a); op (b) */
1359 case ASTDCCC: /* indexed store: op s,(b+a); op s,(b) */
1364 case AREM: /* macro */
1376 case ADIVW: /* op Rb[,Ra],Rd */
1381 opset(AMULHWUCC, r0)
1383 opset(AMULLWVCC, r0)
1391 opset(ADIVWUVCC, r0)
1408 opset(AMULHDUCC, r0)
1410 opset(AMULLDVCC, r0)
1417 opset(ADIVDEUCC, r0)
1422 opset(ADIVDUVCC, r0)
1434 case APOPCNTD: /* popcntd, popcntw, popcntb, cnttzw, cnttzd */
1438 opset(ACNTTZWCC, r0)
1440 opset(ACNTTZDCC, r0)
1442 case ACOPY: /* copy, paste. */
1445 case AMADDHD: /* maddhd, maddhdu, maddld */
1449 case AMOVBZ: /* lbz, stz, rlwm(r/r), lhz, lha, stz, and x variants */
1453 case AMOVBZU: /* lbz[x]u, stb[x]u, lhz[x]u, lha[x]u, sth[u]x, ld[x]u, std[u]x */
1462 case ALVEBX: /* lvebx, lvehx, lvewx, lvx, lvxl, lvsl, lvsr */
1470 case ASTVEBX: /* stvebx, stvehx, stvewx, stvx, stvxl */
1476 case AVAND: /* vand, vandc, vnand */
1481 case AVMRGOW: /* vmrgew, vmrgow */
1484 case AVOR: /* vor, vorc, vxor, vnor, veqv */
1491 case AVADDUM: /* vaddubm, vadduhm, vadduwm, vaddudm, vadduqm */
1498 case AVADDCU: /* vaddcuq, vaddcuw */
1502 case AVADDUS: /* vaddubs, vadduhs, vadduws */
1507 case AVADDSS: /* vaddsbs, vaddshs, vaddsws */
1512 case AVADDE: /* vaddeuqm, vaddecuq */
1513 opset(AVADDEUQM, r0)
1514 opset(AVADDECUQ, r0)
1516 case AVSUBUM: /* vsububm, vsubuhm, vsubuwm, vsubudm, vsubuqm */
1523 case AVSUBCU: /* vsubcuq, vsubcuw */
1527 case AVSUBUS: /* vsububs, vsubuhs, vsubuws */
1532 case AVSUBSS: /* vsubsbs, vsubshs, vsubsws */
1537 case AVSUBE: /* vsubeuqm, vsubecuq */
1538 opset(AVSUBEUQM, r0)
1539 opset(AVSUBECUQ, r0)
1541 case AVMULESB: /* vmulesb, vmulosb, vmuleub, vmuloub, vmulosh, vmulouh, vmulesw, vmulosw, vmuleuw, vmulouw, vmuluwm */
1554 case AVPMSUM: /* vpmsumb, vpmsumh, vpmsumw, vpmsumd */
1560 case AVR: /* vrlb, vrlh, vrlw, vrld */
1566 case AVS: /* vs[l,r], vs[l,r]o, vs[l,r]b, vs[l,r]h, vs[l,r]w, vs[l,r]d */
1580 case AVSA: /* vsrab, vsrah, vsraw, vsrad */
1586 case AVSOI: /* vsldoi */
1589 case AVCLZ: /* vclzb, vclzh, vclzw, vclzd */
1595 case AVPOPCNT: /* vpopcntb, vpopcnth, vpopcntw, vpopcntd */
1596 opset(AVPOPCNTB, r0)
1597 opset(AVPOPCNTH, r0)
1598 opset(AVPOPCNTW, r0)
1599 opset(AVPOPCNTD, r0)
1601 case AVCMPEQ: /* vcmpequb[.], vcmpequh[.], vcmpequw[.], vcmpequd[.] */
1602 opset(AVCMPEQUB, r0)
1603 opset(AVCMPEQUBCC, r0)
1604 opset(AVCMPEQUH, r0)
1605 opset(AVCMPEQUHCC, r0)
1606 opset(AVCMPEQUW, r0)
1607 opset(AVCMPEQUWCC, r0)
1608 opset(AVCMPEQUD, r0)
1609 opset(AVCMPEQUDCC, r0)
1611 case AVCMPGT: /* vcmpgt[u,s]b[.], vcmpgt[u,s]h[.], vcmpgt[u,s]w[.], vcmpgt[u,s]d[.] */
1612 opset(AVCMPGTUB, r0)
1613 opset(AVCMPGTUBCC, r0)
1614 opset(AVCMPGTUH, r0)
1615 opset(AVCMPGTUHCC, r0)
1616 opset(AVCMPGTUW, r0)
1617 opset(AVCMPGTUWCC, r0)
1618 opset(AVCMPGTUD, r0)
1619 opset(AVCMPGTUDCC, r0)
1620 opset(AVCMPGTSB, r0)
1621 opset(AVCMPGTSBCC, r0)
1622 opset(AVCMPGTSH, r0)
1623 opset(AVCMPGTSHCC, r0)
1624 opset(AVCMPGTSW, r0)
1625 opset(AVCMPGTSWCC, r0)
1626 opset(AVCMPGTSD, r0)
1627 opset(AVCMPGTSDCC, r0)
1629 case AVCMPNEZB: /* vcmpnezb[.] */
1630 opset(AVCMPNEZBCC, r0)
1632 opset(AVCMPNEBCC, r0)
1634 opset(AVCMPNEHCC, r0)
1636 opset(AVCMPNEWCC, r0)
1638 case AVPERM: /* vperm */
1639 opset(AVPERMXOR, r0)
1642 case AVBPERMQ: /* vbpermq, vbpermd */
1645 case AVSEL: /* vsel */
1648 case AVSPLTB: /* vspltb, vsplth, vspltw */
1652 case AVSPLTISB: /* vspltisb, vspltish, vspltisw */
1653 opset(AVSPLTISH, r0)
1654 opset(AVSPLTISW, r0)
1656 case AVCIPH: /* vcipher, vcipherlast */
1658 opset(AVCIPHERLAST, r0)
1660 case AVNCIPH: /* vncipher, vncipherlast */
1661 opset(AVNCIPHER, r0)
1662 opset(AVNCIPHERLAST, r0)
1664 case AVSBOX: /* vsbox */
1667 case AVSHASIGMA: /* vshasigmaw, vshasigmad */
1668 opset(AVSHASIGMAW, r0)
1669 opset(AVSHASIGMAD, r0)
1671 case ALXVD2X: /* lxvd2x, lxvdsx, lxvw4x, lxvh8x, lxvb16x */
1677 case ALXV: /* lxv */
1680 case ALXVL: /* lxvl, lxvll, lxvx */
1684 case ASTXVD2X: /* stxvd2x, stxvdsx, stxvw4x, stxvh8x, stxvb16x */
1687 opset(ASTXVB16X, r0)
1689 case ASTXV: /* stxv */
1692 case ASTXVL: /* stxvl, stxvll, stvx */
1696 case ALXSDX: /* lxsdx */
1699 case ASTXSDX: /* stxsdx */
1702 case ALXSIWAX: /* lxsiwax, lxsiwzx */
1705 case ASTXSIWX: /* stxsiwx */
1708 case AMFVSRD: /* mfvsrd, mfvsrwz (and extended mnemonics), mfvsrld */
1714 case AMTVSRD: /* mtvsrd, mtvsrwa, mtvsrwz (and extended mnemonics), mtvsrdd, mtvsrws */
1721 case AXXLAND: /* xxland, xxlandc, xxleqv, xxlnand */
1726 case AXXLOR: /* xxlorc, xxlnor, xxlor, xxlxor */
1732 case AXXSEL: /* xxsel */
1735 case AXXMRGHW: /* xxmrghw, xxmrglw */
1738 case AXXSPLTW: /* xxspltw */
1741 case AXXSPLTIB: /* xxspltib */
1742 opset(AXXSPLTIB, r0)
1744 case AXXPERM: /* xxpermdi */
1747 case AXXSLDWI: /* xxsldwi */
1748 opset(AXXPERMDI, r0)
1751 case AXXBRQ: /* xxbrq, xxbrd, xxbrw, xxbrh */
1756 case AXSCVDPSP: /* xscvdpsp, xscvspdp, xscvdpspn, xscvspdpn */
1757 opset(AXSCVSPDP, r0)
1758 opset(AXSCVDPSPN, r0)
1759 opset(AXSCVSPDPN, r0)
1761 case AXVCVDPSP: /* xvcvdpsp, xvcvspdp */
1762 opset(AXVCVSPDP, r0)
1764 case AXSCVDPSXDS: /* xscvdpsxds, xscvdpsxws, xscvdpuxds, xscvdpuxws */
1765 opset(AXSCVDPSXWS, r0)
1766 opset(AXSCVDPUXDS, r0)
1767 opset(AXSCVDPUXWS, r0)
1769 case AXSCVSXDDP: /* xscvsxddp, xscvuxddp, xscvsxdsp, xscvuxdsp */
1770 opset(AXSCVUXDDP, r0)
1771 opset(AXSCVSXDSP, r0)
1772 opset(AXSCVUXDSP, r0)
1774 case AXVCVDPSXDS: /* xvcvdpsxds, xvcvdpsxws, xvcvdpuxds, xvcvdpuxws, xvcvspsxds, xvcvspsxws, xvcvspuxds, xvcvspuxws */
1775 opset(AXVCVDPSXDS, r0)
1776 opset(AXVCVDPSXWS, r0)
1777 opset(AXVCVDPUXDS, r0)
1778 opset(AXVCVDPUXWS, r0)
1779 opset(AXVCVSPSXDS, r0)
1780 opset(AXVCVSPSXWS, r0)
1781 opset(AXVCVSPUXDS, r0)
1782 opset(AXVCVSPUXWS, r0)
1784 case AXVCVSXDDP: /* xvcvsxddp, xvcvsxwdp, xvcvuxddp, xvcvuxwdp, xvcvsxdsp, xvcvsxwsp, xvcvuxdsp, xvcvuxwsp */
1785 opset(AXVCVSXWDP, r0)
1786 opset(AXVCVUXDDP, r0)
1787 opset(AXVCVUXWDP, r0)
1788 opset(AXVCVSXDSP, r0)
1789 opset(AXVCVSXWSP, r0)
1790 opset(AXVCVUXDSP, r0)
1791 opset(AXVCVUXWSP, r0)
1793 case AAND: /* logical op Rb,Rs,Ra; no literal */
1807 case AADDME: /* op Ra, Rd */
1811 opset(AADDMEVCC, r0)
1815 opset(AADDZEVCC, r0)
1819 opset(ASUBMEVCC, r0)
1823 opset(ASUBZEVCC, r0)
1846 case AEXTSB: /* op Rs, Ra */
1852 opset(ACNTLZWCC, r0)
1856 opset(ACNTLZDCC, r0)
1858 case AFABS: /* fop [s,]d */
1870 opset(AFCTIWZCC, r0)
1874 opset(AFCTIDZCC, r0)
1878 opset(AFCFIDUCC, r0)
1880 opset(AFCFIDSCC, r0)
1892 opset(AFRSQRTECC, r0)
1896 opset(AFSQRTSCC, r0)
1903 opset(AFCPSGNCC, r0)
1916 opset(AFMADDSCC, r0)
1920 opset(AFMSUBSCC, r0)
1922 opset(AFNMADDCC, r0)
1924 opset(AFNMADDSCC, r0)
1926 opset(AFNMSUBCC, r0)
1928 opset(AFNMSUBSCC, r0)
1941 opset(AMTFSB0CC, r0)
1943 opset(AMTFSB1CC, r0)
1945 case ANEG: /* op [Ra,] Rd */
1951 case AOR: /* or/xor Rb,Rs,Ra; ori/xori $uimm,Rs,R */
1954 case AORIS: /* oris/xoris $uimm,Rs,Ra */
1969 case ASRAW: /* sraw Rb,Rs,Ra; srawi sh,Rs,Ra */
1973 opset(AEXTSWSLICC, r0)
1975 case ASRAD: /* sraw Rb,Rs,Ra; srawi sh,Rs,Ra */
1978 case ASUB: /* SUB Ra,Rb,Rd => subf Rd,ra,rb */
2007 opset(ARLDIMICC, r0)
2018 opset(ARLDICLCC, r0)
2020 opset(ARLDICRCC, r0)
2023 opset(ACLRLSLDI, r0)
2036 case ASYSCALL: /* just the op; flow of control */
2075 case AMOVW: /* load/store/move word with sign extension; move 32-bit literals */
2076 opset(AMOVWZ, r0) /* Same as above, but zero extended */
2079 opset(AVCTZLSBB, r0)
2083 AANDCC, /* and. Rb,Rs,Ra; andi. $uimm,Rs,Ra */
2088 AMOVD, /* load/store/move 64-bit values, including 32-bit literals with/without sign-extension */
2089 AMOVB, /* macro: move byte with sign extension */
2090 AMOVBU, /* macro: move byte with sign extension & update */
2092 /* op $s[,r2],r3; op r1[,r2],r3; no cc/v */
2093 ASUBC, /* op r1,$s,r3; op r1[,r2],r3 */
// OPVXX1 assembles an XX1-form base opcode: primary opcode o in bits
// 26-31, extended opcode xo shifted into bit 1, and oe into bit 11.
func OPVXX1(o uint32, xo uint32, oe uint32) uint32 {
	insn := o << 26
	insn |= xo << 1
	insn |= oe << 11
	return insn
}
// OPVXX2 assembles an XX2-form base opcode (xo at bit 2, oe at bit 11).
2124 func OPVXX2(o uint32, xo uint32, oe uint32) uint32 {
2125 return o<<26 | xo<<2 | oe<<11
// OPVXX2VA is the XX2 variant with oe placed in the VA field (bit 16).
2128 func OPVXX2VA(o uint32, xo uint32, oe uint32) uint32 {
2129 return o<<26 | xo<<2 | oe<<16
// OPVXX3 assembles an XX3-form base opcode (xo at bit 3).
2132 func OPVXX3(o uint32, xo uint32, oe uint32) uint32 {
2133 return o<<26 | xo<<3 | oe<<11
// OPVXX4 assembles an XX4-form base opcode (xo at bit 4).
2136 func OPVXX4(o uint32, xo uint32, oe uint32) uint32 {
2137 return o<<26 | xo<<4 | oe<<11
// OPDQ assembles a DQ-form base opcode (xo unshifted, oe at bit 4).
2140 func OPDQ(o uint32, xo uint32, oe uint32) uint32 {
2141 return o<<26 | xo | oe<<4
// OPVX assembles a VX-form base opcode with record bit rc in bit 0.
2144 func OPVX(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
2145 return o<<26 | xo | oe<<11 | rc&1
// OPVC assembles a VC-form base opcode with record bit rc in bit 10.
2148 func OPVC(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
2149 return o<<26 | xo | oe<<11 | (rc&1)<<10
// OPVCC assembles an X/XO-form base opcode: primary opcode o in bits
// 26-31, extended opcode xo at bit 1, OE flag oe at bit 10, and the
// record (Rc) bit rc in bit 0.
func OPVCC(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
	return (o << 26) | (xo << 1) | (oe << 10) | (rc & 1)
}
// OPCC is OPVCC with the OE flag forced to zero.
2156 func OPCC(o uint32, xo uint32, rc uint32) uint32 {
2157 return OPVCC(o, xo, 0, rc)
2160 /* Generate MD-form opcode */
2161 func OPMD(o, xo, rc uint32) uint32 {
2162 return o<<26 | xo<<2 | rc&1
/* the order is dest, a/s, b/imm for both arithmetic and logical operations. */
// AOP_RRR fills the three 5-bit register fields of op: d in bits 21-25,
// a in bits 16-20, b in bits 11-15.
func AOP_RRR(op uint32, d uint32, a uint32, b uint32) uint32 {
	insn := op
	insn |= (d & 31) << 21
	insn |= (a & 31) << 16
	insn |= (b & 31) << 11
	return insn
}
2170 /* VX-form 2-register operands, r/none/r */
// AOP_RR fills d (bits 21-25) and a (bits 11-15); the middle field is unused.
2171 func AOP_RR(op uint32, d uint32, a uint32) uint32 {
2172 return op | (d&31)<<21 | (a&31)<<11
2175 /* VA-form 4-register operands */
// AOP_RRRR adds a fourth register field c in bits 6-10.
2176 func AOP_RRRR(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
2177 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c&31)<<6
// AOP_IRR fills a D-form instruction: d in bits 21-25, a in bits 16-20,
// and the low 16 bits of simm as the immediate field.
func AOP_IRR(op uint32, d uint32, a uint32, simm uint32) uint32 {
	const immMask = 0xFFFF
	return op | (d&31)<<21 | (a&31)<<16 | simm&immMask
}
2184 /* VX-form 2-register + UIM operands */
// Note: the immediate occupies bits 16-20 and register a moves to bits 11-15.
2185 func AOP_VIRR(op uint32, d uint32, a uint32, simm uint32) uint32 {
2186 return op | (d&31)<<21 | (simm&0xFFFF)<<16 | (a&31)<<11
2189 /* VX-form 2-register + ST + SIX operands */
2190 func AOP_IIRR(op uint32, d uint32, a uint32, sbit uint32, simm uint32) uint32 {
2191 return op | (d&31)<<21 | (a&31)<<16 | (sbit&1)<<15 | (simm&0xF)<<11
2194 /* VA-form 3-register + SHB operands */
2195 func AOP_IRRR(op uint32, d uint32, a uint32, b uint32, simm uint32) uint32 {
2196 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (simm&0xF)<<6
2199 /* VX-form 1-register + SIM operands */
2200 func AOP_IR(op uint32, d uint32, simm uint32) uint32 {
2201 return op | (d&31)<<21 | (simm&31)<<16
2204 /* XX1-form 3-register operands, 1 VSR operand */
// The 6th (high) bit of VSR number r lands in the TX/SX field (bit 0).
2205 func AOP_XX1(op uint32, r uint32, a uint32, b uint32) uint32 {
2206 return op | (r&31)<<21 | (a&31)<<16 | (b&31)<<11 | (r&32)>>5
2209 /* XX2-form 3-register operands, 2 VSR operands */
// The middle field is a 2-bit UIM here, not a full register number.
2210 func AOP_XX2(op uint32, xt uint32, a uint32, xb uint32) uint32 {
2211 return op | (xt&31)<<21 | (a&3)<<16 | (xb&31)<<11 | (xb&32)>>4 | (xt&32)>>5
2214 /* XX3-form 3 VSR operands */
2215 func AOP_XX3(op uint32, xt uint32, xa uint32, xb uint32) uint32 {
2216 return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
2219 /* XX3-form 3 VSR operands + immediate */
2220 func AOP_XX3I(op uint32, xt uint32, xa uint32, xb uint32, c uint32) uint32 {
2221 return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (c&3)<<8 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
2224 /* XX4-form, 4 VSR operands */
2225 func AOP_XX4(op uint32, xt uint32, xa uint32, xb uint32, xc uint32) uint32 {
2226 return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (xc&31)<<6 | (xc&32)>>2 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
2229 /* DQ-form, VSR register, register + offset operands */
2230 func AOP_DQ(op uint32, xt uint32, a uint32, b uint32) uint32 {
2231 /* The EA for this instruction form is (RA) + DQ << 4, where DQ is a 12-bit signed integer. */
2232 /* In order to match the output of the GNU objdump (and make the usage in Go asm easier), the */
2233 /* instruction is called using the sign extended value (i.e. a valid offset would be -32752 or 32752, */
2234 /* not -2047 or 2047), so 'b' needs to be adjusted to the expected 12-bit DQ value. Bear in mind that */
2235 /* bits 0 to 3 in 'dq' need to be zero, otherwise this will generate an illegal instruction. */
2236 /* If in doubt how this instruction form is encoded, refer to ISA 3.0b, pages 492 and 507. */
2238 return op | (xt&31)<<21 | (a&31)<<16 | (dq&4095)<<4 | (xt&32)>>2
2241 /* Z23-form, 3-register operands + CY field */
2242 func AOP_Z23I(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
2243 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c&3)<<9
2246 /* X-form, 3-register operands + EH field */
2247 func AOP_RRRI(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
2248 return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c & 1)
// LOP_RRR is the logical-op layout: source s in bits 21-25, dest a in 16-20.
2251 func LOP_RRR(op uint32, a uint32, s uint32, b uint32) uint32 {
2252 return op | (s&31)<<21 | (a&31)<<16 | (b&31)<<11
// LOP_IRR fills a logical D-form instruction (ori/andi/...): source s in
// bits 21-25, destination a in bits 16-20, and the low 16 bits of uimm
// as the unsigned immediate.
func LOP_IRR(op uint32, a uint32, s uint32, uimm uint32) uint32 {
	insn := op | uimm&0xFFFF
	insn |= (s & 31) << 21
	insn |= (a & 31) << 16
	return insn
}
// OP_BR assembles an I-form branch: 24-bit LI displacement plus the AA bit.
2259 func OP_BR(op uint32, li uint32, aa uint32) uint32 {
2260 return op | li&0x03FFFFFC | aa<<1
// OP_BC assembles a B-form conditional branch: BO, BI, 14-bit BD, AA bit.
2263 func OP_BC(op uint32, bo uint32, bi uint32, bd uint32, aa uint32) uint32 {
2264 return op | (bo&0x1F)<<21 | (bi&0x1F)<<16 | bd&0xFFFC | aa<<1
// OP_BCR assembles a register-indirect conditional branch (bclr/bcctr).
2267 func OP_BCR(op uint32, bo uint32, bi uint32) uint32 {
2268 return op | (bo&0x1F)<<21 | (bi&0x1F)<<16
// OP_RLW assembles an M-form rotate: SH, MB, ME fields.
2271 func OP_RLW(op uint32, a uint32, s uint32, sh uint32, mb uint32, me uint32) uint32 {
2272 return op | (s&31)<<21 | (a&31)<<16 | (sh&31)<<11 | (mb&31)<<6 | (me&31)<<1
// AOP_RLDIC assembles an MD-form rotate; the 6th bits of sh and m are
// split into their own fields per the MD encoding.
2275 func AOP_RLDIC(op uint32, a uint32, s uint32, sh uint32, m uint32) uint32 {
2276 return op | (s&31)<<21 | (a&31)<<16 | (sh&31)<<11 | ((sh&32)>>5)<<1 | (m&31)<<6 | ((m&32)>>5)<<5
2279 func AOP_EXTSWSLI(op uint32, a uint32, s uint32, sh uint32) uint32 {
2280 return op | (a&31)<<21 | (s&31)<<16 | (sh&31)<<11 | ((sh&32)>>5)<<1
// AOP_ISEL assembles an A-form isel with condition field bc in bits 6-10.
2283 func AOP_ISEL(op uint32, t uint32, a uint32, b uint32, bc uint32) uint32 {
2284 return op | (t&31)<<21 | (a&31)<<16 | (b&31)<<11 | (bc&0x1F)<<6
// AOP_PFX_00_8LS assembles the prefix word of an 8LS:D-form prefixed
// instruction (ISA 3.1): prefix opcode 1, type 00, R bit r, and the
// upper 18 bits of the immediate in ie.
func AOP_PFX_00_8LS(r, ie uint32) uint32 {
	word := uint32(1) << 26
	word |= (r & 1) << 20
	word |= ie & 0x3FFFF
	return word
}
// AOP_PFX_10_MLS assembles the prefix word of an MLS:D-form prefixed
// instruction (ISA 3.1): prefix opcode 1, type 10, R bit r, and the
// upper 18 bits of the immediate in ie.
2290 func AOP_PFX_10_MLS(r, ie uint32) uint32 {
2291 return 1<<26 | 2<<24 | 0<<23 | (r&1)<<20 | (ie & 0x3FFFF)
2295 /* each rhs is OPVCC(_, _, _, _) */
// Pre-expanded base opcodes (primary<<26 | extended<<1 | OE<<10 | Rc)
// for the instructions emitted directly by the assembler backend.
2296 OP_ADD = 31<<26 | 266<<1 | 0<<10 | 0
2297 OP_ADDI = 14<<26 | 0<<1 | 0<<10 | 0
2298 OP_ADDIS = 15<<26 | 0<<1 | 0<<10 | 0
2299 OP_ANDI = 28<<26 | 0<<1 | 0<<10 | 0
2300 OP_EXTSB = 31<<26 | 954<<1 | 0<<10 | 0
2301 OP_EXTSH = 31<<26 | 922<<1 | 0<<10 | 0
2302 OP_EXTSW = 31<<26 | 986<<1 | 0<<10 | 0
2303 OP_ISEL = 31<<26 | 15<<1 | 0<<10 | 0
2304 OP_MCRF = 19<<26 | 0<<1 | 0<<10 | 0
2305 OP_MCRFS = 63<<26 | 64<<1 | 0<<10 | 0
2306 OP_MCRXR = 31<<26 | 512<<1 | 0<<10 | 0
2307 OP_MFCR = 31<<26 | 19<<1 | 0<<10 | 0
2308 OP_MFFS = 63<<26 | 583<<1 | 0<<10 | 0
2309 OP_MFSPR = 31<<26 | 339<<1 | 0<<10 | 0
2310 OP_MFSR = 31<<26 | 595<<1 | 0<<10 | 0
2311 OP_MFSRIN = 31<<26 | 659<<1 | 0<<10 | 0
2312 OP_MTCRF = 31<<26 | 144<<1 | 0<<10 | 0
2313 OP_MTFSF = 63<<26 | 711<<1 | 0<<10 | 0
2314 OP_MTFSFI = 63<<26 | 134<<1 | 0<<10 | 0
2315 OP_MTSPR = 31<<26 | 467<<1 | 0<<10 | 0
2316 OP_MTSR = 31<<26 | 210<<1 | 0<<10 | 0
2317 OP_MTSRIN = 31<<26 | 242<<1 | 0<<10 | 0
2318 OP_MULLW = 31<<26 | 235<<1 | 0<<10 | 0
2319 OP_MULLD = 31<<26 | 233<<1 | 0<<10 | 0
2320 OP_OR = 31<<26 | 444<<1 | 0<<10 | 0
2321 OP_ORI = 24<<26 | 0<<1 | 0<<10 | 0
2322 OP_ORIS = 25<<26 | 0<<1 | 0<<10 | 0
2323 OP_RLWINM = 21<<26 | 0<<1 | 0<<10 | 0
2324 OP_RLWNM = 23<<26 | 0<<1 | 0<<10 | 0
2325 OP_SUBF = 31<<26 | 40<<1 | 0<<10 | 0
2326 OP_RLDIC = 30<<26 | 4<<1 | 0<<10 | 0
2327 OP_RLDICR = 30<<26 | 2<<1 | 0<<10 | 0
2328 OP_RLDICL = 30<<26 | 0<<1 | 0<<10 | 0
2329 OP_RLDCL = 30<<26 | 8<<1 | 0<<10 | 0
// MD-form and VA-less encodings with differently shifted extended opcodes.
2330 OP_EXTSWSLI = 31<<26 | 445<<2
2331 OP_SETB = 31<<26 | 128<<1
// pfxadd returns the prefixed (paddi) instruction pair: the MLS prefix
// word holding the high 18 bits of imm32, and the addi word with the low 16.
2334 func pfxadd(rt, ra int16, r uint32, imm32 int64) (uint32, uint32) {
2335 return AOP_PFX_10_MLS(r, uint32(imm32>>16)), AOP_IRR(14<<26, uint32(rt), uint32(ra), uint32(imm32))
// pfxload returns the two words of a prefixed load for opcode a; the
// caller fills in the immediate fields afterwards.
// NOTE(review): several opcode cases are elided in this view.
2338 func pfxload(a obj.As, reg int16, base int16, r uint32) (uint32, uint32) {
2341 return AOP_PFX_10_MLS(r, 0), AOP_IRR(42<<26, uint32(reg), uint32(base), 0)
2343 return AOP_PFX_00_8LS(r, 0), AOP_IRR(41<<26, uint32(reg), uint32(base), 0)
2345 return AOP_PFX_00_8LS(r, 0), AOP_IRR(57<<26, uint32(reg), uint32(base), 0)
2347 return AOP_PFX_10_MLS(r, 0), AOP_IRR(34<<26, uint32(reg), uint32(base), 0)
2349 return AOP_PFX_10_MLS(r, 0), AOP_IRR(40<<26, uint32(reg), uint32(base), 0)
2351 return AOP_PFX_10_MLS(r, 0), AOP_IRR(32<<26, uint32(reg), uint32(base), 0)
2353 return AOP_PFX_10_MLS(r, 0), AOP_IRR(48<<26, uint32(reg), uint32(base), 0)
2355 return AOP_PFX_10_MLS(r, 0), AOP_IRR(50<<26, uint32(reg), uint32(base), 0)
2357 log.Fatalf("Error no pfxload for %v\n", a)
// pfxstore is the store-side counterpart of pfxload.
2361 func pfxstore(a obj.As, reg int16, base int16, r uint32) (uint32, uint32) {
2364 return AOP_PFX_00_8LS(r, 0), AOP_IRR(61<<26, uint32(reg), uint32(base), 0)
2366 return AOP_PFX_10_MLS(r, 0), AOP_IRR(38<<26, uint32(reg), uint32(base), 0)
2368 return AOP_PFX_10_MLS(r, 0), AOP_IRR(44<<26, uint32(reg), uint32(base), 0)
2370 return AOP_PFX_10_MLS(r, 0), AOP_IRR(36<<26, uint32(reg), uint32(base), 0)
2372 return AOP_PFX_10_MLS(r, 0), AOP_IRR(52<<26, uint32(reg), uint32(base), 0)
2374 return AOP_PFX_10_MLS(r, 0), AOP_IRR(54<<26, uint32(reg), uint32(base), 0)
2376 log.Fatalf("Error no pfxstore for %v\n", a)
// oclass returns the operand class cached on a by oplook/aclass
// (stored as class+1, so an unclassified operand yields -1).
2380 func oclass(a *obj.Addr) int {
2381 return int(a.Class) - 1
2389 // This function determines when a non-indexed load or store is D or
2390 // DS form for use in finding the size of the offset field in the instruction.
2391 // The size is needed when setting the offset value in the instruction
2392 // and when generating relocation for that field.
2393 // DS form instructions include: ld, ldu, lwa, std, stdu. All other
2394 // loads and stores with an offset field are D form. This function should
2395 // only be called with the same opcodes as are handled by opstore and opload.
// NOTE(review): the switch header and the form-returning statements are
// elided in this view.
2396 func (c *ctxt9) opform(insn uint32) int {
2399 c.ctxt.Diag("bad insn in loadform: %x", insn)
// DS-form: 14-bit offset scaled by 4.
2400 case OPVCC(58, 0, 0, 0), // ld
2401 OPVCC(58, 0, 0, 1), // ldu
2402 OPVCC(58, 0, 0, 0) | 1<<1, // lwa
2403 OPVCC(62, 0, 0, 0), // std
2404 OPVCC(62, 0, 0, 1): //stdu
// D-form: full 16-bit offset.
2406 case OP_ADDI, // add
2407 OPVCC(32, 0, 0, 0), // lwz
2408 OPVCC(33, 0, 0, 0), // lwzu
2409 OPVCC(34, 0, 0, 0), // lbz
2410 OPVCC(35, 0, 0, 0), // lbzu
2411 OPVCC(40, 0, 0, 0), // lhz
2412 OPVCC(41, 0, 0, 0), // lhzu
2413 OPVCC(42, 0, 0, 0), // lha
2414 OPVCC(43, 0, 0, 0), // lhau
2415 OPVCC(46, 0, 0, 0), // lmw
2416 OPVCC(48, 0, 0, 0), // lfs
2417 OPVCC(49, 0, 0, 0), // lfsu
2418 OPVCC(50, 0, 0, 0), // lfd
2419 OPVCC(51, 0, 0, 0), // lfdu
2420 OPVCC(36, 0, 0, 0), // stw
2421 OPVCC(37, 0, 0, 0), // stwu
2422 OPVCC(38, 0, 0, 0), // stb
2423 OPVCC(39, 0, 0, 0), // stbu
2424 OPVCC(44, 0, 0, 0), // sth
2425 OPVCC(45, 0, 0, 0), // sthu
2426 OPVCC(47, 0, 0, 0), // stmw
2427 OPVCC(52, 0, 0, 0), // stfs
2428 OPVCC(53, 0, 0, 0), // stfsu
2429 OPVCC(54, 0, 0, 0), // stfd
2430 OPVCC(55, 0, 0, 0): // stfdu
2436 // Encode instructions and create relocation for accessing s+d according to the
2437 // instruction op with source or destination (as appropriate) register reg.
2438 func (c *ctxt9) symbolAccess(s *obj.LSym, d int64, reg int16, op uint32, reuse bool) (o1, o2 uint32, rel *obj.Reloc) {
// On AIX direct symbol access is never valid; everything goes via the TOC.
2439 if c.ctxt.Headtype == objabi.Haix {
2440 // Every symbol access must be made via a TOC anchor.
2441 c.ctxt.Diag("symbolAccess called for %s", s.Name)
// D vs DS form decides which relocation variant is chosen below.
2444 form := c.opform(op)
// NOTE(review): the selection of `base` (TOC register vs zero) is elided
// in this excerpt.
2445 if c.ctxt.Flag_shared {
2450 // If reg can be reused when computing the symbol address,
2451 // use it instead of REGTMP.
// Not reusable: build the high half in REGTMP, then op through REGTMP.
2453 o1 = AOP_IRR(OP_ADDIS, REGTMP, base, 0)
2454 o2 = AOP_IRR(op, uint32(reg), REGTMP, 0)
// Reusable: build the high half directly in reg and index off it.
2456 o1 = AOP_IRR(OP_ADDIS, uint32(reg), base, 0)
2457 o2 = AOP_IRR(op, uint32(reg), uint32(reg), 0)
// One relocation covers both instruction words, starting at the current pc.
2459 rel = obj.Addrel(c.cursym)
2460 rel.Off = int32(c.pc)
// Shared (PIC) code gets TOC-relative relocations; otherwise absolute.
// The _DS variants mark a DS-form low instruction (14-bit, 4-aligned offset).
2464 if c.ctxt.Flag_shared {
2467 rel.Type = objabi.R_ADDRPOWER_TOCREL
2469 rel.Type = objabi.R_ADDRPOWER_TOCREL_DS
2475 rel.Type = objabi.R_ADDRPOWER
2477 rel.Type = objabi.R_ADDRPOWER_DS
// getmask computes the rotate-mask begin/end bit positions (MB, ME) for the
// 32-bit mask v, presumably storing them into m[0] and m[1] — the assignments
// are elided in this excerpt. Reports whether v is a valid rlwinm-style mask.
// Wrapping masks (MB > ME) are accepted via the first test below; the
// wrap handling itself (presumably via the complement) is elided.
2486 func getmask(m []byte, v uint32) bool {
// A mask (not all-ones) with both bit 0 and bit 31 set wraps around.
2489 if v != ^uint32(0) && v&(1<<31) != 0 && v&1 != 0 { /* MB > ME */
// Scan from the most significant bit for the first set bit (mask begin).
2500 for i := 0; i < 32; i++ {
2501 if v&(1<<uint(31-i)) != 0 {
// End of the run: first clear bit after it, or the end of the word.
2506 if i >= 32 || v&(1<<uint(31-i)) == 0 {
// Any further set bit means v is not a single contiguous run: invalid.
2512 if v&(1<<uint(31-i)) != 0 {
// maskgen fills m with the MB/ME bounds for 32-bit mask v, diagnosing
// against p when v is not a generatable rotate mask.
// NOTE(review): the getmask call guarding this Diag is elided in this excerpt.
2523 func (c *ctxt9) maskgen(p *obj.Prog, m []byte, v uint32) {
2525 c.ctxt.Diag("cannot generate mask #%x\n%v", v, p)
2530 * 64-bit masks (rldic etc)
// getmask64 computes the mask begin/end bit positions for the 64-bit mask v
// (rldic and friends), presumably storing them into m — the assignments are
// elided in this excerpt. Reports whether v is one contiguous run of set
// bits; unlike getmask, no wrapping case is visible here — TODO confirm.
2532 func getmask64(m []byte, v uint64) bool {
// Find the first (most significant) set bit: the mask begin.
2535 for i := 0; i < 64; i++ {
2536 if v&(uint64(1)<<uint(63-i)) != 0 {
// Advance past the run; stop at the first clear bit or the end of the word.
2541 if i >= 64 || v&(uint64(1)<<uint(63-i)) == 0 {
// A set bit after the run means v is not contiguous: invalid.
2547 if v&(uint64(1)<<uint(63-i)) != 0 {
// maskgen64 fills m with the mask bounds for the 64-bit mask v, diagnosing
// against p when v cannot be expressed as a contiguous rotate mask.
2558 func (c *ctxt9) maskgen64(p *obj.Prog, m []byte, v uint64) {
2559 if !getmask64(m, v) {
2560 c.ctxt.Diag("cannot generate mask #%x\n%v", v, p)
// loadu32 returns one instruction loading the upper half of the 32-bit
// constant d into register r; the caller is expected to follow with an
// ori of the low 16 bits. v is presumably d>>16 — its definition is on an
// elided line, TODO confirm.
2564 func loadu32(r int, d int64) uint32 {
2566 if isuint32(uint64(d)) {
// Value fits in unsigned 32 bits: oris avoids sign extension into bits 32-63.
2567 return LOP_IRR(OP_ORIS, uint32(r), REGZERO, uint32(v))
// Otherwise addis, whose 32-bit result sign-extends to 64 bits.
2569 return AOP_IRR(OP_ADDIS, uint32(r), REGZERO, uint32(v))
// high16adjusted returns the high 16 bits of d, adjusted for pairing with a
// subsequent addi of the low 16 bits: the elided condition presumably tests
// whether bit 15 of d is set (low half negative as a signed 16-bit value),
// in which case the high half is incremented by one to compensate for the
// sign extension addi performs — TODO confirm the elided test.
2572 func high16adjusted(d int32) uint16 {
2574 return uint16((d >> 16) + 1)
2576 return uint16(d >> 16)
2579 func asmout(c *ctxt9, p *obj.Prog, o *Optab, out *[5]uint32) {
2586 //print("%v => case %d\n", p, o->type);
2589 c.ctxt.Diag("unknown type %d", o.type_)
2592 case 0: /* pseudo ops */
2595 case 2: /* int/cr/fp op Rb,[Ra],Rd */
2601 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2603 case 3: /* mov $soreg/addcon/andcon/ucon, r ==> addis/oris/addi/ori $i,reg',r */
2604 d := c.vregoff(&p.From)
2607 r := int(p.From.Reg)
2609 r = c.getimpliedreg(&p.From, p)
2611 if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 && (r != 0 || v != 0) {
2612 c.ctxt.Diag("literal operation on R0\n%v", p)
2617 log.Fatalf("invalid handling of %v", p)
2619 // For UCON operands the value is right shifted 16, using ADDIS if the
2620 // value should be signed, ORIS if unsigned.
2622 if r == REGZERO && isuint32(uint64(d)) {
2623 o1 = LOP_IRR(OP_ORIS, uint32(p.To.Reg), REGZERO, uint32(v))
2628 } else if int64(int16(d)) != d {
2629 // Operand is 16 bit value with sign bit set
2630 if o.a1 == C_ANDCON {
2631 // Needs unsigned 16 bit so use ORI
2632 if r == 0 || r == REGZERO {
2633 o1 = LOP_IRR(uint32(OP_ORI), uint32(p.To.Reg), uint32(0), uint32(v))
2636 // With ADDCON, needs signed 16 bit value, fall through to use ADDI
2637 } else if o.a1 != C_ADDCON {
2638 log.Fatalf("invalid handling of %v", p)
2642 o1 = AOP_IRR(uint32(a), uint32(p.To.Reg), uint32(r), uint32(v))
2644 case 4: /* add/mul $scon,[r1],r2 */
2645 v := c.regoff(&p.From)
2651 if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 {
2652 c.ctxt.Diag("literal operation on R0\n%v", p)
2654 if int32(int16(v)) != v {
2655 log.Fatalf("mishandled instruction %v", p)
2657 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2659 case 5: /* syscall */
2662 case 6: /* logical op Rb,[Rs,]Ra; no literal */
2668 // AROTL and AROTLW are extended mnemonics, which map to RLDCL and RLWNM.
2671 o1 = AOP_RLDIC(OP_RLDCL, uint32(p.To.Reg), uint32(r), uint32(p.From.Reg), uint32(0))
2673 o1 = OP_RLW(OP_RLWNM, uint32(p.To.Reg), uint32(r), uint32(p.From.Reg), 0, 31)
2675 if p.As == AOR && p.From.Type == obj.TYPE_CONST && p.From.Offset == 0 {
2676 // Compile "OR $0, Rx, Ry" into ori. If Rx == Ry == 0, this is the preferred
2677 // hardware no-op. This happens because $0 matches C_REG before C_ZCON.
2678 o1 = LOP_IRR(OP_ORI, uint32(p.To.Reg), uint32(r), 0)
2680 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2684 case 7: /* mov r, soreg ==> stw o(r) */
2688 r = c.getimpliedreg(&p.To, p)
2690 v := c.regoff(&p.To)
2691 if int32(int16(v)) != v {
2692 log.Fatalf("mishandled instruction %v", p)
2694 // Offsets in DS form stores must be a multiple of 4
2695 inst := c.opstore(p.As)
2696 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
2697 log.Fatalf("invalid offset for DS form load/store %v", p)
2699 o1 = AOP_IRR(inst, uint32(p.From.Reg), uint32(r), uint32(v))
2701 case 8: /* mov soreg, r ==> lbz/lhz/lwz o(r), lbz o(r) + extsb r,r */
2702 r := int(p.From.Reg)
2705 r = c.getimpliedreg(&p.From, p)
2707 v := c.regoff(&p.From)
2708 if int32(int16(v)) != v {
2709 log.Fatalf("mishandled instruction %v", p)
2711 // Offsets in DS form loads must be a multiple of 4
2712 inst := c.opload(p.As)
2713 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
2714 log.Fatalf("invalid offset for DS form load/store %v", p)
2716 o1 = AOP_IRR(inst, uint32(p.To.Reg), uint32(r), uint32(v))
2718 // Sign extend MOVB operations. This is ignored for other cases (o.size == 4).
2719 o2 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
2721 case 10: /* sub Ra,[Rb],Rd => subf Rd,Ra,Rb */
2727 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(r))
2729 case 11: /* br/bl lbra */
2732 if p.To.Target() != nil {
2733 v = int32(p.To.Target().Pc - p.Pc)
2735 c.ctxt.Diag("odd branch target address\n%v", p)
2739 if v < -(1<<25) || v >= 1<<24 {
2740 c.ctxt.Diag("branch too far\n%v", p)
2744 o1 = OP_BR(c.opirr(p.As), uint32(v), 0)
2745 if p.To.Sym != nil {
2746 rel := obj.Addrel(c.cursym)
2747 rel.Off = int32(c.pc)
2750 v += int32(p.To.Offset)
2752 c.ctxt.Diag("odd branch target address\n%v", p)
2757 rel.Type = objabi.R_CALLPOWER
2759 o2 = 0x60000000 // nop, sometimes overwritten by ld r2, 24(r1) when dynamic linking
2761 case 13: /* mov[bhwd]{z,} r,r */
2762 // This needs to handle "MOV* $0, Rx". This shows up because $0 also
2763 // matches C_REG if r0iszero. This happens because C_REG sorts before C_ANDCON
2764 // TODO: fix the above behavior and cleanup this exception.
2765 if p.From.Type == obj.TYPE_CONST {
2766 o1 = LOP_IRR(OP_ADDI, REGZERO, uint32(p.To.Reg), 0)
2769 if p.To.Type == obj.TYPE_CONST {
2770 c.ctxt.Diag("cannot move into constant 0\n%v", p)
2775 o1 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2777 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.From.Reg), 0, 24, 31)
2779 o1 = LOP_RRR(OP_EXTSH, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2781 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.From.Reg), 0, 16, 31)
2783 o1 = LOP_RRR(OP_EXTSW, uint32(p.To.Reg), uint32(p.From.Reg), 0)
2785 o1 = OP_RLW(OP_RLDIC, uint32(p.To.Reg), uint32(p.From.Reg), 0, 0, 0) | 1<<5 /* MB=32 */
2787 o1 = LOP_RRR(OP_OR, uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.From.Reg))
2789 c.ctxt.Diag("internal: bad register move/truncation\n%v", p)
2792 case 14: /* rldc[lr] Rb,Rs,$mask,Ra -- left, right give different masks */
2798 d := c.vregoff(p.GetFrom3())
2802 // These opcodes expect a mask operand that has to be converted into the
2803 // appropriate operand. The way these were defined, not all valid masks are possible.
2804 // Left here for compatibility in case they were used or generated.
2805 case ARLDCL, ARLDCLCC:
2807 c.maskgen64(p, mask[:], uint64(d))
2809 a = int(mask[0]) /* MB */
2811 c.ctxt.Diag("invalid mask for rotate: %x (end != bit 63)\n%v", uint64(d), p)
2813 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2814 o1 |= (uint32(a) & 31) << 6
2816 o1 |= 1 << 5 /* mb[5] is top bit */
2819 case ARLDCR, ARLDCRCC:
2821 c.maskgen64(p, mask[:], uint64(d))
2823 a = int(mask[1]) /* ME */
2825 c.ctxt.Diag("invalid mask for rotate: %x %x (start != 0)\n%v", uint64(d), mask[0], p)
2827 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
2828 o1 |= (uint32(a) & 31) << 6
2830 o1 |= 1 << 5 /* mb[5] is top bit */
2833 // These opcodes use a shift count like the ppc64 asm, no mask conversion done
2834 case ARLDICR, ARLDICRCC:
2836 sh := c.regoff(&p.From)
2837 if me < 0 || me > 63 || sh > 63 {
2838 c.ctxt.Diag("Invalid me or sh for RLDICR: %x %x\n%v", int(d), sh, p)
2840 o1 = AOP_RLDIC(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(sh), uint32(me))
2842 case ARLDICL, ARLDICLCC, ARLDIC, ARLDICCC:
2844 sh := c.regoff(&p.From)
2845 if mb < 0 || mb > 63 || sh > 63 {
2846 c.ctxt.Diag("Invalid mb or sh for RLDIC, RLDICL: %x %x\n%v", mb, sh, p)
2848 o1 = AOP_RLDIC(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(sh), uint32(mb))
2851 // This is an extended mnemonic defined in the ISA section C.8.1
2852 // clrlsldi ra,rs,b,n --> rldic ra,rs,n,b-n
2853 // It maps onto RLDIC so is directly generated here based on the operands from
2856 b := c.regoff(&p.From)
2857 if n > b || b > 63 {
2858 c.ctxt.Diag("Invalid n or b for CLRLSLDI: %x %x\n%v", n, b, p)
2860 o1 = AOP_RLDIC(OP_RLDIC, uint32(p.To.Reg), uint32(r), uint32(n), uint32(b)-uint32(n))
2863 c.ctxt.Diag("unexpected op in rldc case\n%v", p)
2867 case 17, /* bc bo,bi,lbra (same for now) */
2868 16: /* bc bo,bi,sbra */
2873 if p.From.Type == obj.TYPE_CONST {
2874 a = int(c.regoff(&p.From))
2875 } else if p.From.Type == obj.TYPE_REG {
2877 c.ctxt.Diag("unexpected register setting for branch with CR: %d\n", r)
2879 // BI values for the CR
2898 c.ctxt.Diag("unrecognized register: expecting CR\n")
2902 if p.To.Target() != nil {
2903 v = int32(p.To.Target().Pc - p.Pc)
2906 c.ctxt.Diag("odd branch target address\n%v", p)
2910 if v < -(1<<16) || v >= 1<<15 {
2911 c.ctxt.Diag("branch too far\n%v", p)
2913 o1 = OP_BC(c.opirr(p.As), uint32(a), uint32(r), uint32(v), 0)
2915 case 18: /* br/bl (lr/ctr); bc/bcl bo,bi,(lr/ctr) */
2918 if p.As == ABC || p.As == ABCL {
2919 v = c.regoff(&p.From) & 31
2921 v = 20 /* unconditional */
2927 switch oclass(&p.To) {
2929 o1 = OPVCC(19, 528, 0, 0)
2932 o1 = OPVCC(19, 16, 0, 0)
2935 c.ctxt.Diag("bad optab entry (18): %d\n%v", p.To.Class, p)
2939 // Insert optional branch hint for bclr[l]/bcctr[l]
2940 if p.From3Type() != obj.TYPE_NONE {
2941 bh = uint32(p.GetFrom3().Offset)
2942 if bh == 2 || bh > 3 {
2943 log.Fatalf("BH must be 0,1,3 for %v", p)
2948 if p.As == ABL || p.As == ABCL {
2951 o1 = OP_BCR(o1, uint32(v), uint32(r))
2953 case 19: /* mov $lcon,r ==> cau+or */
2954 d := c.vregoff(&p.From)
2956 o1, o2 = pfxadd(p.To.Reg, REG_R0, PFX_R_ABS, d)
2958 o1 = loadu32(int(p.To.Reg), d)
2959 o2 = LOP_IRR(OP_ORI, uint32(p.To.Reg), uint32(p.To.Reg), uint32(int32(d)))
2962 case 20: /* add $ucon,,r | addis $addcon,r,r */
2963 v := c.regoff(&p.From)
2969 if p.As == AADD && (r0iszero == 0 /*TypeKind(100016)*/ && p.Reg == 0 || r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0) {
2970 c.ctxt.Diag("literal operation on R0\n%v", p)
2973 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
2975 o1 = AOP_IRR(c.opirr(AADDIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
2978 case 22: /* add $lcon/$andcon,r1,r2 ==> oris+ori+add/ori+add */
2979 if p.To.Reg == REGTMP || p.Reg == REGTMP {
2980 c.ctxt.Diag("can't synthesize large constant\n%v", p)
2982 d := c.vregoff(&p.From)
2987 if p.From.Sym != nil {
2988 c.ctxt.Diag("%v is not supported", p)
2990 // If operand is ANDCON, generate 2 instructions using
2991 // ORI for unsigned value; with LCON 3 instructions.
2993 o1 = LOP_IRR(OP_ORI, REGTMP, REGZERO, uint32(int32(d)))
2994 o2 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
2996 o1 = loadu32(REGTMP, d)
2997 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(int32(d)))
2998 o3 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
3002 o1, o2 = pfxadd(int16(p.To.Reg), int16(r), PFX_R_ABS, d)
3005 case 23: /* and $lcon/$addcon,r1,r2 ==> oris+ori+and/addi+and */
3006 if p.To.Reg == REGTMP || p.Reg == REGTMP {
3007 c.ctxt.Diag("can't synthesize large constant\n%v", p)
3009 d := c.vregoff(&p.From)
3015 // With ADDCON operand, generate 2 instructions using ADDI for signed value,
3016 // with LCON operand generate 3 instructions.
3018 o1 = LOP_IRR(OP_ADDI, REGZERO, REGTMP, uint32(int32(d)))
3019 o2 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
3021 o1 = loadu32(REGTMP, d)
3022 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(int32(d)))
3023 o3 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
3025 if p.From.Sym != nil {
3026 c.ctxt.Diag("%v is not supported", p)
3029 case 24: /* lfd fA,float64(0) -> xxlxor xsA,xsaA,xsaA + fneg for -0 */
3030 o1 = AOP_XX3I(c.oprrr(AXXLXOR), uint32(p.To.Reg), uint32(p.To.Reg), uint32(p.To.Reg), uint32(0))
3031 // This is needed for -0.
3033 o2 = AOP_RRR(c.oprrr(AFNEG), uint32(p.To.Reg), 0, uint32(p.To.Reg))
3037 /* sld[.] $sh,rS,rA -> rldicr[.] $sh,rS,mask(0,63-sh),rA; srd[.] -> rldicl */
3038 v := c.regoff(&p.From)
3063 case AEXTSWSLI, AEXTSWSLICC:
3066 c.ctxt.Diag("unexpected op in sldi case\n%v", p)
3071 if p.As == AEXTSWSLI || p.As == AEXTSWSLICC {
3072 o1 = AOP_EXTSWSLI(OP_EXTSWSLI, uint32(r), uint32(p.To.Reg), uint32(v))
3075 o1 = AOP_RLDIC(op, uint32(p.To.Reg), uint32(r), uint32(v), uint32(a))
3077 if p.As == ASLDCC || p.As == ASRDCC || p.As == AEXTSWSLICC {
3078 o1 |= 1 // Set the condition code bit
3081 case 26: /* mov $lsext/auto/oreg,,r2 ==> addis+addi */
3082 v := c.vregoff(&p.From)
3083 r := int(p.From.Reg)
3086 switch p.From.Name {
3087 case obj.NAME_EXTERN, obj.NAME_STATIC:
3088 // Load a 32 bit constant, or relocation depending on if a symbol is attached
3089 o1, o2, rel = c.symbolAccess(p.From.Sym, v, p.To.Reg, OP_ADDI, true)
3092 r = c.getimpliedreg(&p.From, p)
3094 // Add a 32 bit offset to a register.
3095 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), uint32(r), uint32(high16adjusted(int32(v))))
3096 o2 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), uint32(p.To.Reg), uint32(v))
3101 o1, o2 = pfxadd(int16(p.To.Reg), int16(r), PFX_R_ABS, v)
3103 o1, o2 = pfxadd(int16(p.To.Reg), REG_R0, PFX_R_PCREL, 0)
3104 rel.Type = objabi.R_ADDRPOWER_PCREL34
3108 case 27: /* subc ra,$simm,rd => subfic rd,ra,$simm */
3109 v := c.regoff(p.GetFrom3())
3111 r := int(p.From.Reg)
3112 o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
3114 case 28: /* subc r1,$lcon,r2 ==> cau+or+subfc */
3115 if p.To.Reg == REGTMP || p.From.Reg == REGTMP {
3116 c.ctxt.Diag("can't synthesize large constant\n%v", p)
3118 v := c.regoff(p.GetFrom3())
3119 o1 = AOP_IRR(OP_ADDIS, REGTMP, REGZERO, uint32(v)>>16)
3120 o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(v))
3121 o3 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), REGTMP)
3122 if p.From.Sym != nil {
3123 c.ctxt.Diag("%v is not supported", p)
3126 case 29: /* rldic[lr]? $sh,s,$mask,a -- left, right, plain give different masks */
3127 v := c.regoff(&p.From)
3129 d := c.vregoff(p.GetFrom3())
3131 c.maskgen64(p, mask[:], uint64(d))
3134 case ARLDC, ARLDCCC:
3135 a = int(mask[0]) /* MB */
3136 if int32(mask[1]) != (63 - v) {
3137 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[1], v, p)
3140 case ARLDCL, ARLDCLCC:
3141 a = int(mask[0]) /* MB */
3143 c.ctxt.Diag("invalid mask for shift: %x %s (shift %d)\n%v", uint64(d), mask[1], v, p)
3146 case ARLDCR, ARLDCRCC:
3147 a = int(mask[1]) /* ME */
3149 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[0], v, p)
3153 c.ctxt.Diag("unexpected op in rldic case\n%v", p)
3157 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
3158 o1 |= (uint32(a) & 31) << 6
3163 o1 |= 1 << 5 /* mb[5] is top bit */
3166 case 30: /* rldimi $sh,s,$mask,a */
3167 v := c.regoff(&p.From)
3169 d := c.vregoff(p.GetFrom3())
3171 // Original opcodes had mask operands which had to be converted to a shift count as expected by
3174 case ARLDMI, ARLDMICC:
3176 c.maskgen64(p, mask[:], uint64(d))
3177 if int32(mask[1]) != (63 - v) {
3178 c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[1], v, p)
3180 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
3181 o1 |= (uint32(mask[0]) & 31) << 6
3185 if mask[0]&0x20 != 0 {
3186 o1 |= 1 << 5 /* mb[5] is top bit */
3189 // Opcodes with shift count operands.
3190 case ARLDIMI, ARLDIMICC:
3191 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
3192 o1 |= (uint32(d) & 31) << 6
3201 case 31: /* dword */
3202 d := c.vregoff(&p.From)
3204 if c.ctxt.Arch.ByteOrder == binary.BigEndian {
3205 o1 = uint32(d >> 32)
3209 o2 = uint32(d >> 32)
3212 if p.From.Sym != nil {
3213 rel := obj.Addrel(c.cursym)
3214 rel.Off = int32(c.pc)
3216 rel.Sym = p.From.Sym
3217 rel.Add = p.From.Offset
3218 rel.Type = objabi.R_ADDR
3223 case 32: /* fmul frc,fra,frd */
3229 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0) | (uint32(p.From.Reg)&31)<<6
3231 case 33: /* fabs [frb,]frd; fmr. frb,frd */
3232 r := int(p.From.Reg)
3234 if oclass(&p.From) == C_NONE {
3237 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), 0, uint32(r))
3239 case 34: /* FMADDx fra,frb,frc,frt (t=a*c±b) */
3240 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg)) | (uint32(p.GetFrom3().Reg)&31)<<6
3242 case 35: /* mov r,lext/lauto/loreg ==> cau $(v>>16),sb,r'; store o(r') */
3243 v := c.regoff(&p.To)
3247 r = c.getimpliedreg(&p.To, p)
3249 // Offsets in DS form stores must be a multiple of 4
3251 o1, o2 = pfxstore(p.As, p.From.Reg, int16(r), PFX_R_ABS)
3252 o1 |= uint32((v >> 16) & 0x3FFFF)
3253 o2 |= uint32(v & 0xFFFF)
3255 inst := c.opstore(p.As)
3256 if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3257 log.Fatalf("invalid offset for DS form load/store %v", p)
3259 o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v)))
3260 o2 = AOP_IRR(inst, uint32(p.From.Reg), REGTMP, uint32(v))
3263 case 36: /* mov b/bz/h/hz lext/lauto/lreg,r ==> lbz+extsb/lbz/lha/lhz etc */
3264 v := c.regoff(&p.From)
3266 r := int(p.From.Reg)
3268 r = c.getimpliedreg(&p.From, p)
3272 o1, o2 = pfxload(p.As, p.To.Reg, int16(r), PFX_R_ABS)
3273 o1 |= uint32((v >> 16) & 0x3FFFF)
3274 o2 |= uint32(v & 0xFFFF)
3277 // Reuse the base register when loading a GPR (C_REG) to avoid
3278 // using REGTMP (R31) when possible.
3279 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), uint32(r), uint32(high16adjusted(v)))
3280 o2 = AOP_IRR(c.opload(p.As), uint32(p.To.Reg), uint32(p.To.Reg), uint32(v))
3282 o1 = AOP_IRR(OP_ADDIS, uint32(REGTMP), uint32(r), uint32(high16adjusted(v)))
3283 o2 = AOP_IRR(c.opload(p.As), uint32(p.To.Reg), uint32(REGTMP), uint32(v))
3287 // Sign extend MOVB if needed
3288 o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3291 o1 = uint32(c.regoff(&p.From))
3293 case 41: /* stswi */
3294 if p.To.Type == obj.TYPE_MEM && p.To.Index == 0 && p.To.Offset != 0 {
3295 c.ctxt.Diag("Invalid addressing mode used in index type instruction: %v", p.As)
3298 o1 = AOP_RRR(c.opirr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), 0) | (uint32(c.regoff(p.GetFrom3()))&0x7F)<<11
3301 if p.From.Type == obj.TYPE_MEM && p.From.Index == 0 && p.From.Offset != 0 {
3302 c.ctxt.Diag("Invalid addressing mode used in index type instruction: %v", p.As)
3304 o1 = AOP_RRR(c.opirr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), 0) | (uint32(c.regoff(p.GetFrom3()))&0x7F)<<11
3306 case 43: /* data cache instructions: op (Ra+[Rb]), [th|l] */
3307 /* TH field for dcbt/dcbtst: */
3308 /* 0 = Block access - program will soon access EA. */
3309 /* 8-15 = Stream access - sequence of access (data stream). See section 4.3.2 of the ISA for details. */
3310 /* 16 = Block access - program will soon make a transient access to EA. */
3311 /* 17 = Block access - program will not access EA for a long time. */
3313 /* L field for dcbf: */
3314 /* 0 = invalidates the block containing EA in all processors. */
3315 /* 1 = same as 0, but with limited scope (i.e. block in the current processor will not be reused soon). */
3316 /* 3 = same as 1, but with even more limited scope (i.e. block in the current processor primary cache will not be reused soon). */
3317 if p.To.Type == obj.TYPE_NONE {
3318 o1 = AOP_RRR(c.oprrr(p.As), 0, uint32(p.From.Index), uint32(p.From.Reg))
3320 th := c.regoff(&p.To)
3321 o1 = AOP_RRR(c.oprrr(p.As), uint32(th), uint32(p.From.Index), uint32(p.From.Reg))
3324 case 44: /* indexed store */
3325 o1 = AOP_RRR(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(p.To.Reg))
3327 case 45: /* indexed load */
3329 /* The assembler accepts a 4-operand l*arx instruction. The fourth operand is an Exclusive Access Hint (EH) */
3330 /* The EH field can be used as a lock acquire/release hint as follows: */
3331 /* 0 = Atomic Update (fetch-and-operate or similar algorithm) */
3332 /* 1 = Exclusive Access (lock acquire and release) */
3333 case ALBAR, ALHAR, ALWAR, ALDAR:
3334 if p.From3Type() != obj.TYPE_NONE {
3335 eh := int(c.regoff(p.GetFrom3()))
3337 c.ctxt.Diag("illegal EH field\n%v", p)
3339 o1 = AOP_RRRI(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg), uint32(eh))
3341 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3344 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3346 case 46: /* plain op */
3349 case 47: /* op Ra, Rd; also op [Ra,] Rd */
3350 r := int(p.From.Reg)
3355 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0)
3357 case 48: /* op Rs, Ra */
3358 r := int(p.From.Reg)
3363 o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0)
3365 case 49: /* op Rb; op $n, Rb */
3366 if p.From.Type != obj.TYPE_REG { /* tlbie $L, rB */
3367 v := c.regoff(&p.From) & 1
3368 o1 = AOP_RRR(c.oprrr(p.As), 0, 0, uint32(p.To.Reg)) | uint32(v)<<21
3370 o1 = AOP_RRR(c.oprrr(p.As), 0, 0, uint32(p.From.Reg))
3373 case 50: /* rem[u] r1[,r2],r3 */
3380 t := v & (1<<10 | 1) /* OE|Rc */
3381 o1 = AOP_RRR(v&^t, REGTMP, uint32(r), uint32(p.From.Reg))
3382 o2 = AOP_RRR(OP_MULLW, REGTMP, REGTMP, uint32(p.From.Reg))
3383 o3 = AOP_RRR(OP_SUBF|t, uint32(p.To.Reg), REGTMP, uint32(r))
3387 /* Clear top 32 bits */
3388 o3 = OP_RLW(OP_RLDIC, REGTMP, REGTMP, 0, 0, 0) | 1<<5
3391 case 51: /* remd[u] r1[,r2],r3 */
3398 t := v & (1<<10 | 1) /* OE|Rc */
3399 o1 = AOP_RRR(v&^t, REGTMP, uint32(r), uint32(p.From.Reg))
3400 o2 = AOP_RRR(OP_MULLD, REGTMP, REGTMP, uint32(p.From.Reg))
3401 o3 = AOP_RRR(OP_SUBF|t, uint32(p.To.Reg), REGTMP, uint32(r))
3402 /* cases 50,51: removed; can be reused. */
3404 /* cases 50,51: removed; can be reused. */
3406 case 52: /* mtfsbNx cr(n) */
3407 v := c.regoff(&p.From) & 31
3409 o1 = AOP_RRR(c.oprrr(p.As), uint32(v), 0, 0)
3411 case 53: /* mffsX ,fr1 */
3412 o1 = AOP_RRR(OP_MFFS, uint32(p.To.Reg), 0, 0)
3414 case 55: /* op Rb, Rd */
3415 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), 0, uint32(p.From.Reg))
3417 case 56: /* sra $sh,[s,]a; srd $sh,[s,]a */
3418 v := c.regoff(&p.From)
3424 o1 = AOP_RRR(c.opirr(p.As), uint32(r), uint32(p.To.Reg), uint32(v)&31)
3425 if (p.As == ASRAD || p.As == ASRADCC) && (v&0x20 != 0) {
3426 o1 |= 1 << 1 /* mb[5] */
3429 case 57: /* slw $sh,[s,]a -> rlwinm ... */
3430 v := c.regoff(&p.From)
3438 * Let user (gs) shoot himself in the foot.
3439 * qc has already complained.
3442 ctxt->diag("illegal shift %ld\n%v", v, p);
3452 mask[0], mask[1] = 0, 31
3454 mask[0], mask[1] = uint8(v), 31
3457 mask[0], mask[1] = 0, uint8(31-v)
3459 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(r), uint32(v), uint32(mask[0]), uint32(mask[1]))
3460 if p.As == ASLWCC || p.As == ASRWCC {
3461 o1 |= 1 // set the condition code
3464 case 58: /* logical $andcon,[s],a */
3465 v := c.regoff(&p.From)
3471 o1 = LOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
3473 case 59: /* or/xor/and $ucon,,r | oris/xoris/andis $addcon,r,r */
3474 v := c.regoff(&p.From)
3482 o1 = LOP_IRR(c.opirr(AORIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16) /* oris, xoris, andis. */
3484 o1 = LOP_IRR(c.opirr(AXORIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
3486 o1 = LOP_IRR(c.opirr(AANDISCC), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
3488 o1 = LOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
3491 case 60: /* tw to,a,b */
3492 r := int(c.regoff(&p.From) & 31)
3494 o1 = AOP_RRR(c.oprrr(p.As), uint32(r), uint32(p.Reg), uint32(p.To.Reg))
3496 case 61: /* tw to,a,$simm */
3497 r := int(c.regoff(&p.From) & 31)
3499 v := c.regoff(&p.To)
3500 o1 = AOP_IRR(c.opirr(p.As), uint32(r), uint32(p.Reg), uint32(v))
3502 case 62: /* rlwmi $sh,s,$mask,a */
3503 v := c.regoff(&p.From)
3506 n := c.regoff(p.GetFrom3())
3507 // This is an extended mnemonic described in the ISA C.8.2
3508 // clrlslwi ra,rs,b,n -> rlwinm ra,rs,n,b-n,31-n
3509 // It maps onto rlwinm which is directly generated here.
3510 if n > v || v >= 32 {
3511 c.ctxt.Diag("Invalid n or b for CLRLSLWI: %x %x\n%v", v, n, p)
3514 o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.Reg), uint32(n), uint32(v-n), uint32(31-n))
3517 c.maskgen(p, mask[:], uint32(c.regoff(p.GetFrom3())))
3518 o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), uint32(v))
3519 o1 |= (uint32(mask[0])&31)<<6 | (uint32(mask[1])&31)<<1
3522 case 63: /* rlwmi b,s,$mask,a */
3524 c.maskgen(p, mask[:], uint32(c.regoff(p.GetFrom3())))
3525 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.Reg), uint32(p.To.Reg), uint32(p.From.Reg))
3526 o1 |= (uint32(mask[0])&31)<<6 | (uint32(mask[1])&31)<<1
3528 case 64: /* mtfsf fr[, $m] {,fpcsr} */
3530 if p.From3Type() != obj.TYPE_NONE {
3531 v = c.regoff(p.GetFrom3()) & 255
3535 o1 = OP_MTFSF | uint32(v)<<17 | uint32(p.From.Reg)<<11
3537 case 65: /* MOVFL $imm,FPSCR(n) => mtfsfi crfd,imm */
3539 c.ctxt.Diag("must specify FPSCR(n)\n%v", p)
3541 o1 = OP_MTFSFI | (uint32(p.To.Reg)&15)<<23 | (uint32(c.regoff(&p.From))&31)<<12
3543 case 66: /* mov spr,r1; mov r1,spr */
3546 if REG_R0 <= p.From.Reg && p.From.Reg <= REG_R31 {
3549 o1 = OPVCC(31, 467, 0, 0) /* mtspr */
3552 v = int32(p.From.Reg)
3553 o1 = OPVCC(31, 339, 0, 0) /* mfspr */
3556 o1 = AOP_RRR(o1, uint32(r), 0, 0) | (uint32(v)&0x1f)<<16 | ((uint32(v)>>5)&0x1f)<<11
3558 case 67: /* mcrf crfD,crfS */
3559 if p.From.Reg == REG_CR || p.To.Reg == REG_CR {
3560 c.ctxt.Diag("CR argument must be a conditional register field (CR0-CR7)\n%v", p)
3562 o1 = AOP_RRR(OP_MCRF, ((uint32(p.To.Reg) & 7) << 2), ((uint32(p.From.Reg) & 7) << 2), 0)
3564 case 68: /* mfcr rD; mfocrf CRM,rD */
3565 o1 = AOP_RRR(OP_MFCR, uint32(p.To.Reg), 0, 0) /* form, whole register */
3566 if p.From.Reg != REG_CR {
3567 v := uint32(1) << uint(7-(p.From.Reg&7)) /* CR(n) */
3568 o1 |= 1<<20 | v<<12 /* new form, mfocrf */
3571 case 69: /* mtcrf CRM,rS, mtocrf CRx,rS */
3573 if p.To.Reg == REG_CR {
3575 } else if p.To.Offset != 0 { // MOVFL gpr, constant
3576 v = uint32(p.To.Offset)
3577 } else { // p.To.Reg == REG_CRx
3578 v = 1 << uint(7-(p.To.Reg&7))
3580 // Use mtocrf form if only one CR field moved.
3581 if bits.OnesCount32(v) == 1 {
3585 o1 = AOP_RRR(OP_MTCRF, uint32(p.From.Reg), 0, 0) | uint32(v)<<12
3587 case 70: /* [f]cmp r,r,cr*/
3592 r = (int(p.Reg) & 7) << 2
3594 o1 = AOP_RRR(c.oprrr(p.As), uint32(r), uint32(p.From.Reg), uint32(p.To.Reg))
3596 case 71: /* cmp[l] r,i,cr*/
3601 r = (int(p.Reg) & 7) << 2
3603 o1 = AOP_RRR(c.opirr(p.As), uint32(r), uint32(p.From.Reg), 0) | uint32(c.regoff(&p.To))&0xffff
3605 case 72: /* slbmte (Rb+Rs -> slb[Rb]) -> Rs, Rb */
3606 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), 0, uint32(p.To.Reg))
3608 case 73: /* mcrfs crfD,crfS */
3609 if p.From.Type != obj.TYPE_REG || p.From.Reg != REG_FPSCR || p.To.Type != obj.TYPE_REG || p.To.Reg < REG_CR0 || REG_CR7 < p.To.Reg {
3610 c.ctxt.Diag("illegal FPSCR/CR field number\n%v", p)
3612 o1 = AOP_RRR(OP_MCRFS, ((uint32(p.To.Reg) & 7) << 2), ((0 & 7) << 2), 0)
3614 case 77: /* syscall $scon, syscall Rx */
3615 if p.From.Type == obj.TYPE_CONST {
3616 if p.From.Offset > BIG || p.From.Offset < -BIG {
3617 c.ctxt.Diag("illegal syscall, sysnum too large: %v", p)
3619 o1 = AOP_IRR(OP_ADDI, REGZERO, REGZERO, uint32(p.From.Offset))
3620 } else if p.From.Type == obj.TYPE_REG {
3621 o1 = LOP_RRR(OP_OR, REGZERO, uint32(p.From.Reg), uint32(p.From.Reg))
3623 c.ctxt.Diag("illegal syscall: %v", p)
3624 o1 = 0x7fe00008 // trap always
3628 o3 = AOP_RRR(c.oprrr(AXOR), REGZERO, REGZERO, REGZERO) // XOR R0, R0
3630 case 78: /* undef */
3631 o1 = 0 /* "An instruction consisting entirely of binary 0s is guaranteed
3632 always to be an illegal instruction." */
3634 /* relocation operations */
3637 v := c.vregoff(&p.To)
3638 // Offsets in DS form stores must be a multiple of 4
3639 inst := c.opstore(p.As)
3641 // Can't reuse base for store instructions.
3642 o1, o2, rel = c.symbolAccess(p.To.Sym, v, p.From.Reg, inst, false)
3644 // Rewrite as a prefixed store if supported.
3646 o1, o2 = pfxstore(p.As, p.From.Reg, REG_R0, PFX_R_PCREL)
3647 rel.Type = objabi.R_ADDRPOWER_PCREL34
3648 } else if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3649 log.Fatalf("invalid offset for DS form load/store %v", p)
3652 case 75: // 32 bit offset symbol loads (got/toc/addr)
3656 // Offsets in DS form loads must be a multiple of 4
3657 inst := c.opload(p.As)
3658 switch p.From.Name {
3659 case obj.NAME_GOTREF, obj.NAME_TOCREF:
3661 c.ctxt.Diag("invalid offset for GOT/TOC access %v", p)
3663 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0)
3664 o2 = AOP_IRR(inst, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3665 rel = obj.Addrel(c.cursym)
3666 rel.Off = int32(c.pc)
3668 rel.Sym = p.From.Sym
3669 switch p.From.Name {
3670 case obj.NAME_GOTREF:
3671 rel.Type = objabi.R_ADDRPOWER_GOT
3672 case obj.NAME_TOCREF:
3673 rel.Type = objabi.R_ADDRPOWER_TOCREL_DS
3676 reuseBaseReg := o.a6 == C_REG
3677 // Reuse To.Reg as base register if it is a GPR.
3678 o1, o2, rel = c.symbolAccess(p.From.Sym, v, p.To.Reg, inst, reuseBaseReg)
3681 // Convert to prefixed forms if supported.
3684 case objabi.R_ADDRPOWER, objabi.R_ADDRPOWER_DS,
3685 objabi.R_ADDRPOWER_TOCREL, objabi.R_ADDRPOWER_TOCREL_DS:
3686 o1, o2 = pfxload(p.As, p.To.Reg, REG_R0, PFX_R_PCREL)
3687 rel.Type = objabi.R_ADDRPOWER_PCREL34
3688 case objabi.R_POWER_TLS_IE:
3689 o1, o2 = pfxload(p.As, p.To.Reg, REG_R0, PFX_R_PCREL)
3690 rel.Type = objabi.R_POWER_TLS_IE_PCREL34
3691 case objabi.R_ADDRPOWER_GOT:
3692 o1, o2 = pfxload(p.As, p.To.Reg, REG_R0, PFX_R_PCREL)
3693 rel.Type = objabi.R_ADDRPOWER_GOT_PCREL34
3695 // We've failed to convert a TOC-relative relocation to a PC-relative one.
3696 log.Fatalf("Unable convert TOC-relative relocation %v to PC-relative", rel.Type)
3698 } else if c.opform(inst) == DS_FORM && v&0x3 != 0 {
3699 log.Fatalf("invalid offset for DS form load/store %v", p)
3702 o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3705 if p.From.Offset != 0 {
3706 c.ctxt.Diag("invalid offset against tls var %v", p)
3708 rel := obj.Addrel(c.cursym)
3709 rel.Off = int32(c.pc)
3711 rel.Sym = p.From.Sym
3713 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R13, 0)
3714 o2 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3715 rel.Type = objabi.R_POWER_TLS_LE
3717 o1, o2 = pfxadd(p.To.Reg, REG_R13, PFX_R_ABS, 0)
3718 rel.Type = objabi.R_POWER_TLS_LE_TPREL34
3722 if p.From.Offset != 0 {
3723 c.ctxt.Diag("invalid offset against tls var %v", p)
3725 rel := obj.Addrel(c.cursym)
3726 rel.Off = int32(c.pc)
3728 rel.Sym = p.From.Sym
3729 rel.Type = objabi.R_POWER_TLS_IE
3731 o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0)
3732 o2 = AOP_IRR(c.opload(AMOVD), uint32(p.To.Reg), uint32(p.To.Reg), 0)
3734 o1, o2 = pfxload(p.As, p.To.Reg, REG_R0, PFX_R_PCREL)
3735 rel.Type = objabi.R_POWER_TLS_IE_PCREL34
3737 o3 = AOP_RRR(OP_ADD, uint32(p.To.Reg), uint32(p.To.Reg), REG_R13)
3738 rel = obj.Addrel(c.cursym)
3739 rel.Off = int32(c.pc) + 8
3741 rel.Sym = p.From.Sym
3742 rel.Type = objabi.R_POWER_TLS
3744 case 82: /* vector instructions, VX-form and VC-form */
3745 if p.From.Type == obj.TYPE_REG {
3746 /* reg reg none OR reg reg reg */
3747 /* 3-register operand order: VRA, VRB, VRT */
3748 /* 2-register operand order: VRA, VRT */
3749 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3750 } else if p.From3Type() == obj.TYPE_CONST {
3751 /* imm imm reg reg */
3752 /* operand order: SIX, VRA, ST, VRT */
3753 six := int(c.regoff(&p.From))
3754 st := int(c.regoff(p.GetFrom3()))
3755 o1 = AOP_IIRR(c.opiirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(st), uint32(six))
3756 } else if p.From3Type() == obj.TYPE_NONE && p.Reg != 0 {
3758 /* operand order: UIM, VRB, VRT */
3759 uim := int(c.regoff(&p.From))
3760 o1 = AOP_VIRR(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(uim))
3763 /* operand order: SIM, VRT */
3764 sim := int(c.regoff(&p.From))
3765 o1 = AOP_IR(c.opirr(p.As), uint32(p.To.Reg), uint32(sim))
3768 case 83: /* vector instructions, VA-form */
3769 if p.From.Type == obj.TYPE_REG {
3770 /* reg reg reg reg */
3771 /* 4-register operand order: VRA, VRB, VRC, VRT */
3772 o1 = AOP_RRRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg))
3773 } else if p.From.Type == obj.TYPE_CONST {
3774 /* imm reg reg reg */
3775 /* operand order: SHB, VRA, VRB, VRT */
3776 shb := int(c.regoff(&p.From))
3777 o1 = AOP_IRRR(c.opirrr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg), uint32(shb))
3780 case 84: // ISEL BC,RA,RB,RT -> isel rt,ra,rb,bc
3781 bc := c.vregoff(&p.From)
3782 if o.a1 == C_CRBIT {
3783 // CR bit is encoded as a register, not a constant.
3784 bc = int64(p.From.Reg)
3787 // rt = To.Reg, ra = p.Reg, rb = p.From3.Reg
3788 o1 = AOP_ISEL(OP_ISEL, uint32(p.To.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg), uint32(bc))
3790 case 85: /* vector instructions, VX-form */
3792 /* 2-register operand order: VRB, VRT */
3793 o1 = AOP_RR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg))
3795 case 86: /* VSX indexed store, XX1-form */
3797 /* 3-register operand order: XT, (RB)(RA*1) */
3798 o1 = AOP_XX1(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(p.To.Reg))
3800 case 87: /* VSX indexed load, XX1-form */
3802 /* 3-register operand order: (RB)(RA*1), XT */
3803 o1 = AOP_XX1(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
3805 case 88: /* VSX mfvsr* instructions, XX1-form XS,RA */
3806 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3808 case 89: /* VSX instructions, XX2-form */
3809 /* reg none reg OR reg imm reg */
3810 /* 2-register operand order: XB, XT or XB, UIM, XT*/
3811 uim := int(c.regoff(p.GetFrom3()))
3812 o1 = AOP_XX2(c.oprrr(p.As), uint32(p.To.Reg), uint32(uim), uint32(p.From.Reg))
3814 case 90: /* VSX instructions, XX3-form */
3815 if p.From3Type() == obj.TYPE_NONE {
3817 /* 3-register operand order: XA, XB, XT */
3818 o1 = AOP_XX3(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3819 } else if p.From3Type() == obj.TYPE_CONST {
3820 /* reg reg reg imm */
3821 /* operand order: XA, XB, DM, XT */
3822 dm := int(c.regoff(p.GetFrom3()))
3823 o1 = AOP_XX3I(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(dm))
3826 case 91: /* VSX instructions, XX4-form */
3827 /* reg reg reg reg */
3828 /* 3-register operand order: XA, XB, XC, XT */
3829 o1 = AOP_XX4(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg))
3831 case 92: /* X-form instructions, 3-operands */
3832 if p.To.Type == obj.TYPE_CONST {
3834 xf := int32(p.From.Reg)
3835 if REG_F0 <= xf && xf <= REG_F31 {
3836 /* operand order: FRA, FRB, BF */
3837 bf := int(c.regoff(&p.To)) << 2
3838 o1 = AOP_RRR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg), uint32(p.Reg))
3840 /* operand order: RA, RB, L */
3841 l := int(c.regoff(&p.To))
3842 o1 = AOP_RRR(c.opirr(p.As), uint32(l), uint32(p.From.Reg), uint32(p.Reg))
3844 } else if p.From3Type() == obj.TYPE_CONST {
3846 /* operand order: RB, L, RA */
3847 l := int(c.regoff(p.GetFrom3()))
3848 o1 = AOP_RRR(c.opirr(p.As), uint32(l), uint32(p.To.Reg), uint32(p.From.Reg))
3849 } else if p.To.Type == obj.TYPE_REG {
3850 cr := int32(p.To.Reg)
3851 if REG_CR0 <= cr && cr <= REG_CR7 {
3853 /* operand order: RA, RB, BF */
3854 bf := (int(p.To.Reg) & 7) << 2
3855 o1 = AOP_RRR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg), uint32(p.Reg))
3856 } else if p.From.Type == obj.TYPE_CONST {
3858 /* operand order: L, RT */
3859 l := int(c.regoff(&p.From))
3860 o1 = AOP_RRR(c.opirr(p.As), uint32(p.To.Reg), uint32(l), uint32(p.Reg))
3863 case ACOPY, APASTECC:
3864 o1 = AOP_RRR(c.opirr(p.As), uint32(1), uint32(p.From.Reg), uint32(p.To.Reg))
3867 /* operand order: RS, RB, RA */
3868 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3873 case 93: /* X-form instructions, 2-operands */
3874 if p.To.Type == obj.TYPE_CONST {
3876 /* operand order: FRB, BF */
3877 bf := int(c.regoff(&p.To)) << 2
3878 o1 = AOP_RR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg))
3879 } else if p.Reg == 0 {
3880 /* popcnt* r,r, X-form */
3881 /* operand order: RS, RA */
3882 o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
3885 case 94: /* Z23-form instructions, 4-operands */
3886 /* reg reg reg imm */
3887 /* operand order: RA, RB, CY, RT */
3888 cy := int(c.regoff(p.GetFrom3()))
3889 o1 = AOP_Z23I(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(cy))
3891 case 96: /* VSX load, DQ-form */
3893 /* operand order: (RA)(DQ), XT */
3894 dq := int16(c.regoff(&p.From))
3896 c.ctxt.Diag("invalid offset for DQ form load/store %v", dq)
3898 o1 = AOP_DQ(c.opload(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(dq))
3900 case 97: /* VSX store, DQ-form */
3902 /* operand order: XT, (RA)(DQ) */
3903 dq := int16(c.regoff(&p.To))
3905 c.ctxt.Diag("invalid offset for DQ form load/store %v", dq)
3907 o1 = AOP_DQ(c.opstore(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(dq))
3908 case 98: /* VSX indexed load or load with length (also left-justified), x-form */
3909 /* vsreg, reg, reg */
3910 o1 = AOP_XX1(c.opload(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3911 case 99: /* VSX store with length (also left-justified) x-form */
3912 /* reg, reg, vsreg */
3913 o1 = AOP_XX1(c.opstore(p.As), uint32(p.From.Reg), uint32(p.Reg), uint32(p.To.Reg))
3914 case 100: /* VSX X-form XXSPLTIB */
3915 if p.From.Type == obj.TYPE_CONST {
3917 uim := int(c.regoff(&p.From))
3919 /* Use AOP_XX1 form with 0 for one of the registers. */
3920 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.To.Reg), uint32(0), uint32(uim))
3922 c.ctxt.Diag("invalid ops for %v", p.As)
3925 o1 = AOP_XX2(c.oprrr(p.As), uint32(p.To.Reg), uint32(0), uint32(p.From.Reg))
3927 case 102: /* RLWMI $sh,rs,$mb,$me,rt (M-form opcode)*/
3928 mb := uint32(c.regoff(&p.RestArgs[0].Addr))
3929 me := uint32(c.regoff(&p.RestArgs[1].Addr))
3930 sh := uint32(c.regoff(&p.From))
3931 o1 = OP_RLW(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), sh, mb, me)
3933 case 103: /* RLWNM rb,rs,$mb,$me,rt (M-form opcode)*/
3934 mb := uint32(c.regoff(&p.RestArgs[0].Addr))
3935 me := uint32(c.regoff(&p.RestArgs[1].Addr))
3936 o1 = OP_RLW(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.From.Reg), mb, me)
3938 case 104: /* VSX mtvsr* instructions, XX1-form RA,RB,XT */
3939 o1 = AOP_XX1(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
3941 case 106: /* MOVD spr, soreg */
3942 v := int32(p.From.Reg)
3943 o1 = OPVCC(31, 339, 0, 0) /* mfspr */
3944 o1 = AOP_RRR(o1, uint32(REGTMP), 0, 0) | (uint32(v)&0x1f)<<16 | ((uint32(v)>>5)&0x1f)<<11
3945 so := c.regoff(&p.To)
3946 o2 = AOP_IRR(c.opstore(AMOVD), uint32(REGTMP), uint32(p.To.Reg), uint32(so))
3948 log.Fatalf("invalid offset for DS form load/store %v", p)
3950 if p.To.Reg == REGTMP {
3951 log.Fatalf("SPR move to memory will clobber R31 %v", p)
3954 case 107: /* MOVD soreg, spr */
3955 v := int32(p.From.Reg)
3956 so := c.regoff(&p.From)
3957 o1 = AOP_IRR(c.opload(AMOVD), uint32(REGTMP), uint32(v), uint32(so))
3958 o2 = OPVCC(31, 467, 0, 0) /* mtspr */
3960 o2 = AOP_RRR(o2, uint32(REGTMP), 0, 0) | (uint32(v)&0x1f)<<16 | ((uint32(v)>>5)&0x1f)<<11
3962 log.Fatalf("invalid offset for DS form load/store %v", p)
3965 case 108: /* mov r, xoreg ==> stwx rx,ry */
3967 o1 = AOP_RRR(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(r))
3969 case 109: /* mov xoreg, r ==> lbzx/lhzx/lwzx rx,ry, lbzx rx,ry + extsb r,r */
3970 r := int(p.From.Reg)
3972 o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(r))
3973 // Sign extend MOVB operations. This is ignored for other cases (o.size == 4).
3974 o2 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
3976 case 110: /* SETB creg, rt */
3977 bfa := uint32(p.From.Reg) << 2
3978 rt := uint32(p.To.Reg)
3979 o1 = LOP_RRR(OP_SETB, bfa, rt, 0)
// vregoff resolves the operand addr a to a 64-bit offset value.
// NOTE(review): the function body is elided in this listing; semantics
// inferred only from the name and from regoff's use of it — confirm
// against the full source before relying on this description.
3989 func (c *ctxt9) vregoff(a *obj.Addr) int64 {
// regoff returns the resolved offset of addr a truncated to 32 bits.
// It simply narrows vregoff's 64-bit result; behavior for offsets that
// do not fit in 32 bits is silent truncation here (vregoff's body is
// elided in this listing, so any range checking it does is not visible).
3997 func (c *ctxt9) regoff(a *obj.Addr) int32 {
3998 return int32(c.vregoff(a))
4001 func (c *ctxt9) oprrr(a obj.As) uint32 {
4004 return OPVCC(31, 266, 0, 0)
4006 return OPVCC(31, 266, 0, 1)
4008 return OPVCC(31, 266, 1, 0)
4010 return OPVCC(31, 266, 1, 1)
4012 return OPVCC(31, 10, 0, 0)
4014 return OPVCC(31, 10, 0, 1)
4016 return OPVCC(31, 10, 1, 0)
4018 return OPVCC(31, 10, 1, 1)
4020 return OPVCC(31, 138, 0, 0)
4022 return OPVCC(31, 138, 0, 1)
4024 return OPVCC(31, 138, 1, 0)
4026 return OPVCC(31, 138, 1, 1)
4028 return OPVCC(31, 234, 0, 0)
4030 return OPVCC(31, 234, 0, 1)
4032 return OPVCC(31, 234, 1, 0)
4034 return OPVCC(31, 234, 1, 1)
4036 return OPVCC(31, 202, 0, 0)
4038 return OPVCC(31, 202, 0, 1)
4040 return OPVCC(31, 202, 1, 0)
4042 return OPVCC(31, 202, 1, 1)
4044 return OPVCC(31, 170, 0, 0) /* addex - v3.0b */
4047 return OPVCC(31, 28, 0, 0)
4049 return OPVCC(31, 28, 0, 1)
4051 return OPVCC(31, 60, 0, 0)
4053 return OPVCC(31, 60, 0, 1)
4056 return OPVCC(31, 0, 0, 0) | 1<<21 /* L=1 */
4058 return OPVCC(31, 32, 0, 0) | 1<<21
4060 return OPVCC(31, 0, 0, 0) /* L=0 */
4062 return OPVCC(31, 32, 0, 0)
4064 return OPVCC(31, 508, 0, 0) /* cmpb - v2.05 */
4066 return OPVCC(31, 224, 0, 0) /* cmpeqb - v3.00 */
4069 return OPVCC(31, 26, 0, 0)
4071 return OPVCC(31, 26, 0, 1)
4073 return OPVCC(31, 58, 0, 0)
4075 return OPVCC(31, 58, 0, 1)
4078 return OPVCC(19, 257, 0, 0)
4080 return OPVCC(19, 129, 0, 0)
4082 return OPVCC(19, 289, 0, 0)
4084 return OPVCC(19, 225, 0, 0)
4086 return OPVCC(19, 33, 0, 0)
4088 return OPVCC(19, 449, 0, 0)
4090 return OPVCC(19, 417, 0, 0)
4092 return OPVCC(19, 193, 0, 0)
4095 return OPVCC(31, 86, 0, 0)
4097 return OPVCC(31, 470, 0, 0)
4099 return OPVCC(31, 54, 0, 0)
4101 return OPVCC(31, 278, 0, 0)
4103 return OPVCC(31, 246, 0, 0)
4105 return OPVCC(31, 1014, 0, 0)
4108 return OPVCC(31, 265, 0, 0) /* modud - v3.0 */
4110 return OPVCC(31, 267, 0, 0) /* moduw - v3.0 */
4112 return OPVCC(31, 777, 0, 0) /* modsd - v3.0 */
4114 return OPVCC(31, 779, 0, 0) /* modsw - v3.0 */
4117 return OPVCC(31, 491, 0, 0)
4120 return OPVCC(31, 491, 0, 1)
4123 return OPVCC(31, 491, 1, 0)
4126 return OPVCC(31, 491, 1, 1)
4129 return OPVCC(31, 459, 0, 0)
4132 return OPVCC(31, 459, 0, 1)
4135 return OPVCC(31, 459, 1, 0)
4138 return OPVCC(31, 459, 1, 1)
4141 return OPVCC(31, 489, 0, 0)
4144 return OPVCC(31, 489, 0, 1)
4147 return OPVCC(31, 425, 0, 0)
4150 return OPVCC(31, 425, 0, 1)
4153 return OPVCC(31, 393, 0, 0)
4156 return OPVCC(31, 393, 0, 1)
4159 return OPVCC(31, 489, 1, 0)
4162 return OPVCC(31, 489, 1, 1)
4164 case ADIVDU, AREMDU:
4165 return OPVCC(31, 457, 0, 0)
4168 return OPVCC(31, 457, 0, 1)
4171 return OPVCC(31, 457, 1, 0)
4174 return OPVCC(31, 457, 1, 1)
4177 return OPVCC(31, 854, 0, 0)
4180 return OPVCC(31, 284, 0, 0)
4182 return OPVCC(31, 284, 0, 1)
4185 return OPVCC(31, 954, 0, 0)
4187 return OPVCC(31, 954, 0, 1)
4189 return OPVCC(31, 922, 0, 0)
4191 return OPVCC(31, 922, 0, 1)
4193 return OPVCC(31, 986, 0, 0)
4195 return OPVCC(31, 986, 0, 1)
4198 return OPVCC(63, 264, 0, 0)
4200 return OPVCC(63, 264, 0, 1)
4202 return OPVCC(63, 21, 0, 0)
4204 return OPVCC(63, 21, 0, 1)
4206 return OPVCC(59, 21, 0, 0)
4208 return OPVCC(59, 21, 0, 1)
4210 return OPVCC(63, 32, 0, 0)
4212 return OPVCC(63, 0, 0, 0)
4214 return OPVCC(63, 846, 0, 0)
4216 return OPVCC(63, 846, 0, 1)
4218 return OPVCC(63, 974, 0, 0)
4220 return OPVCC(63, 974, 0, 1)
4222 return OPVCC(59, 846, 0, 0)
4224 return OPVCC(59, 846, 0, 1)
4226 return OPVCC(63, 14, 0, 0)
4228 return OPVCC(63, 14, 0, 1)
4230 return OPVCC(63, 15, 0, 0)
4232 return OPVCC(63, 15, 0, 1)
4234 return OPVCC(63, 814, 0, 0)
4236 return OPVCC(63, 814, 0, 1)
4238 return OPVCC(63, 815, 0, 0)
4240 return OPVCC(63, 815, 0, 1)
4242 return OPVCC(63, 18, 0, 0)
4244 return OPVCC(63, 18, 0, 1)
4246 return OPVCC(59, 18, 0, 0)
4248 return OPVCC(59, 18, 0, 1)
4250 return OPVCC(63, 29, 0, 0)
4252 return OPVCC(63, 29, 0, 1)
4254 return OPVCC(59, 29, 0, 0)
4256 return OPVCC(59, 29, 0, 1)
4258 case AFMOVS, AFMOVD:
4259 return OPVCC(63, 72, 0, 0) /* load */
4261 return OPVCC(63, 72, 0, 1)
4263 return OPVCC(63, 28, 0, 0)
4265 return OPVCC(63, 28, 0, 1)
4267 return OPVCC(59, 28, 0, 0)
4269 return OPVCC(59, 28, 0, 1)
4271 return OPVCC(63, 25, 0, 0)
4273 return OPVCC(63, 25, 0, 1)
4275 return OPVCC(59, 25, 0, 0)
4277 return OPVCC(59, 25, 0, 1)
4279 return OPVCC(63, 136, 0, 0)
4281 return OPVCC(63, 136, 0, 1)
4283 return OPVCC(63, 40, 0, 0)
4285 return OPVCC(63, 40, 0, 1)
4287 return OPVCC(63, 31, 0, 0)
4289 return OPVCC(63, 31, 0, 1)
4291 return OPVCC(59, 31, 0, 0)
4293 return OPVCC(59, 31, 0, 1)
4295 return OPVCC(63, 30, 0, 0)
4297 return OPVCC(63, 30, 0, 1)
4299 return OPVCC(59, 30, 0, 0)
4301 return OPVCC(59, 30, 0, 1)
4303 return OPVCC(63, 8, 0, 0)
4305 return OPVCC(63, 8, 0, 1)
4307 return OPVCC(59, 24, 0, 0)
4309 return OPVCC(59, 24, 0, 1)
4311 return OPVCC(63, 488, 0, 0)
4313 return OPVCC(63, 488, 0, 1)
4315 return OPVCC(63, 456, 0, 0)
4317 return OPVCC(63, 456, 0, 1)
4319 return OPVCC(63, 424, 0, 0)
4321 return OPVCC(63, 424, 0, 1)
4323 return OPVCC(63, 392, 0, 0)
4325 return OPVCC(63, 392, 0, 1)
4327 return OPVCC(63, 12, 0, 0)
4329 return OPVCC(63, 12, 0, 1)
4331 return OPVCC(63, 26, 0, 0)
4333 return OPVCC(63, 26, 0, 1)
4335 return OPVCC(63, 23, 0, 0)
4337 return OPVCC(63, 23, 0, 1)
4339 return OPVCC(63, 22, 0, 0)
4341 return OPVCC(63, 22, 0, 1)
4343 return OPVCC(59, 22, 0, 0)
4345 return OPVCC(59, 22, 0, 1)
4347 return OPVCC(63, 20, 0, 0)
4349 return OPVCC(63, 20, 0, 1)
4351 return OPVCC(59, 20, 0, 0)
4353 return OPVCC(59, 20, 0, 1)
4356 return OPVCC(31, 982, 0, 0)
4358 return OPVCC(19, 150, 0, 0)
4361 return OPVCC(63, 70, 0, 0)
4363 return OPVCC(63, 70, 0, 1)
4365 return OPVCC(63, 38, 0, 0)
4367 return OPVCC(63, 38, 0, 1)
4370 return OPVCC(31, 75, 0, 0)
4372 return OPVCC(31, 75, 0, 1)
4374 return OPVCC(31, 11, 0, 0)
4376 return OPVCC(31, 11, 0, 1)
4378 return OPVCC(31, 235, 0, 0)
4380 return OPVCC(31, 235, 0, 1)
4382 return OPVCC(31, 235, 1, 0)
4384 return OPVCC(31, 235, 1, 1)
4387 return OPVCC(31, 73, 0, 0)
4389 return OPVCC(31, 73, 0, 1)
4391 return OPVCC(31, 9, 0, 0)
4393 return OPVCC(31, 9, 0, 1)
4395 return OPVCC(31, 233, 0, 0)
4397 return OPVCC(31, 233, 0, 1)
4399 return OPVCC(31, 233, 1, 0)
4401 return OPVCC(31, 233, 1, 1)
4404 return OPVCC(31, 476, 0, 0)
4406 return OPVCC(31, 476, 0, 1)
4408 return OPVCC(31, 104, 0, 0)
4410 return OPVCC(31, 104, 0, 1)
4412 return OPVCC(31, 104, 1, 0)
4414 return OPVCC(31, 104, 1, 1)
4416 return OPVCC(31, 124, 0, 0)
4418 return OPVCC(31, 124, 0, 1)
4420 return OPVCC(31, 444, 0, 0)
4422 return OPVCC(31, 444, 0, 1)
4424 return OPVCC(31, 412, 0, 0)
4426 return OPVCC(31, 412, 0, 1)
4429 return OPVCC(31, 506, 0, 0) /* popcntd - v2.06 */
4431 return OPVCC(31, 378, 0, 0) /* popcntw - v2.06 */
4433 return OPVCC(31, 122, 0, 0) /* popcntb - v2.02 */
4435 return OPVCC(31, 538, 0, 0) /* cnttzw - v3.00 */
4437 return OPVCC(31, 538, 0, 1) /* cnttzw. - v3.00 */
4439 return OPVCC(31, 570, 0, 0) /* cnttzd - v3.00 */
4441 return OPVCC(31, 570, 0, 1) /* cnttzd. - v3.00 */
4444 return OPVCC(19, 50, 0, 0)
4446 return OPVCC(19, 51, 0, 0)
4448 return OPVCC(19, 18, 0, 0)
4450 return OPVCC(19, 274, 0, 0)
4453 return OPVCC(23, 0, 0, 0)
4455 return OPVCC(23, 0, 0, 1)
4458 return OPVCC(30, 8, 0, 0)
4460 return OPVCC(30, 0, 0, 1)
4463 return OPVCC(30, 9, 0, 0)
4465 return OPVCC(30, 9, 0, 1)
4468 return OPVCC(30, 0, 0, 0)
4470 return OPVCC(30, 0, 0, 1)
4472 return OPMD(30, 1, 0) // rldicr
4474 return OPMD(30, 1, 1) // rldicr.
4477 return OPMD(30, 2, 0) // rldic
4479 return OPMD(30, 2, 1) // rldic.
4482 return OPVCC(17, 1, 0, 0)
4485 return OPVCC(31, 24, 0, 0)
4487 return OPVCC(31, 24, 0, 1)
4489 return OPVCC(31, 27, 0, 0)
4491 return OPVCC(31, 27, 0, 1)
4494 return OPVCC(31, 792, 0, 0)
4496 return OPVCC(31, 792, 0, 1)
4498 return OPVCC(31, 794, 0, 0)
4500 return OPVCC(31, 794, 0, 1)
4503 return OPVCC(31, 445, 0, 0)
4505 return OPVCC(31, 445, 0, 1)
4508 return OPVCC(31, 536, 0, 0)
4510 return OPVCC(31, 536, 0, 1)
4512 return OPVCC(31, 539, 0, 0)
4514 return OPVCC(31, 539, 0, 1)
4517 return OPVCC(31, 40, 0, 0)
4519 return OPVCC(31, 40, 0, 1)
4521 return OPVCC(31, 40, 1, 0)
4523 return OPVCC(31, 40, 1, 1)
4525 return OPVCC(31, 8, 0, 0)
4527 return OPVCC(31, 8, 0, 1)
4529 return OPVCC(31, 8, 1, 0)
4531 return OPVCC(31, 8, 1, 1)
4533 return OPVCC(31, 136, 0, 0)
4535 return OPVCC(31, 136, 0, 1)
4537 return OPVCC(31, 136, 1, 0)
4539 return OPVCC(31, 136, 1, 1)
4541 return OPVCC(31, 232, 0, 0)
4543 return OPVCC(31, 232, 0, 1)
4545 return OPVCC(31, 232, 1, 0)
4547 return OPVCC(31, 232, 1, 1)
4549 return OPVCC(31, 200, 0, 0)
4551 return OPVCC(31, 200, 0, 1)
4553 return OPVCC(31, 200, 1, 0)
4555 return OPVCC(31, 200, 1, 1)
4558 return OPVCC(31, 598, 0, 0)
4560 return OPVCC(31, 598, 0, 0) | 1<<21
4563 return OPVCC(31, 598, 0, 0) | 2<<21
4566 return OPVCC(31, 306, 0, 0)
4568 return OPVCC(31, 274, 0, 0)
4570 return OPVCC(31, 566, 0, 0)
4572 return OPVCC(31, 498, 0, 0)
4574 return OPVCC(31, 434, 0, 0)
4576 return OPVCC(31, 915, 0, 0)
4578 return OPVCC(31, 851, 0, 0)
4580 return OPVCC(31, 402, 0, 0)
4583 return OPVCC(31, 4, 0, 0)
4585 return OPVCC(31, 68, 0, 0)
4587 /* Vector (VMX/Altivec) instructions */
4588 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
4589 /* are enabled starting at POWER6 (ISA 2.05). */
4591 return OPVX(4, 1028, 0, 0) /* vand - v2.03 */
4593 return OPVX(4, 1092, 0, 0) /* vandc - v2.03 */
4595 return OPVX(4, 1412, 0, 0) /* vnand - v2.07 */
4598 return OPVX(4, 1156, 0, 0) /* vor - v2.03 */
4600 return OPVX(4, 1348, 0, 0) /* vorc - v2.07 */
4602 return OPVX(4, 1284, 0, 0) /* vnor - v2.03 */
4604 return OPVX(4, 1220, 0, 0) /* vxor - v2.03 */
4606 return OPVX(4, 1668, 0, 0) /* veqv - v2.07 */
4609 return OPVX(4, 0, 0, 0) /* vaddubm - v2.03 */
4611 return OPVX(4, 64, 0, 0) /* vadduhm - v2.03 */
4613 return OPVX(4, 128, 0, 0) /* vadduwm - v2.03 */
4615 return OPVX(4, 192, 0, 0) /* vaddudm - v2.07 */
4617 return OPVX(4, 256, 0, 0) /* vadduqm - v2.07 */
4620 return OPVX(4, 320, 0, 0) /* vaddcuq - v2.07 */
4622 return OPVX(4, 384, 0, 0) /* vaddcuw - v2.03 */
4625 return OPVX(4, 512, 0, 0) /* vaddubs - v2.03 */
4627 return OPVX(4, 576, 0, 0) /* vadduhs - v2.03 */
4629 return OPVX(4, 640, 0, 0) /* vadduws - v2.03 */
4632 return OPVX(4, 768, 0, 0) /* vaddsbs - v2.03 */
4634 return OPVX(4, 832, 0, 0) /* vaddshs - v2.03 */
4636 return OPVX(4, 896, 0, 0) /* vaddsws - v2.03 */
4639 return OPVX(4, 60, 0, 0) /* vaddeuqm - v2.07 */
4641 return OPVX(4, 61, 0, 0) /* vaddecuq - v2.07 */
4644 return OPVX(4, 776, 0, 0) /* vmulesb - v2.03 */
4646 return OPVX(4, 264, 0, 0) /* vmulosb - v2.03 */
4648 return OPVX(4, 520, 0, 0) /* vmuleub - v2.03 */
4650 return OPVX(4, 8, 0, 0) /* vmuloub - v2.03 */
4652 return OPVX(4, 840, 0, 0) /* vmulesh - v2.03 */
4654 return OPVX(4, 328, 0, 0) /* vmulosh - v2.03 */
4656 return OPVX(4, 584, 0, 0) /* vmuleuh - v2.03 */
4658 return OPVX(4, 72, 0, 0) /* vmulouh - v2.03 */
4660 return OPVX(4, 904, 0, 0) /* vmulesw - v2.07 */
4662 return OPVX(4, 392, 0, 0) /* vmulosw - v2.07 */
4664 return OPVX(4, 648, 0, 0) /* vmuleuw - v2.07 */
4666 return OPVX(4, 136, 0, 0) /* vmulouw - v2.07 */
4668 return OPVX(4, 137, 0, 0) /* vmuluwm - v2.07 */
4671 return OPVX(4, 1032, 0, 0) /* vpmsumb - v2.07 */
4673 return OPVX(4, 1096, 0, 0) /* vpmsumh - v2.07 */
4675 return OPVX(4, 1160, 0, 0) /* vpmsumw - v2.07 */
4677 return OPVX(4, 1224, 0, 0) /* vpmsumd - v2.07 */
4680 return OPVX(4, 35, 0, 0) /* vmsumudm - v3.00b */
4683 return OPVX(4, 1024, 0, 0) /* vsububm - v2.03 */
4685 return OPVX(4, 1088, 0, 0) /* vsubuhm - v2.03 */
4687 return OPVX(4, 1152, 0, 0) /* vsubuwm - v2.03 */
4689 return OPVX(4, 1216, 0, 0) /* vsubudm - v2.07 */
4691 return OPVX(4, 1280, 0, 0) /* vsubuqm - v2.07 */
4694 return OPVX(4, 1344, 0, 0) /* vsubcuq - v2.07 */
4696 return OPVX(4, 1408, 0, 0) /* vsubcuw - v2.03 */
4699 return OPVX(4, 1536, 0, 0) /* vsububs - v2.03 */
4701 return OPVX(4, 1600, 0, 0) /* vsubuhs - v2.03 */
4703 return OPVX(4, 1664, 0, 0) /* vsubuws - v2.03 */
4706 return OPVX(4, 1792, 0, 0) /* vsubsbs - v2.03 */
4708 return OPVX(4, 1856, 0, 0) /* vsubshs - v2.03 */
4710 return OPVX(4, 1920, 0, 0) /* vsubsws - v2.03 */
4713 return OPVX(4, 62, 0, 0) /* vsubeuqm - v2.07 */
4715 return OPVX(4, 63, 0, 0) /* vsubecuq - v2.07 */
4718 return OPVX(4, 4, 0, 0) /* vrlb - v2.03 */
4720 return OPVX(4, 68, 0, 0) /* vrlh - v2.03 */
4722 return OPVX(4, 132, 0, 0) /* vrlw - v2.03 */
4724 return OPVX(4, 196, 0, 0) /* vrld - v2.07 */
4727 return OPVX(4, 1676, 0, 0) /* vmrgow - v2.07 */
4729 return OPVX(4, 1932, 0, 0) /* vmrgew - v2.07 */
4732 return OPVX(4, 260, 0, 0) /* vslh - v2.03 */
4734 return OPVX(4, 324, 0, 0) /* vslh - v2.03 */
4736 return OPVX(4, 388, 0, 0) /* vslw - v2.03 */
4738 return OPVX(4, 452, 0, 0) /* vsl - v2.03 */
4740 return OPVX(4, 1036, 0, 0) /* vsl - v2.03 */
4742 return OPVX(4, 516, 0, 0) /* vsrb - v2.03 */
4744 return OPVX(4, 580, 0, 0) /* vsrh - v2.03 */
4746 return OPVX(4, 644, 0, 0) /* vsrw - v2.03 */
4748 return OPVX(4, 708, 0, 0) /* vsr - v2.03 */
4750 return OPVX(4, 1100, 0, 0) /* vsro - v2.03 */
4752 return OPVX(4, 1476, 0, 0) /* vsld - v2.07 */
4754 return OPVX(4, 1732, 0, 0) /* vsrd - v2.07 */
4757 return OPVX(4, 772, 0, 0) /* vsrab - v2.03 */
4759 return OPVX(4, 836, 0, 0) /* vsrah - v2.03 */
4761 return OPVX(4, 900, 0, 0) /* vsraw - v2.03 */
4763 return OPVX(4, 964, 0, 0) /* vsrad - v2.07 */
4766 return OPVC(4, 1356, 0, 0) /* vbpermq - v2.07 */
4768 return OPVC(4, 1484, 0, 0) /* vbpermd - v3.00 */
4771 return OPVX(4, 1794, 0, 0) /* vclzb - v2.07 */
4773 return OPVX(4, 1858, 0, 0) /* vclzh - v2.07 */
4775 return OPVX(4, 1922, 0, 0) /* vclzw - v2.07 */
4777 return OPVX(4, 1986, 0, 0) /* vclzd - v2.07 */
4780 return OPVX(4, 1538, 0, 0) /* vclzlsbb - v3.0 */
4782 return OPVX(4, 1538, 0, 0) | 1<<16 /* vctzlsbb - v3.0 */
4785 return OPVX(4, 1795, 0, 0) /* vpopcntb - v2.07 */
4787 return OPVX(4, 1859, 0, 0) /* vpopcnth - v2.07 */
4789 return OPVX(4, 1923, 0, 0) /* vpopcntw - v2.07 */
4791 return OPVX(4, 1987, 0, 0) /* vpopcntd - v2.07 */
4794 return OPVC(4, 6, 0, 0) /* vcmpequb - v2.03 */
4796 return OPVC(4, 6, 0, 1) /* vcmpequb. - v2.03 */
4798 return OPVC(4, 70, 0, 0) /* vcmpequh - v2.03 */
4800 return OPVC(4, 70, 0, 1) /* vcmpequh. - v2.03 */
4802 return OPVC(4, 134, 0, 0) /* vcmpequw - v2.03 */
4804 return OPVC(4, 134, 0, 1) /* vcmpequw. - v2.03 */
4806 return OPVC(4, 199, 0, 0) /* vcmpequd - v2.07 */
4808 return OPVC(4, 199, 0, 1) /* vcmpequd. - v2.07 */
4811 return OPVC(4, 518, 0, 0) /* vcmpgtub - v2.03 */
4813 return OPVC(4, 518, 0, 1) /* vcmpgtub. - v2.03 */
4815 return OPVC(4, 582, 0, 0) /* vcmpgtuh - v2.03 */
4817 return OPVC(4, 582, 0, 1) /* vcmpgtuh. - v2.03 */
4819 return OPVC(4, 646, 0, 0) /* vcmpgtuw - v2.03 */
4821 return OPVC(4, 646, 0, 1) /* vcmpgtuw. - v2.03 */
4823 return OPVC(4, 711, 0, 0) /* vcmpgtud - v2.07 */
4825 return OPVC(4, 711, 0, 1) /* vcmpgtud. v2.07 */
4827 return OPVC(4, 774, 0, 0) /* vcmpgtsb - v2.03 */
4829 return OPVC(4, 774, 0, 1) /* vcmpgtsb. - v2.03 */
4831 return OPVC(4, 838, 0, 0) /* vcmpgtsh - v2.03 */
4833 return OPVC(4, 838, 0, 1) /* vcmpgtsh. - v2.03 */
4835 return OPVC(4, 902, 0, 0) /* vcmpgtsw - v2.03 */
4837 return OPVC(4, 902, 0, 1) /* vcmpgtsw. - v2.03 */
4839 return OPVC(4, 967, 0, 0) /* vcmpgtsd - v2.07 */
4841 return OPVC(4, 967, 0, 1) /* vcmpgtsd. - v2.07 */
4844 return OPVC(4, 263, 0, 0) /* vcmpnezb - v3.00 */
4846 return OPVC(4, 263, 0, 1) /* vcmpnezb. - v3.00 */
4848 return OPVC(4, 7, 0, 0) /* vcmpneb - v3.00 */
4850 return OPVC(4, 7, 0, 1) /* vcmpneb. - v3.00 */
4852 return OPVC(4, 71, 0, 0) /* vcmpneh - v3.00 */
4854 return OPVC(4, 71, 0, 1) /* vcmpneh. - v3.00 */
4856 return OPVC(4, 135, 0, 0) /* vcmpnew - v3.00 */
4858 return OPVC(4, 135, 0, 1) /* vcmpnew. - v3.00 */
4861 return OPVX(4, 43, 0, 0) /* vperm - v2.03 */
4863 return OPVX(4, 45, 0, 0) /* vpermxor - v2.03 */
4865 return OPVX(4, 59, 0, 0) /* vpermr - v3.0 */
4868 return OPVX(4, 42, 0, 0) /* vsel - v2.03 */
4871 return OPVX(4, 1288, 0, 0) /* vcipher - v2.07 */
4873 return OPVX(4, 1289, 0, 0) /* vcipherlast - v2.07 */
4875 return OPVX(4, 1352, 0, 0) /* vncipher - v2.07 */
4877 return OPVX(4, 1353, 0, 0) /* vncipherlast - v2.07 */
4879 return OPVX(4, 1480, 0, 0) /* vsbox - v2.07 */
4880 /* End of vector instructions */
4882 /* Vector scalar (VSX) instructions */
4883 /* ISA 2.06 enables these for POWER7. */
4884 case AMFVSRD, AMFVRD, AMFFPRD:
4885 return OPVXX1(31, 51, 0) /* mfvsrd - v2.07 */
4887 return OPVXX1(31, 115, 0) /* mfvsrwz - v2.07 */
4889 return OPVXX1(31, 307, 0) /* mfvsrld - v3.00 */
4891 case AMTVSRD, AMTFPRD, AMTVRD:
4892 return OPVXX1(31, 179, 0) /* mtvsrd - v2.07 */
4894 return OPVXX1(31, 211, 0) /* mtvsrwa - v2.07 */
4896 return OPVXX1(31, 243, 0) /* mtvsrwz - v2.07 */
4898 return OPVXX1(31, 435, 0) /* mtvsrdd - v3.00 */
4900 return OPVXX1(31, 403, 0) /* mtvsrws - v3.00 */
4903 return OPVXX3(60, 130, 0) /* xxland - v2.06 */
4905 return OPVXX3(60, 138, 0) /* xxlandc - v2.06 */
4907 return OPVXX3(60, 186, 0) /* xxleqv - v2.07 */
4909 return OPVXX3(60, 178, 0) /* xxlnand - v2.07 */
4912 return OPVXX3(60, 170, 0) /* xxlorc - v2.07 */
4914 return OPVXX3(60, 162, 0) /* xxlnor - v2.06 */
4915 case AXXLOR, AXXLORQ:
4916 return OPVXX3(60, 146, 0) /* xxlor - v2.06 */
4918 return OPVXX3(60, 154, 0) /* xxlxor - v2.06 */
4921 return OPVXX4(60, 3, 0) /* xxsel - v2.06 */
4924 return OPVXX3(60, 18, 0) /* xxmrghw - v2.06 */
4926 return OPVXX3(60, 50, 0) /* xxmrglw - v2.06 */
4929 return OPVXX2(60, 164, 0) /* xxspltw - v2.06 */
4932 return OPVCC(60, 360, 0, 0) /* xxspltib - v3.0 */
4935 return OPVXX3(60, 26, 0) /* xxperm - v2.06 */
4937 return OPVXX3(60, 10, 0) /* xxpermdi - v2.06 */
4940 return OPVXX3(60, 2, 0) /* xxsldwi - v2.06 */
4943 return OPVXX2VA(60, 475, 31) /* xxbrq - v3.0 */
4945 return OPVXX2VA(60, 475, 23) /* xxbrd - v3.0 */
4947 return OPVXX2VA(60, 475, 15) /* xxbrw - v3.0 */
4949 return OPVXX2VA(60, 475, 7) /* xxbrh - v3.0 */
4952 return OPVXX2(60, 265, 0) /* xscvdpsp - v2.06 */
4954 return OPVXX2(60, 329, 0) /* xscvspdp - v2.06 */
4956 return OPVXX2(60, 267, 0) /* xscvdpspn - v2.07 */
4958 return OPVXX2(60, 331, 0) /* xscvspdpn - v2.07 */
4961 return OPVXX2(60, 393, 0) /* xvcvdpsp - v2.06 */
4963 return OPVXX2(60, 457, 0) /* xvcvspdp - v2.06 */
4966 return OPVXX2(60, 344, 0) /* xscvdpsxds - v2.06 */
4968 return OPVXX2(60, 88, 0) /* xscvdpsxws - v2.06 */
4970 return OPVXX2(60, 328, 0) /* xscvdpuxds - v2.06 */
4972 return OPVXX2(60, 72, 0) /* xscvdpuxws - v2.06 */
4975 return OPVXX2(60, 376, 0) /* xscvsxddp - v2.06 */
4977 return OPVXX2(60, 360, 0) /* xscvuxddp - v2.06 */
4979 return OPVXX2(60, 312, 0) /* xscvsxdsp - v2.06 */
4981 return OPVXX2(60, 296, 0) /* xscvuxdsp - v2.06 */
4984 return OPVXX2(60, 472, 0) /* xvcvdpsxds - v2.06 */
4986 return OPVXX2(60, 216, 0) /* xvcvdpsxws - v2.06 */
4988 return OPVXX2(60, 456, 0) /* xvcvdpuxds - v2.06 */
4990 return OPVXX2(60, 200, 0) /* xvcvdpuxws - v2.06 */
4992 return OPVXX2(60, 408, 0) /* xvcvspsxds - v2.07 */
4994 return OPVXX2(60, 152, 0) /* xvcvspsxws - v2.07 */
4996 return OPVXX2(60, 392, 0) /* xvcvspuxds - v2.07 */
4998 return OPVXX2(60, 136, 0) /* xvcvspuxws - v2.07 */
5001 return OPVXX2(60, 504, 0) /* xvcvsxddp - v2.06 */
5003 return OPVXX2(60, 248, 0) /* xvcvsxwdp - v2.06 */
5005 return OPVXX2(60, 488, 0) /* xvcvuxddp - v2.06 */
5007 return OPVXX2(60, 232, 0) /* xvcvuxwdp - v2.06 */
5009 return OPVXX2(60, 440, 0) /* xvcvsxdsp - v2.06 */
5011 return OPVXX2(60, 184, 0) /* xvcvsxwsp - v2.06 */
5013 return OPVXX2(60, 424, 0) /* xvcvuxdsp - v2.06 */
5015 return OPVXX2(60, 168, 0) /* xvcvuxwsp - v2.06 */
5016 /* End of VSX instructions */
5019 return OPVX(4, 48, 0, 0) /* maddhd - v3.00 */
5021 return OPVX(4, 49, 0, 0) /* maddhdu - v3.00 */
5023 return OPVX(4, 51, 0, 0) /* maddld - v3.00 */
5026 return OPVCC(31, 316, 0, 0)
5028 return OPVCC(31, 316, 0, 1)
5031 c.ctxt.Diag("bad r/r, r/r/r or r/r/r/r opcode %v", a)
// opirrr returns the base 32-bit machine-opcode encoding for an
// instruction taking imm/reg/reg/reg operands. The only visible
// encoding is VA-form VSLDOI (vector shift left double by octet
// immediate); an unrecognized opcode is reported via Diag.
// NOTE(review): the switch and case lines are elided in this listing —
// only the VSLDOI return and the diagnostic survive; confirm the full
// case set against the original source.
5035 func (c *ctxt9) opirrr(a obj.As) uint32 {
5037 /* Vector (VMX/Altivec) instructions */
5038 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
5039 /* are enabled starting at POWER6 (ISA 2.05). */
5041 return OPVX(4, 44, 0, 0) /* vsldoi - v2.03 */
5044 c.ctxt.Diag("bad i/r/r/r opcode %v", a)
// opiirr returns the base 32-bit machine-opcode encoding for an
// instruction taking imm/imm/reg/reg operands. The visible encodings
// are the SHA-2 sigma vector ops VSHASIGMAW/VSHASIGMAD (ISA 2.07,
// POWER8+); an unrecognized opcode is reported via Diag.
// NOTE(review): the switch and case labels are elided in this listing —
// pairing of the two returns with the W/D variants is taken from the
// end-of-line comments; confirm against the original source.
5048 func (c *ctxt9) opiirr(a obj.As) uint32 {
5050 /* Vector (VMX/Altivec) instructions */
5051 /* ISA 2.07 enables these for POWER8 and beyond. */
5053 return OPVX(4, 1666, 0, 0) /* vshasigmaw - v2.07 */
5055 return OPVX(4, 1730, 0, 0) /* vshasigmad - v2.07 */
5058 c.ctxt.Diag("bad i/i/r/r opcode %v", a)
// opirr returns the 32-bit opcode encoding for instructions taking an
// immediate operand (register/immediate and register/immediate/register
// forms), dispatching on the obj.As mnemonic. Unknown mnemonics fall
// through to the Diag call at the bottom.
//
// NOTE(review): this excerpt is garbled — almost all `case AFOO:` labels of
// the enclosing switch have been lost, and each surviving line carries a
// stray original-file line number prefix. The returns below are the raw
// encodings only; which mnemonic selects which return must be confirmed
// against the intact source before any behavioral change.
5062 func (c *ctxt9) opirr(a obj.As) uint32 {
// D-form arithmetic/logical immediates (primary opcodes 12–15, 28–29).
5065 return OPVCC(14, 0, 0, 0)
5067 return OPVCC(12, 0, 0, 0)
5069 return OPVCC(13, 0, 0, 0)
5071 return OPVCC(15, 0, 0, 0) /* ADDIS */
5074 return OPVCC(28, 0, 0, 0)
5076 return OPVCC(29, 0, 0, 0) /* ANDIS. */
// Branch forms: primary opcode 18 (I-form, `|1` sets the LK link bit)
// and 16 (B-form conditional branch).
5079 return OPVCC(18, 0, 0, 0)
5081 return OPVCC(18, 0, 0, 0) | 1
5083 return OPVCC(18, 0, 0, 0) | 1
5085 return OPVCC(18, 0, 0, 0) | 1
5087 return OPVCC(16, 0, 0, 0)
5089 return OPVCC(16, 0, 0, 0) | 1
// Conditional branches built from BO (branch-on-CR / CTR) and BI
// (CR-bit selector) constants. Note BEQ/BNE/BLT/BGT/BLE/BGE pairs use
// the same BI bit with BO_BCR vs BO_NOTBCR inverted.
5092 return AOP_RRR(16<<26, BO_BCR, BI_EQ, 0)
5094 return AOP_RRR(16<<26, BO_NOTBCR, BI_LT, 0)
5096 return AOP_RRR(16<<26, BO_BCR, BI_GT, 0)
5098 return AOP_RRR(16<<26, BO_NOTBCR, BI_GT, 0)
5100 return AOP_RRR(16<<26, BO_BCR, BI_LT, 0)
5102 return AOP_RRR(16<<26, BO_NOTBCR, BI_EQ, 0)
5104 return AOP_RRR(16<<26, BO_NOTBCR, BI_FU, 0)
5106 return AOP_RRR(16<<26, BO_BCR, BI_FU, 0)
5108 return AOP_RRR(16<<26, BO_NOTBCTR, 0, 0)
5110 return AOP_RRR(16<<26, BO_BCTR, 0, 0)
// Compare-immediate forms (opcodes 10/11); 1<<21 sets the L field
// selecting 64-bit (L=1) vs 32-bit (L=0) comparison.
5113 return OPVCC(11, 0, 0, 0) | 1<<21 /* L=1 */
5115 return OPVCC(10, 0, 0, 0) | 1<<21
5117 return OPVCC(11, 0, 0, 0) /* L=0 */
5119 return OPVCC(10, 0, 0, 0)
5121 return OPVCC(31, 224, 0, 0) /* cmpeqb - v3.00 */
5124 return OPVCC(31, 597, 0, 0)
5127 return OPVCC(31, 774, 0, 0) /* copy - v3.00 */
5129 return OPVCC(31, 902, 0, 1) /* paste. - v3.00 */
5131 return OPVCC(31, 755, 0, 0) /* darn - v3.00 */
5133 case AMULLW, AMULLD:
5134 return OPVCC(7, 0, 0, 0) /* mulli works with MULLW or MULLD */
5137 return OPVCC(24, 0, 0, 0)
5139 return OPVCC(25, 0, 0, 0) /* ORIS */
// Rotate-and-mask immediates: rlwimi/rlwinm use OPVCC (M-form, opcode
// 20/21, last arg = Rc record bit); rld* use OPMD (MD-form, opcode 30,
// middle arg = minor opcode, last arg = Rc).
5142 return OPVCC(20, 0, 0, 0) /* rlwimi */
5144 return OPVCC(20, 0, 0, 1)
5146 return OPMD(30, 3, 0) /* rldimi */
5148 return OPMD(30, 3, 1) /* rldimi. */
5150 return OPMD(30, 3, 0) /* rldimi */
5152 return OPMD(30, 3, 1) /* rldimi. */
5154 return OPVCC(21, 0, 0, 0) /* rlwinm */
5156 return OPVCC(21, 0, 0, 1)
5159 return OPMD(30, 0, 0) /* rldicl */
5161 return OPMD(30, 0, 1) /* rldicl. */
5163 return OPMD(30, 1, 0) /* rldicr */
5165 return OPMD(30, 1, 1) /* rldicr. */
5167 return OPMD(30, 2, 0) /* rldic */
5169 return OPMD(30, 2, 1) /* rldic. */
// Shift-immediate and misc X-form encodings under primary opcode 31.
5172 return OPVCC(31, 824, 0, 0)
5174 return OPVCC(31, 824, 0, 1)
5176 return OPVCC(31, (413 << 1), 0, 0)
5178 return OPVCC(31, (413 << 1), 0, 1)
5180 return OPVCC(31, 445, 0, 0)
5182 return OPVCC(31, 445, 0, 1)
5185 return OPVCC(31, 725, 0, 0)
5188 return OPVCC(8, 0, 0, 0)
5191 return OPVCC(3, 0, 0, 0)
5193 return OPVCC(2, 0, 0, 0)
5195 /* Vector (VMX/Altivec) instructions */
5196 /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
5197 /* are enabled starting at POWER6 (ISA 2.05). */
5199 return OPVX(4, 524, 0, 0) /* vspltb - v2.03 */
5201 return OPVX(4, 588, 0, 0) /* vsplth - v2.03 */
5203 return OPVX(4, 652, 0, 0) /* vspltw - v2.03 */
5206 return OPVX(4, 780, 0, 0) /* vspltisb - v2.03 */
5208 return OPVX(4, 844, 0, 0) /* vspltish - v2.03 */
5210 return OPVX(4, 908, 0, 0) /* vspltisw - v2.03 */
5211 /* End of vector instructions */
5214 return OPVCC(63, 128, 0, 0) /* ftdiv - v2.06 */
5216 return OPVCC(63, 160, 0, 0) /* ftsqrt - v2.06 */
5219 return OPVCC(26, 0, 0, 0) /* XORIL */
5221 return OPVCC(27, 0, 0, 0) /* XORIS */
// Fallthrough: no encoding known for this mnemonic in immediate form.
5224 c.ctxt.Diag("bad opcode i/r or i/r/r %v", a)
// opload returns the opcode encoding for D-form (register + displacement)
// loads, dispatching on the obj.As mnemonic. Unknown mnemonics reach the
// Diag call at the bottom.
//
// NOTE(review): garbled excerpt — most `case` labels of the switch are
// missing and each line carries a stray original line-number prefix;
// confirm mnemonic-to-return mapping against the intact source.
5231 func (c *ctxt9) opload(a obj.As) uint32 {
// 64-bit loads: ld/ldu share primary opcode 58 (DS-form; low bits are
// the minor opcode — `|1<<1` selects lwa, last OPVCC arg 1 selects ldu).
5234 return OPVCC(58, 0, 0, 0) /* ld */
5236 return OPVCC(58, 0, 0, 1) /* ldu */
5238 return OPVCC(32, 0, 0, 0) /* lwz */
5240 return OPVCC(33, 0, 0, 0) /* lwzu */
5242 return OPVCC(58, 0, 0, 0) | 1<<1 /* lwa */
// VSX loads added in ISA v3.0 (DQ-form and VXX1-form).
5244 return OPDQ(61, 1, 0) /* lxv - ISA v3.0 */
5246 return OPVXX1(31, 269, 0) /* lxvl - ISA v3.0 */
5248 return OPVXX1(31, 301, 0) /* lxvll - ISA v3.0 */
5250 return OPVXX1(31, 268, 0) /* lxvx - ISA v3.0 */
// Byte/halfword/float loads; consecutive opcode pairs are the
// plain vs update (u-suffix) forms.
5254 return OPVCC(34, 0, 0, 0)
5257 case AMOVBU, AMOVBZU:
5258 return OPVCC(35, 0, 0, 0)
5260 return OPVCC(50, 0, 0, 0)
5262 return OPVCC(51, 0, 0, 0)
5264 return OPVCC(48, 0, 0, 0)
5266 return OPVCC(49, 0, 0, 0)
5268 return OPVCC(42, 0, 0, 0)
5270 return OPVCC(43, 0, 0, 0)
5272 return OPVCC(40, 0, 0, 0)
5274 return OPVCC(41, 0, 0, 0)
5276 return OPVCC(46, 0, 0, 0) /* lmw */
// Fallthrough: mnemonic has no D-form load encoding.
5279 c.ctxt.Diag("bad load opcode %v", a)
5284 * indexed load a(b),d
// oploadx returns the opcode encoding for indexed (X-form, register +
// register) loads — the `a(b),d` addressing noted in the comment above
// this function. All encodings live under primary opcode 31 with an
// extended minor opcode. Unknown mnemonics reach the Diag call.
//
// NOTE(review): garbled excerpt — most `case` labels are missing and each
// line carries a stray original line-number prefix; confirm the
// mnemonic-to-return mapping against the intact source.
5286 func (c *ctxt9) oploadx(a obj.As) uint32 {
5289 return OPVCC(31, 23, 0, 0) /* lwzx */
5291 return OPVCC(31, 55, 0, 0) /* lwzux */
5293 return OPVCC(31, 341, 0, 0) /* lwax */
5295 return OPVCC(31, 373, 0, 0) /* lwaux */
5298 return OPVCC(31, 87, 0, 0) /* lbzx */
5300 case AMOVBU, AMOVBZU:
5301 return OPVCC(31, 119, 0, 0) /* lbzux */
// Floating-point indexed loads.
5303 return OPVCC(31, 599, 0, 0) /* lfdx */
5305 return OPVCC(31, 631, 0, 0) /* lfdux */
5307 return OPVCC(31, 535, 0, 0) /* lfsx */
5309 return OPVCC(31, 567, 0, 0) /* lfsux */
5311 return OPVCC(31, 855, 0, 0) /* lfiwax - power6, isa 2.05 */
5313 return OPVCC(31, 887, 0, 0) /* lfiwzx - power7, isa 2.06 */
// Halfword and byte-reversed loads.
5315 return OPVCC(31, 343, 0, 0) /* lhax */
5317 return OPVCC(31, 375, 0, 0) /* lhaux */
5319 return OPVCC(31, 790, 0, 0) /* lhbrx */
5321 return OPVCC(31, 534, 0, 0) /* lwbrx */
5323 return OPVCC(31, 532, 0, 0) /* ldbrx */
5325 return OPVCC(31, 279, 0, 0) /* lhzx */
5327 return OPVCC(31, 311, 0, 0) /* lhzux */
// Load-and-reserve (atomic reservation) forms.
5329 return OPVCC(31, 52, 0, 0) /* lbarx */
5331 return OPVCC(31, 116, 0, 0) /* lharx */
5333 return OPVCC(31, 20, 0, 0) /* lwarx */
5335 return OPVCC(31, 84, 0, 0) /* ldarx */
5337 return OPVCC(31, 533, 0, 0) /* lswx */
5339 return OPVCC(31, 21, 0, 0) /* ldx */
5341 return OPVCC(31, 53, 0, 0) /* ldux */
5343 /* Vector (VMX/Altivec) instructions */
5345 return OPVCC(31, 7, 0, 0) /* lvebx - v2.03 */
5347 return OPVCC(31, 39, 0, 0) /* lvehx - v2.03 */
5349 return OPVCC(31, 71, 0, 0) /* lvewx - v2.03 */
5351 return OPVCC(31, 103, 0, 0) /* lvx - v2.03 */
5353 return OPVCC(31, 359, 0, 0) /* lvxl - v2.03 */
5355 return OPVCC(31, 6, 0, 0) /* lvsl - v2.03 */
5357 return OPVCC(31, 38, 0, 0) /* lvsr - v2.03 */
5358 /* End of vector instructions */
5360 /* Vector scalar (VSX) instructions */
5362 return OPVXX1(31, 268, 0) /* lxvx - ISA v3.0 */
5364 return OPVXX1(31, 844, 0) /* lxvd2x - v2.06 */
5366 return OPVXX1(31, 780, 0) /* lxvw4x - v2.06 */
5368 return OPVXX1(31, 812, 0) /* lxvh8x - v3.00 */
5370 return OPVXX1(31, 876, 0) /* lxvb16x - v3.00 */
5372 return OPVXX1(31, 332, 0) /* lxvdsx - v2.06 */
5374 return OPVXX1(31, 588, 0) /* lxsdx - v2.06 */
5376 return OPVXX1(31, 76, 0) /* lxsiwax - v2.07 */
5378 return OPVXX1(31, 12, 0) /* lxsiwzx - v2.07 */
// Fallthrough: mnemonic has no indexed-load encoding.
5381 c.ctxt.Diag("bad loadx opcode %v", a)
// opstore returns the opcode encoding for D-form (register + displacement)
// stores, dispatching on the obj.As mnemonic. Unknown mnemonics reach the
// Diag call at the bottom.
//
// NOTE(review): garbled excerpt — most `case` labels are missing and each
// line carries a stray original line-number prefix; confirm the
// mnemonic-to-return mapping against the intact source.
5388 func (c *ctxt9) opstore(a obj.As) uint32 {
// Byte/halfword/word/float stores; consecutive opcode pairs are the
// plain vs update (u-suffix) forms, per the inline mnemonic comments.
5391 return OPVCC(38, 0, 0, 0) /* stb */
5393 case AMOVBU, AMOVBZU:
5394 return OPVCC(39, 0, 0, 0) /* stbu */
5396 return OPVCC(54, 0, 0, 0) /* stfd */
5398 return OPVCC(55, 0, 0, 0) /* stfdu */
5400 return OPVCC(52, 0, 0, 0) /* stfs */
5402 return OPVCC(53, 0, 0, 0) /* stfsu */
5405 return OPVCC(44, 0, 0, 0) /* sth */
5407 case AMOVHZU, AMOVHU:
5408 return OPVCC(45, 0, 0, 0) /* sthu */
5410 return OPVCC(47, 0, 0, 0) /* stmw */
5412 return OPVCC(31, 725, 0, 0) /* stswi */
5415 return OPVCC(36, 0, 0, 0) /* stw */
5417 case AMOVWZU, AMOVWU:
5418 return OPVCC(37, 0, 0, 0) /* stwu */
// std/stdu share DS-form opcode 62; last OPVCC arg selects the update form.
5420 return OPVCC(62, 0, 0, 0) /* std */
5422 return OPVCC(62, 0, 0, 1) /* stdu */
// VSX stores added in ISA v3.0.
5424 return OPDQ(61, 5, 0) /* stxv ISA 3.0 */
5426 return OPVXX1(31, 397, 0) /* stxvl ISA 3.0 */
5428 return OPVXX1(31, 429, 0) /* stxvll ISA 3.0 */
5430 return OPVXX1(31, 396, 0) /* stxvx - ISA v3.0 */
// Fallthrough: mnemonic has no D-form store encoding.
5434 c.ctxt.Diag("unknown store opcode %v", a)
5439 * indexed store s,a(b)
// opstorex returns the opcode encoding for indexed (X-form, register +
// register) stores — the `s,a(b)` addressing noted in the comment above
// this function. All encodings live under primary opcode 31.
//
// NOTE(review): garbled excerpt — most `case` labels are missing, each line
// carries a stray original line-number prefix, and the function's final
// return/closing brace lie beyond this excerpt. Confirm against the intact
// source before changing anything.
5441 func (c *ctxt9) opstorex(a obj.As) uint32 {
5444 return OPVCC(31, 215, 0, 0) /* stbx */
5446 case AMOVBU, AMOVBZU:
5447 return OPVCC(31, 247, 0, 0) /* stbux */
// Floating-point indexed stores.
5449 return OPVCC(31, 727, 0, 0) /* stfdx */
5451 return OPVCC(31, 759, 0, 0) /* stfdux */
5453 return OPVCC(31, 663, 0, 0) /* stfsx */
5455 return OPVCC(31, 695, 0, 0) /* stfsux */
5457 return OPVCC(31, 983, 0, 0) /* stfiwx */
5460 return OPVCC(31, 407, 0, 0) /* sthx */
5462 return OPVCC(31, 918, 0, 0) /* sthbrx */
5464 case AMOVHZU, AMOVHU:
5465 return OPVCC(31, 439, 0, 0) /* sthux */
5468 return OPVCC(31, 151, 0, 0) /* stwx */
5470 case AMOVWZU, AMOVWU:
5471 return OPVCC(31, 183, 0, 0) /* stwux */
5473 return OPVCC(31, 661, 0, 0) /* stswx */
5475 return OPVCC(31, 662, 0, 0) /* stwbrx */
5477 return OPVCC(31, 660, 0, 0) /* stdbrx */
// Store-conditional forms: last OPVCC arg 1 sets the Rc record bit
// (the trailing `.` in the mnemonic).
5479 return OPVCC(31, 694, 0, 1) /* stbcx. */
5481 return OPVCC(31, 726, 0, 1) /* sthcx. */
5483 return OPVCC(31, 150, 0, 1) /* stwcx. */
5485 return OPVCC(31, 214, 0, 1) /* stwdx. */
5487 return OPVCC(31, 149, 0, 0) /* stdx */
5489 return OPVCC(31, 181, 0, 0) /* stdux */
5491 /* Vector (VMX/Altivec) instructions */
5493 return OPVCC(31, 135, 0, 0) /* stvebx - v2.03 */
5495 return OPVCC(31, 167, 0, 0) /* stvehx - v2.03 */
5497 return OPVCC(31, 199, 0, 0) /* stvewx - v2.03 */
5499 return OPVCC(31, 231, 0, 0) /* stvx - v2.03 */
5501 return OPVCC(31, 487, 0, 0) /* stvxl - v2.03 */
5502 /* End of vector instructions */
5504 /* Vector scalar (VSX) instructions */
5506 return OPVXX1(31, 396, 0) /* stxvx - v3.0 */
5508 return OPVXX1(31, 972, 0) /* stxvd2x - v2.06 */
5510 return OPVXX1(31, 908, 0) /* stxvw4x - v2.06 */
5512 return OPVXX1(31, 940, 0) /* stxvh8x - v3.0 */
5514 return OPVXX1(31, 1004, 0) /* stxvb16x - v3.0 */
5517 return OPVXX1(31, 716, 0) /* stxsdx - v2.06 */
5520 return OPVXX1(31, 140, 0) /* stxsiwx - v2.07 */
5522 /* End of vector scalar instructions */
// Fallthrough: mnemonic has no indexed-store encoding.
5526 c.ctxt.Diag("unknown storex opcode %v", a)