1 // Copyright 2016 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
8 "cmd/compile/internal/base"
9 "cmd/compile/internal/ir"
10 "cmd/compile/internal/objw"
11 "cmd/compile/internal/ssa"
12 "cmd/compile/internal/ssagen"
13 "cmd/compile/internal/types"
15 "cmd/internal/obj/riscv"
18 // ssaRegToReg maps ssa register numbers to obj register numbers.
// NOTE(review): the table entries between the declaration and the SB entry
// are elided in this view — confirm ordering against the SSA register
// numbering in the full file before editing.
19 var ssaRegToReg = []int16{
84 0, // SB isn't a real register. We fill an Addr.Reg field with 0 in this case.
// loadByType returns the load instruction for the given type
// (presumably selecting on float vs. integer and on width — the switch
// arms are elided in this view; TODO confirm against the full file).
// The Fatalf calls below are the fall-through paths for widths that have
// no matching load instruction.
87 func loadByType(t *types.Type) obj.As {
97 base.Fatalf("unknown float width for load %d in type %v", width, t)
124 base.Fatalf("unknown width for load %d in type %v", width, t)
129 // storeByType returns the store instruction of the given type.
// NOTE(review): the switch arms selecting the instruction are elided in
// this view; the Fatalf calls below handle widths with no matching store.
130 func storeByType(t *types.Type) obj.As {
140 base.Fatalf("unknown float width for store %d in type %v", width, t)
155 base.Fatalf("unknown width for store %d in type %v", width, t)
160 // largestMove returns the largest move instruction possible and its size,
161 // given the alignment of the total size of the move.
163 // e.g., a 16-byte move may use MOV, but an 11-byte move must use MOVB.
165 // Note that the moves may not be on naturally aligned addresses depending on
166 // the source and destination.
168 // This matches the calculation in ssa.moveSize.
169 func largestMove(alignment int64) (obj.As, int64) {
171 case alignment%8 == 0:
// NOTE(review): the 8-byte-aligned return (expected riscv.AMOV, 8) is
// elided in this view — confirm against the full file.
173 case alignment%4 == 0:
174 return riscv.AMOVW, 4
175 case alignment%2 == 0:
176 return riscv.AMOVH, 2
// Default: byte-at-a-time moves for odd alignments.
178 return riscv.AMOVB, 1
182 // ssaMarkMoves marks any MOVXconst ops that need to avoid clobbering flags.
183 // RISC-V has no flags, so this is a no-op.
// The empty body intentionally satisfies the ssagen backend interface.
184 func ssaMarkMoves(s *ssagen.State, b *ssa.Block) {}
// ssaGenValue emits machine instructions (obj.Progs) for a single SSA value.
// NOTE(review): many lines of this function — the top-level switch header,
// several case bodies, register assignments, and closing braces — are elided
// in this view. Comments below describe only what the visible lines establish;
// verify anything marked TODO against the full file.
186 func ssaGenValue(s *ssagen.State, v *ssa.Value) {
191 // memory arg needs no code
193 // input args need no code
195 ssagen.CheckLoweredPhi(v)
196 case ssa.OpCopy, ssa.OpRISCV64MOVDreg:
197 if v.Type.IsMemory() {
200 rs := v.Args[0].Reg()
206 if v.Type.IsFloat() {
210 p.From.Type = obj.TYPE_REG
212 p.To.Type = obj.TYPE_REG
214 case ssa.OpRISCV64MOVDnop:
// Loads from a spill slot: flags cannot be materialized from memory.
217 if v.Type.IsFlags() {
218 v.Fatalf("load flags not implemented: %v", v.LongString())
221 p := s.Prog(loadByType(v.Type))
222 ssagen.AddrAuto(&p.From, v.Args[0])
223 p.To.Type = obj.TYPE_REG
// Stores to a spill slot: same restriction for flags.
226 if v.Type.IsFlags() {
227 v.Fatalf("store flags not implemented: %v", v.LongString())
230 p := s.Prog(storeByType(v.Type))
231 p.From.Type = obj.TYPE_REG
232 p.From.Reg = v.Args[0].Reg()
233 ssagen.AddrAuto(&p.To, v)
234 case ssa.OpArgIntReg, ssa.OpArgFloatReg:
235 // The assembler needs to wrap the entry safepoint/stack growth code with spill/unspill
236 // The loop only runs once.
237 for _, a := range v.Block.Func.RegArgs {
238 // Pass the spill/unspill information along to the assembler, offset by size of
239 // the saved LR slot.
240 addr := ssagen.SpillSlotAddr(a, riscv.REG_SP, base.Ctxt.Arch.FixedFrameSize)
241 s.FuncInfo().AddSpill(
242 obj.RegSpill{Reg: a.Reg, Addr: addr, Unspill: loadByType(a.Type), Spill: storeByType(a.Type)})
244 v.Block.Func.RegArgs = nil
246 ssagen.CheckArgReg(v)
247 case ssa.OpSP, ssa.OpSB, ssa.OpGetG:
// Sign/zero extension ops: look through copies to see whether the
// argument is already a properly-typed load (making the extension a no-op).
249 case ssa.OpRISCV64MOVBreg, ssa.OpRISCV64MOVHreg, ssa.OpRISCV64MOVWreg,
250 ssa.OpRISCV64MOVBUreg, ssa.OpRISCV64MOVHUreg, ssa.OpRISCV64MOVWUreg:
252 for a.Op == ssa.OpCopy || a.Op == ssa.OpRISCV64MOVDreg {
256 rs := v.Args[0].Reg()
258 if a.Op == ssa.OpLoadReg {
261 case v.Op == ssa.OpRISCV64MOVBreg && t.Size() == 1 && t.IsSigned(),
262 v.Op == ssa.OpRISCV64MOVHreg && t.Size() == 2 && t.IsSigned(),
263 v.Op == ssa.OpRISCV64MOVWreg && t.Size() == 4 && t.IsSigned(),
264 v.Op == ssa.OpRISCV64MOVBUreg && t.Size() == 1 && !t.IsSigned(),
265 v.Op == ssa.OpRISCV64MOVHUreg && t.Size() == 2 && !t.IsSigned(),
266 v.Op == ssa.OpRISCV64MOVWUreg && t.Size() == 4 && !t.IsSigned():
267 // arg is a proper-typed load and already sign/zero-extended
276 p.From.Type = obj.TYPE_REG
278 p.To.Type = obj.TYPE_REG
// Two-operand register-register ops: emit v.Op.Asm() with both arg regs.
280 case ssa.OpRISCV64ADD, ssa.OpRISCV64SUB, ssa.OpRISCV64SUBW, ssa.OpRISCV64XOR, ssa.OpRISCV64OR, ssa.OpRISCV64AND,
281 ssa.OpRISCV64SLL, ssa.OpRISCV64SRA, ssa.OpRISCV64SRAW, ssa.OpRISCV64SRL, ssa.OpRISCV64SRLW,
282 ssa.OpRISCV64SLT, ssa.OpRISCV64SLTU, ssa.OpRISCV64MUL, ssa.OpRISCV64MULW, ssa.OpRISCV64MULH,
283 ssa.OpRISCV64MULHU, ssa.OpRISCV64DIV, ssa.OpRISCV64DIVU, ssa.OpRISCV64DIVW,
284 ssa.OpRISCV64DIVUW, ssa.OpRISCV64REM, ssa.OpRISCV64REMU, ssa.OpRISCV64REMW,
286 ssa.OpRISCV64FADDS, ssa.OpRISCV64FSUBS, ssa.OpRISCV64FMULS, ssa.OpRISCV64FDIVS,
287 ssa.OpRISCV64FEQS, ssa.OpRISCV64FNES, ssa.OpRISCV64FLTS, ssa.OpRISCV64FLES,
288 ssa.OpRISCV64FADDD, ssa.OpRISCV64FSUBD, ssa.OpRISCV64FMULD, ssa.OpRISCV64FDIVD,
289 ssa.OpRISCV64FEQD, ssa.OpRISCV64FNED, ssa.OpRISCV64FLTD, ssa.OpRISCV64FLED,
292 r1 := v.Args[0].Reg()
293 r2 := v.Args[1].Reg()
294 p := s.Prog(v.Op.Asm())
295 p.From.Type = obj.TYPE_REG
298 p.To.Type = obj.TYPE_REG
// MULHU + MUL pair producing the (hi, lo) halves of an unsigned multiply.
300 case ssa.OpRISCV64LoweredMuluhilo:
301 r0 := v.Args[0].Reg()
302 r1 := v.Args[1].Reg()
303 p := s.Prog(riscv.AMULHU)
304 p.From.Type = obj.TYPE_REG
307 p.To.Type = obj.TYPE_REG
309 p1 := s.Prog(riscv.AMUL)
310 p1.From.Type = obj.TYPE_REG
313 p1.To.Type = obj.TYPE_REG
// Unsigned multiply with overflow flag: MULHU, MUL, then SNEZ on the
// high half to produce the overflow indicator.
315 case ssa.OpRISCV64LoweredMuluover:
316 r0 := v.Args[0].Reg()
317 r1 := v.Args[1].Reg()
318 p := s.Prog(riscv.AMULHU)
319 p.From.Type = obj.TYPE_REG
322 p.To.Type = obj.TYPE_REG
324 p1 := s.Prog(riscv.AMUL)
325 p1.From.Type = obj.TYPE_REG
328 p1.To.Type = obj.TYPE_REG
330 p2 := s.Prog(riscv.ASNEZ)
331 p2.From.Type = obj.TYPE_REG
332 p2.From.Reg = v.Reg1()
333 p2.To.Type = obj.TYPE_REG
// Fused multiply-add family: three register sources, third carried via
// AddRestSource.
335 case ssa.OpRISCV64FMADDD, ssa.OpRISCV64FMSUBD, ssa.OpRISCV64FNMADDD, ssa.OpRISCV64FNMSUBD,
336 ssa.OpRISCV64FMADDS, ssa.OpRISCV64FMSUBS, ssa.OpRISCV64FNMADDS, ssa.OpRISCV64FNMSUBS:
338 r1 := v.Args[0].Reg()
339 r2 := v.Args[1].Reg()
340 r3 := v.Args[2].Reg()
341 p := s.Prog(v.Op.Asm())
342 p.From.Type = obj.TYPE_REG
345 p.AddRestSource(obj.Addr{Type: obj.TYPE_REG, Reg: r3})
346 p.To.Type = obj.TYPE_REG
// Single-source register ops (unary float ops, conversions, NOT/NEG).
348 case ssa.OpRISCV64FSQRTS, ssa.OpRISCV64FNEGS, ssa.OpRISCV64FABSD, ssa.OpRISCV64FSQRTD, ssa.OpRISCV64FNEGD,
349 ssa.OpRISCV64FMVSX, ssa.OpRISCV64FMVDX,
350 ssa.OpRISCV64FCVTSW, ssa.OpRISCV64FCVTSL, ssa.OpRISCV64FCVTWS, ssa.OpRISCV64FCVTLS,
351 ssa.OpRISCV64FCVTDW, ssa.OpRISCV64FCVTDL, ssa.OpRISCV64FCVTWD, ssa.OpRISCV64FCVTLD, ssa.OpRISCV64FCVTDS, ssa.OpRISCV64FCVTSD,
352 ssa.OpRISCV64NOT, ssa.OpRISCV64NEG, ssa.OpRISCV64NEGW:
353 p := s.Prog(v.Op.Asm())
354 p.From.Type = obj.TYPE_REG
355 p.From.Reg = v.Args[0].Reg()
356 p.To.Type = obj.TYPE_REG
// Register-immediate ops: immediate comes from AuxInt.
358 case ssa.OpRISCV64ADDI, ssa.OpRISCV64ADDIW, ssa.OpRISCV64XORI, ssa.OpRISCV64ORI, ssa.OpRISCV64ANDI,
359 ssa.OpRISCV64SLLI, ssa.OpRISCV64SRAI, ssa.OpRISCV64SRAIW, ssa.OpRISCV64SRLI, ssa.OpRISCV64SRLIW, ssa.OpRISCV64SLTI,
361 p := s.Prog(v.Op.Asm())
362 p.From.Type = obj.TYPE_CONST
363 p.From.Offset = v.AuxInt
364 p.Reg = v.Args[0].Reg()
365 p.To.Type = obj.TYPE_REG
367 case ssa.OpRISCV64MOVDconst:
368 p := s.Prog(v.Op.Asm())
369 p.From.Type = obj.TYPE_CONST
370 p.From.Offset = v.AuxInt
371 p.To.Type = obj.TYPE_REG
// Address materialization: the aux type picks the base register; the
// RegName check below validates the arg actually lives in that register.
373 case ssa.OpRISCV64MOVaddr:
374 p := s.Prog(v.Op.Asm())
375 p.From.Type = obj.TYPE_ADDR
376 p.To.Type = obj.TYPE_REG
380 // MOVW $sym+off(base), R
381 switch v.Aux.(type) {
383 v.Fatalf("aux is of unknown type %T", v.Aux)
386 ssagen.AddAux(&p.From, v)
389 ssagen.AddAux(&p.From, v)
391 // No sym, just MOVW $off(SP), R
393 p.From.Reg = riscv.REG_SP
394 p.From.Offset = v.AuxInt
396 if reg := v.Args[0].RegName(); reg != wantreg {
397 v.Fatalf("bad reg %s for symbol type %T, want %s", reg, v.Aux, wantreg)
// Memory loads: base register in Args[0], aux offset/symbol added on.
399 case ssa.OpRISCV64MOVBload, ssa.OpRISCV64MOVHload, ssa.OpRISCV64MOVWload, ssa.OpRISCV64MOVDload,
400 ssa.OpRISCV64MOVBUload, ssa.OpRISCV64MOVHUload, ssa.OpRISCV64MOVWUload,
401 ssa.OpRISCV64FMOVWload, ssa.OpRISCV64FMOVDload:
402 p := s.Prog(v.Op.Asm())
403 p.From.Type = obj.TYPE_MEM
404 p.From.Reg = v.Args[0].Reg()
405 ssagen.AddAux(&p.From, v)
406 p.To.Type = obj.TYPE_REG
// Memory stores: value in Args[1], base address in Args[0].
408 case ssa.OpRISCV64MOVBstore, ssa.OpRISCV64MOVHstore, ssa.OpRISCV64MOVWstore, ssa.OpRISCV64MOVDstore,
409 ssa.OpRISCV64FMOVWstore, ssa.OpRISCV64FMOVDstore:
410 p := s.Prog(v.Op.Asm())
411 p.From.Type = obj.TYPE_REG
412 p.From.Reg = v.Args[1].Reg()
413 p.To.Type = obj.TYPE_MEM
414 p.To.Reg = v.Args[0].Reg()
415 ssagen.AddAux(&p.To, v)
// Zero stores use the hardwired zero register as the source.
416 case ssa.OpRISCV64MOVBstorezero, ssa.OpRISCV64MOVHstorezero, ssa.OpRISCV64MOVWstorezero, ssa.OpRISCV64MOVDstorezero:
417 p := s.Prog(v.Op.Asm())
418 p.From.Type = obj.TYPE_REG
419 p.From.Reg = riscv.REG_ZERO
420 p.To.Type = obj.TYPE_MEM
421 p.To.Reg = v.Args[0].Reg()
422 ssagen.AddAux(&p.To, v)
423 case ssa.OpRISCV64SEQZ, ssa.OpRISCV64SNEZ:
424 p := s.Prog(v.Op.Asm())
425 p.From.Type = obj.TYPE_REG
426 p.From.Reg = v.Args[0].Reg()
427 p.To.Type = obj.TYPE_REG
429 case ssa.OpRISCV64CALLstatic, ssa.OpRISCV64CALLclosure, ssa.OpRISCV64CALLinter:
431 case ssa.OpRISCV64CALLtail:
// Write barrier: AuxInt selects which gcWriteBarrier variant (by number
// of buffer entries needed).
433 case ssa.OpRISCV64LoweredWB:
434 p := s.Prog(obj.ACALL)
435 p.To.Type = obj.TYPE_MEM
436 p.To.Name = obj.NAME_EXTERN
437 // AuxInt encodes how many buffer entries we need.
438 p.To.Sym = ir.Syms.GCWriteBarrier[v.AuxInt-1]
439 case ssa.OpRISCV64LoweredPanicBoundsA, ssa.OpRISCV64LoweredPanicBoundsB, ssa.OpRISCV64LoweredPanicBoundsC:
440 p := s.Prog(obj.ACALL)
441 p.To.Type = obj.TYPE_MEM
442 p.To.Name = obj.NAME_EXTERN
443 p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt]
444 s.UseArgs(16) // space used in callee args area by assembly stubs
// Atomic operations: 8-bit loads/stores use plain MOV (with fences
// presumably elided in this view — TODO confirm); 32/64-bit ops use
// AMO instructions, and CAS uses an LR/SC loop (sketched at line 521ff).
446 case ssa.OpRISCV64LoweredAtomicLoad8:
448 p := s.Prog(riscv.AMOVBU)
449 p.From.Type = obj.TYPE_MEM
450 p.From.Reg = v.Args[0].Reg()
451 p.To.Type = obj.TYPE_REG
455 case ssa.OpRISCV64LoweredAtomicLoad32, ssa.OpRISCV64LoweredAtomicLoad64:
457 if v.Op == ssa.OpRISCV64LoweredAtomicLoad64 {
461 p.From.Type = obj.TYPE_MEM
462 p.From.Reg = v.Args[0].Reg()
463 p.To.Type = obj.TYPE_REG
466 case ssa.OpRISCV64LoweredAtomicStore8:
468 p := s.Prog(riscv.AMOVB)
469 p.From.Type = obj.TYPE_REG
470 p.From.Reg = v.Args[1].Reg()
471 p.To.Type = obj.TYPE_MEM
472 p.To.Reg = v.Args[0].Reg()
475 case ssa.OpRISCV64LoweredAtomicStore32, ssa.OpRISCV64LoweredAtomicStore64:
476 as := riscv.AAMOSWAPW
477 if v.Op == ssa.OpRISCV64LoweredAtomicStore64 {
481 p.From.Type = obj.TYPE_REG
482 p.From.Reg = v.Args[1].Reg()
483 p.To.Type = obj.TYPE_MEM
484 p.To.Reg = v.Args[0].Reg()
485 p.RegTo2 = riscv.REG_ZERO
487 case ssa.OpRISCV64LoweredAtomicAdd32, ssa.OpRISCV64LoweredAtomicAdd64:
489 if v.Op == ssa.OpRISCV64LoweredAtomicAdd64 {
493 p.From.Type = obj.TYPE_REG
494 p.From.Reg = v.Args[1].Reg()
495 p.To.Type = obj.TYPE_MEM
496 p.To.Reg = v.Args[0].Reg()
497 p.RegTo2 = riscv.REG_TMP
// AMOADD returns the old value; add the addend again to produce the
// new value as the result.
499 p2 := s.Prog(riscv.AADD)
500 p2.From.Type = obj.TYPE_REG
501 p2.From.Reg = riscv.REG_TMP
502 p2.Reg = v.Args[1].Reg()
503 p2.To.Type = obj.TYPE_REG
506 case ssa.OpRISCV64LoweredAtomicExchange32, ssa.OpRISCV64LoweredAtomicExchange64:
507 as := riscv.AAMOSWAPW
508 if v.Op == ssa.OpRISCV64LoweredAtomicExchange64 {
512 p.From.Type = obj.TYPE_REG
513 p.From.Reg = v.Args[1].Reg()
514 p.To.Type = obj.TYPE_MEM
515 p.To.Reg = v.Args[0].Reg()
518 case ssa.OpRISCV64LoweredAtomicCas32, ssa.OpRISCV64LoweredAtomicCas64:
521 // BNE Rtmp, Rarg1, 3(PC)
522 // SC Rarg2, (Rarg0), Rtmp
523 // BNE Rtmp, ZERO, -3(PC)
528 if v.Op == ssa.OpRISCV64LoweredAtomicCas64 {
533 r0 := v.Args[0].Reg()
534 r1 := v.Args[1].Reg()
535 r2 := v.Args[2].Reg()
// Initialize the boolean result to 0 (CAS failed) before the loop.
538 p := s.Prog(riscv.AMOV)
539 p.From.Type = obj.TYPE_REG
540 p.From.Reg = riscv.REG_ZERO
541 p.To.Type = obj.TYPE_REG
545 p1.From.Type = obj.TYPE_MEM
547 p1.To.Type = obj.TYPE_REG
548 p1.To.Reg = riscv.REG_TMP
550 p2 := s.Prog(riscv.ABNE)
551 p2.From.Type = obj.TYPE_REG
553 p2.Reg = riscv.REG_TMP
554 p2.To.Type = obj.TYPE_BRANCH
557 p3.From.Type = obj.TYPE_REG
559 p3.To.Type = obj.TYPE_MEM
561 p3.RegTo2 = riscv.REG_TMP
563 p4 := s.Prog(riscv.ABNE)
564 p4.From.Type = obj.TYPE_REG
565 p4.From.Reg = riscv.REG_TMP
566 p4.Reg = riscv.REG_ZERO
567 p4.To.Type = obj.TYPE_BRANCH
570 p5 := s.Prog(riscv.AMOV)
571 p5.From.Type = obj.TYPE_CONST
573 p5.To.Type = obj.TYPE_REG
// NOP serves as the branch target for the failure path.
576 p6 := s.Prog(obj.ANOP)
579 case ssa.OpRISCV64LoweredAtomicAnd32, ssa.OpRISCV64LoweredAtomicOr32:
580 p := s.Prog(v.Op.Asm())
581 p.From.Type = obj.TYPE_REG
582 p.From.Reg = v.Args[1].Reg()
583 p.To.Type = obj.TYPE_MEM
584 p.To.Reg = v.Args[0].Reg()
585 p.RegTo2 = riscv.REG_ZERO
// Zero/Move loops: store (or copy) sz bytes per iteration, advance the
// pointer(s), loop while the pointer has not reached the end address.
587 case ssa.OpRISCV64LoweredZero:
588 mov, sz := largestMove(v.AuxInt)
592 // BGEU Rarg1, Rarg0, -2(PC)
595 p.From.Type = obj.TYPE_REG
596 p.From.Reg = riscv.REG_ZERO
597 p.To.Type = obj.TYPE_MEM
598 p.To.Reg = v.Args[0].Reg()
600 p2 := s.Prog(riscv.AADD)
601 p2.From.Type = obj.TYPE_CONST
603 p2.To.Type = obj.TYPE_REG
604 p2.To.Reg = v.Args[0].Reg()
606 p3 := s.Prog(riscv.ABGEU)
607 p3.To.Type = obj.TYPE_BRANCH
608 p3.Reg = v.Args[0].Reg()
609 p3.From.Type = obj.TYPE_REG
610 p3.From.Reg = v.Args[1].Reg()
613 case ssa.OpRISCV64LoweredMove:
614 mov, sz := largestMove(v.AuxInt)
620 // BGEU Rarg2, Rarg0, -4(PC)
// Load from the source into the scratch register T2...
623 p.From.Type = obj.TYPE_MEM
624 p.From.Reg = v.Args[1].Reg()
625 p.To.Type = obj.TYPE_REG
626 p.To.Reg = riscv.REG_T2
// ...then store T2 to the destination.
629 p2.From.Type = obj.TYPE_REG
630 p2.From.Reg = riscv.REG_T2
631 p2.To.Type = obj.TYPE_MEM
632 p2.To.Reg = v.Args[0].Reg()
634 p3 := s.Prog(riscv.AADD)
635 p3.From.Type = obj.TYPE_CONST
637 p3.To.Type = obj.TYPE_REG
638 p3.To.Reg = v.Args[0].Reg()
640 p4 := s.Prog(riscv.AADD)
641 p4.From.Type = obj.TYPE_CONST
643 p4.To.Type = obj.TYPE_REG
644 p4.To.Reg = v.Args[1].Reg()
646 p5 := s.Prog(riscv.ABGEU)
647 p5.To.Type = obj.TYPE_BRANCH
648 p5.Reg = v.Args[1].Reg()
649 p5.From.Type = obj.TYPE_REG
650 p5.From.Reg = v.Args[2].Reg()
653 case ssa.OpRISCV64LoweredNilCheck:
654 // Issue a load which will fault if arg is nil.
655 // TODO: optimizations. See arm and amd64 LoweredNilCheck.
656 p := s.Prog(riscv.AMOVB)
657 p.From.Type = obj.TYPE_MEM
658 p.From.Reg = v.Args[0].Reg()
659 ssagen.AddAux(&p.From, v)
660 p.To.Type = obj.TYPE_REG
661 p.To.Reg = riscv.REG_ZERO
662 if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos == 1 in generated wrappers
663 base.WarnfAt(v.Pos, "generated nil check")
666 case ssa.OpRISCV64LoweredGetClosurePtr:
667 // Closure pointer is S10 (riscv.REG_CTXT).
668 ssagen.CheckLoweredGetClosurePtr(v)
670 case ssa.OpRISCV64LoweredGetCallerSP:
671 // caller's SP is FixedFrameSize below the address of the first arg
672 p := s.Prog(riscv.AMOV)
673 p.From.Type = obj.TYPE_ADDR
674 p.From.Offset = -base.Ctxt.Arch.FixedFrameSize
675 p.From.Name = obj.NAME_PARAM
676 p.To.Type = obj.TYPE_REG
679 case ssa.OpRISCV64LoweredGetCallerPC:
680 p := s.Prog(obj.AGETCALLERPC)
681 p.To.Type = obj.TYPE_REG
// Duff's-device helpers: jump into runtime.duffzero/duffcopy at the
// offset encoded in AuxInt.
684 case ssa.OpRISCV64DUFFZERO:
685 p := s.Prog(obj.ADUFFZERO)
686 p.To.Type = obj.TYPE_MEM
687 p.To.Name = obj.NAME_EXTERN
688 p.To.Sym = ir.Syms.Duffzero
689 p.To.Offset = v.AuxInt
691 case ssa.OpRISCV64DUFFCOPY:
692 p := s.Prog(obj.ADUFFCOPY)
693 p.To.Type = obj.TYPE_MEM
694 p.To.Name = obj.NAME_EXTERN
695 p.To.Sym = ir.Syms.Duffcopy
696 p.To.Offset = v.AuxInt
698 case ssa.OpRISCV64LoweredPubBarrier:
702 case ssa.OpRISCV64LoweredRound32F, ssa.OpRISCV64LoweredRound64F:
703 // input is already rounded
705 case ssa.OpClobber, ssa.OpClobberReg:
706 // TODO: implement for clobberdead experiment. Nop is ok for now.
709 v.Fatalf("Unhandled op %v", v.Op)
// blockBranch maps SSA block kinds to their corresponding RISC-V
// conditional-branch instructions, used by ssaGenBlock.
// NOTE(review): the closing brace of this literal is elided in this view.
713 var blockBranch = [...]obj.As{
714 ssa.BlockRISCV64BEQ: riscv.ABEQ,
715 ssa.BlockRISCV64BEQZ: riscv.ABEQZ,
716 ssa.BlockRISCV64BGE: riscv.ABGE,
717 ssa.BlockRISCV64BGEU: riscv.ABGEU,
718 ssa.BlockRISCV64BGEZ: riscv.ABGEZ,
719 ssa.BlockRISCV64BGTZ: riscv.ABGTZ,
720 ssa.BlockRISCV64BLEZ: riscv.ABLEZ,
721 ssa.BlockRISCV64BLT: riscv.ABLT,
722 ssa.BlockRISCV64BLTU: riscv.ABLTU,
723 ssa.BlockRISCV64BLTZ: riscv.ABLTZ,
724 ssa.BlockRISCV64BNE: riscv.ABNE,
725 ssa.BlockRISCV64BNEZ: riscv.ABNEZ,
// ssaGenBlock emits the control-flow instructions that end block b.
// next is the block laid out immediately after b; jumps to it are omitted.
// NOTE(review): the top-level switch header, several case labels, and
// closing braces are elided in this view — comments describe only what the
// visible lines establish.
728 func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
733 // defer returns in A0:
734 // 0 if we should continue executing
735 // 1 if we should jump to deferreturn call
736 p := s.Prog(riscv.ABNE)
737 p.To.Type = obj.TYPE_BRANCH
738 p.From.Type = obj.TYPE_REG
739 p.From.Reg = riscv.REG_ZERO
741 s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()})
742 if b.Succs[0].Block() != next {
743 p := s.Prog(obj.AJMP)
744 p.To.Type = obj.TYPE_BRANCH
745 s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
// Plain block: only emit a jump if the successor is not laid out next.
748 if b.Succs[0].Block() != next {
749 p := s.Prog(obj.AJMP)
750 p.To.Type = obj.TYPE_BRANCH
751 s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
753 case ssa.BlockExit, ssa.BlockRetJmp:
// Conditional branches: pick the branch (or its inversion) depending on
// which successor falls through, honoring the likely hint otherwise.
756 case ssa.BlockRISCV64BEQ, ssa.BlockRISCV64BEQZ, ssa.BlockRISCV64BNE, ssa.BlockRISCV64BNEZ,
757 ssa.BlockRISCV64BLT, ssa.BlockRISCV64BLEZ, ssa.BlockRISCV64BGE, ssa.BlockRISCV64BGEZ,
758 ssa.BlockRISCV64BLTZ, ssa.BlockRISCV64BGTZ, ssa.BlockRISCV64BLTU, ssa.BlockRISCV64BGEU:
760 as := blockBranch[b.Kind]
761 invAs := riscv.InvertBranch(as)
765 case b.Succs[0].Block():
766 p = s.Br(invAs, b.Succs[1].Block())
767 case b.Succs[1].Block():
768 p = s.Br(as, b.Succs[0].Block())
770 if b.Likely != ssa.BranchUnlikely {
771 p = s.Br(as, b.Succs[0].Block())
772 s.Br(obj.AJMP, b.Succs[1].Block())
774 p = s.Br(invAs, b.Succs[1].Block())
775 s.Br(obj.AJMP, b.Succs[0].Block())
// Wire up the control register(s): two-control compares take both
// operands; the *Z (compare-against-zero) forms take one.
779 p.From.Type = obj.TYPE_REG
781 case ssa.BlockRISCV64BEQ, ssa.BlockRISCV64BNE, ssa.BlockRISCV64BLT, ssa.BlockRISCV64BGE, ssa.BlockRISCV64BLTU, ssa.BlockRISCV64BGEU:
782 if b.NumControls() != 2 {
783 b.Fatalf("Unexpected number of controls (%d != 2): %s", b.NumControls(), b.LongString())
785 p.From.Reg = b.Controls[0].Reg()
786 p.Reg = b.Controls[1].Reg()
788 case ssa.BlockRISCV64BEQZ, ssa.BlockRISCV64BNEZ, ssa.BlockRISCV64BGEZ, ssa.BlockRISCV64BLEZ, ssa.BlockRISCV64BLTZ, ssa.BlockRISCV64BGTZ:
789 if b.NumControls() != 1 {
790 b.Fatalf("Unexpected number of controls (%d != 1): %s", b.NumControls(), b.LongString())
792 p.From.Reg = b.Controls[0].Reg()
796 b.Fatalf("Unhandled block: %s", b.LongString())
// loadRegResult emits a load of the stack-resident result named n (at
// frame offset + off) into register reg, returning the emitted Prog.
// NOTE(review): the tail of this function (setting p.To.Reg and the
// return) is elided in this view.
800 func loadRegResult(s *ssagen.State, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog {
801 p := s.Prog(loadByType(t))
802 p.From.Type = obj.TYPE_MEM
803 p.From.Name = obj.NAME_AUTO
804 p.From.Sym = n.Linksym()
805 p.From.Offset = n.FrameOffset() + off
806 p.To.Type = obj.TYPE_REG
// spillArgReg appends (after p) a store of argument register reg into the
// parameter slot for n at frame offset + off, marking the instruction as
// not-a-statement for debug info. NOTE(review): the final return of p runs
// past this view.
811 func spillArgReg(pp *objw.Progs, p *obj.Prog, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog {
812 p = pp.Append(p, storeByType(t), obj.TYPE_REG, reg, 0, obj.TYPE_MEM, 0, n.FrameOffset()+off)
813 p.To.Name = obj.NAME_PARAM
814 p.To.Sym = n.Linksym()
815 p.Pos = p.Pos.WithNotStmt()