1 // Copyright 2016 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
8 "cmd/compile/internal/base"
9 "cmd/compile/internal/ir"
10 "cmd/compile/internal/objw"
11 "cmd/compile/internal/ssa"
12 "cmd/compile/internal/ssagen"
13 "cmd/compile/internal/types"
15 "cmd/internal/obj/riscv"
18 // ssaRegToReg maps ssa register numbers to obj register numbers.
// NOTE(review): the ordering of entries must match the SSA register
// numbering for RISCV64 — confirm against the ssa/gen register definitions.
19 var ssaRegToReg = []int16{
84 0, // SB isn't a real register. We fill an Addr.Reg field with 0 in this case.
// loadByType returns the load instruction of the given type.
87 func loadByType(t *types.Type) obj.As {
// Unsupported widths are compiler bugs; width is presumably t.Size() in
// bytes — confirm against the full function body.
97 base.Fatalf("unknown float width for load %d in type %v", width, t)
124 base.Fatalf("unknown width for load %d in type %v", width, t)
129 // storeByType returns the store instruction of the given type.
130 func storeByType(t *types.Type) obj.As {
// Unsupported widths are compiler bugs; width is presumably t.Size() in
// bytes — confirm against the full function body.
140 base.Fatalf("unknown float width for store %d in type %v", width, t)
155 base.Fatalf("unknown width for store %d in type %v", width, t)
160 // largestMove returns the largest move instruction possible and its size,
161 // given the alignment of the total size of the move.
163 // e.g., a 16-byte move may use MOV, but an 11-byte move must use MOVB.
165 // Note that the moves may not be on naturally aligned addresses depending on
166 // the source and destination.
168 // This matches the calculation in ssa.moveSize.
169 func largestMove(alignment int64) (obj.As, int64) {
171 case alignment%8 == 0:
173 case alignment%4 == 0:
174 return riscv.AMOVW, 4
175 case alignment%2 == 0:
176 return riscv.AMOVH, 2
// Byte moves work for any alignment.
178 return riscv.AMOVB, 1
182 // ssaMarkMoves marks any MOVXconst ops that need to avoid clobbering flags.
183 // RISC-V has no flags, so this is a no-op.
// The empty body exists to satisfy the backend hook set expected by ssagen.
184 func ssaMarkMoves(s *ssagen.State, b *ssa.Block) {}
// ssaGenValue emits the machine instructions (obj.Progs) for a single SSA
// value v, appending them to the state s.
186 func ssaGenValue(s *ssagen.State, v *ssa.Value) {
191 // memory arg needs no code
193 // input args need no code
195 ssagen.CheckLoweredPhi(v)
196 case ssa.OpCopy, ssa.OpRISCV64MOVconvert, ssa.OpRISCV64MOVDreg:
197 if v.Type.IsMemory() {
200 rs := v.Args[0].Reg()
206 if v.Type.IsFloat() {
210 p.From.Type = obj.TYPE_REG
212 p.To.Type = obj.TYPE_REG
214 case ssa.OpRISCV64MOVDnop:
// Load from an auto (stack) slot into a register.
217 if v.Type.IsFlags() {
218 v.Fatalf("load flags not implemented: %v", v.LongString())
221 p := s.Prog(loadByType(v.Type))
222 ssagen.AddrAuto(&p.From, v.Args[0])
223 p.To.Type = obj.TYPE_REG
// Store from a register into an auto (stack) slot.
226 if v.Type.IsFlags() {
227 v.Fatalf("store flags not implemented: %v", v.LongString())
230 p := s.Prog(storeByType(v.Type))
231 p.From.Type = obj.TYPE_REG
232 p.From.Reg = v.Args[0].Reg()
233 ssagen.AddrAuto(&p.To, v)
234 case ssa.OpArgIntReg, ssa.OpArgFloatReg:
235 // The assembler needs to wrap the entry safepoint/stack growth code with spill/unspill
236 // The loop only runs once.
237 for _, a := range v.Block.Func.RegArgs {
238 // Pass the spill/unspill information along to the assembler, offset by size of
239 // the saved LR slot.
240 addr := ssagen.SpillSlotAddr(a, riscv.REG_SP, base.Ctxt.Arch.FixedFrameSize)
241 s.FuncInfo().AddSpill(
242 obj.RegSpill{Reg: a.Reg, Addr: addr, Unspill: loadByType(a.Type), Spill: storeByType(a.Type)})
// Clear RegArgs so the spill info is only recorded once per function.
244 v.Block.Func.RegArgs = nil
246 ssagen.CheckArgReg(v)
247 case ssa.OpSP, ssa.OpSB, ssa.OpGetG:
// Sign/zero-extension ops. If the argument is already a correctly-typed
// load, the extension is redundant and can be elided.
249 case ssa.OpRISCV64MOVBreg, ssa.OpRISCV64MOVHreg, ssa.OpRISCV64MOVWreg,
250 ssa.OpRISCV64MOVBUreg, ssa.OpRISCV64MOVHUreg, ssa.OpRISCV64MOVWUreg:
// Peel off register-to-register copies to find the value being extended.
252 for a.Op == ssa.OpCopy || a.Op == ssa.OpRISCV64MOVDreg {
256 rs := v.Args[0].Reg()
258 if a.Op == ssa.OpLoadReg {
261 case v.Op == ssa.OpRISCV64MOVBreg && t.Size() == 1 && t.IsSigned(),
262 v.Op == ssa.OpRISCV64MOVHreg && t.Size() == 2 && t.IsSigned(),
263 v.Op == ssa.OpRISCV64MOVWreg && t.Size() == 4 && t.IsSigned(),
264 v.Op == ssa.OpRISCV64MOVBUreg && t.Size() == 1 && !t.IsSigned(),
265 v.Op == ssa.OpRISCV64MOVHUreg && t.Size() == 2 && !t.IsSigned(),
266 v.Op == ssa.OpRISCV64MOVWUreg && t.Size() == 4 && !t.IsSigned():
267 // arg is a proper-typed load and already sign/zero-extended
276 p.From.Type = obj.TYPE_REG
278 p.To.Type = obj.TYPE_REG
// Two-register-operand integer and floating-point ops.
280 case ssa.OpRISCV64ADD, ssa.OpRISCV64SUB, ssa.OpRISCV64SUBW, ssa.OpRISCV64XOR, ssa.OpRISCV64OR, ssa.OpRISCV64AND,
281 ssa.OpRISCV64SLL, ssa.OpRISCV64SRA, ssa.OpRISCV64SRL,
282 ssa.OpRISCV64SLT, ssa.OpRISCV64SLTU, ssa.OpRISCV64MUL, ssa.OpRISCV64MULW, ssa.OpRISCV64MULH,
283 ssa.OpRISCV64MULHU, ssa.OpRISCV64DIV, ssa.OpRISCV64DIVU, ssa.OpRISCV64DIVW,
284 ssa.OpRISCV64DIVUW, ssa.OpRISCV64REM, ssa.OpRISCV64REMU, ssa.OpRISCV64REMW,
286 ssa.OpRISCV64FADDS, ssa.OpRISCV64FSUBS, ssa.OpRISCV64FMULS, ssa.OpRISCV64FDIVS,
287 ssa.OpRISCV64FEQS, ssa.OpRISCV64FNES, ssa.OpRISCV64FLTS, ssa.OpRISCV64FLES,
288 ssa.OpRISCV64FADDD, ssa.OpRISCV64FSUBD, ssa.OpRISCV64FMULD, ssa.OpRISCV64FDIVD,
289 ssa.OpRISCV64FEQD, ssa.OpRISCV64FNED, ssa.OpRISCV64FLTD, ssa.OpRISCV64FLED,
292 r1 := v.Args[0].Reg()
293 r2 := v.Args[1].Reg()
294 p := s.Prog(v.Op.Asm())
295 p.From.Type = obj.TYPE_REG
298 p.To.Type = obj.TYPE_REG
// 64x64 -> 128-bit unsigned multiply: MULHU produces the high word,
// MUL the low word.
300 case ssa.OpRISCV64LoweredMuluhilo:
301 r0 := v.Args[0].Reg()
302 r1 := v.Args[1].Reg()
303 p := s.Prog(riscv.AMULHU)
304 p.From.Type = obj.TYPE_REG
307 p.To.Type = obj.TYPE_REG
309 p1 := s.Prog(riscv.AMUL)
310 p1.From.Type = obj.TYPE_REG
313 p1.To.Type = obj.TYPE_REG
// Unsigned multiply with overflow indication: SNEZ of the high word
// turns any nonzero high bits into a 0/1 overflow flag.
315 case ssa.OpRISCV64LoweredMuluover:
316 r0 := v.Args[0].Reg()
317 r1 := v.Args[1].Reg()
318 p := s.Prog(riscv.AMULHU)
319 p.From.Type = obj.TYPE_REG
322 p.To.Type = obj.TYPE_REG
324 p1 := s.Prog(riscv.AMUL)
325 p1.From.Type = obj.TYPE_REG
328 p1.To.Type = obj.TYPE_REG
330 p2 := s.Prog(riscv.ASNEZ)
331 p2.From.Type = obj.TYPE_REG
332 p2.From.Reg = v.Reg1()
333 p2.To.Type = obj.TYPE_REG
// Fused multiply-add variants take three register operands; the third
// is attached as a rest-source.
335 case ssa.OpRISCV64FMADDD, ssa.OpRISCV64FMSUBD, ssa.OpRISCV64FNMADDD, ssa.OpRISCV64FNMSUBD:
337 r1 := v.Args[0].Reg()
338 r2 := v.Args[1].Reg()
339 r3 := v.Args[2].Reg()
340 p := s.Prog(v.Op.Asm())
341 p.From.Type = obj.TYPE_REG
344 p.AddRestSource(obj.Addr{Type: obj.TYPE_REG, Reg: r3})
345 p.To.Type = obj.TYPE_REG
// Single-operand ops: unary arithmetic, FP conversions and FP/int moves.
347 case ssa.OpRISCV64FSQRTS, ssa.OpRISCV64FNEGS, ssa.OpRISCV64FABSD, ssa.OpRISCV64FSQRTD, ssa.OpRISCV64FNEGD,
348 ssa.OpRISCV64FMVSX, ssa.OpRISCV64FMVDX,
349 ssa.OpRISCV64FCVTSW, ssa.OpRISCV64FCVTSL, ssa.OpRISCV64FCVTWS, ssa.OpRISCV64FCVTLS,
350 ssa.OpRISCV64FCVTDW, ssa.OpRISCV64FCVTDL, ssa.OpRISCV64FCVTWD, ssa.OpRISCV64FCVTLD, ssa.OpRISCV64FCVTDS, ssa.OpRISCV64FCVTSD,
351 ssa.OpRISCV64NOT, ssa.OpRISCV64NEG, ssa.OpRISCV64NEGW:
352 p := s.Prog(v.Op.Asm())
353 p.From.Type = obj.TYPE_REG
354 p.From.Reg = v.Args[0].Reg()
355 p.To.Type = obj.TYPE_REG
// Ops with an immediate operand taken from AuxInt.
357 case ssa.OpRISCV64ADDI, ssa.OpRISCV64ADDIW, ssa.OpRISCV64XORI, ssa.OpRISCV64ORI, ssa.OpRISCV64ANDI,
358 ssa.OpRISCV64SLLI, ssa.OpRISCV64SRAI, ssa.OpRISCV64SRLI, ssa.OpRISCV64SLTI,
360 p := s.Prog(v.Op.Asm())
361 p.From.Type = obj.TYPE_CONST
362 p.From.Offset = v.AuxInt
363 p.Reg = v.Args[0].Reg()
364 p.To.Type = obj.TYPE_REG
// Materialize an integer constant into a register.
366 case ssa.OpRISCV64MOVDconst:
367 p := s.Prog(v.Op.Asm())
368 p.From.Type = obj.TYPE_CONST
369 p.From.Offset = v.AuxInt
370 p.To.Type = obj.TYPE_REG
372 case ssa.OpRISCV64MOVaddr:
373 p := s.Prog(v.Op.Asm())
374 p.From.Type = obj.TYPE_ADDR
375 p.To.Type = obj.TYPE_REG
379 // MOVW $sym+off(base), R
380 switch v.Aux.(type) {
382 v.Fatalf("aux is of unknown type %T", v.Aux)
385 ssagen.AddAux(&p.From, v)
388 ssagen.AddAux(&p.From, v)
390 // No sym, just MOVW $off(SP), R
392 p.From.Reg = riscv.REG_SP
393 p.From.Offset = v.AuxInt
// Sanity check: the base register must match what the aux kind implies.
395 if reg := v.Args[0].RegName(); reg != wantreg {
396 v.Fatalf("bad reg %s for symbol type %T, want %s", reg, v.Aux, wantreg)
// Memory loads: base address in Args[0], offset/symbol from Aux.
398 case ssa.OpRISCV64MOVBload, ssa.OpRISCV64MOVHload, ssa.OpRISCV64MOVWload, ssa.OpRISCV64MOVDload,
399 ssa.OpRISCV64MOVBUload, ssa.OpRISCV64MOVHUload, ssa.OpRISCV64MOVWUload,
400 ssa.OpRISCV64FMOVWload, ssa.OpRISCV64FMOVDload:
401 p := s.Prog(v.Op.Asm())
402 p.From.Type = obj.TYPE_MEM
403 p.From.Reg = v.Args[0].Reg()
404 ssagen.AddAux(&p.From, v)
405 p.To.Type = obj.TYPE_REG
// Memory stores: value in Args[1], base address in Args[0].
407 case ssa.OpRISCV64MOVBstore, ssa.OpRISCV64MOVHstore, ssa.OpRISCV64MOVWstore, ssa.OpRISCV64MOVDstore,
408 ssa.OpRISCV64FMOVWstore, ssa.OpRISCV64FMOVDstore:
409 p := s.Prog(v.Op.Asm())
410 p.From.Type = obj.TYPE_REG
411 p.From.Reg = v.Args[1].Reg()
412 p.To.Type = obj.TYPE_MEM
413 p.To.Reg = v.Args[0].Reg()
414 ssagen.AddAux(&p.To, v)
// Zero stores use the hardwired zero register as the source.
415 case ssa.OpRISCV64MOVBstorezero, ssa.OpRISCV64MOVHstorezero, ssa.OpRISCV64MOVWstorezero, ssa.OpRISCV64MOVDstorezero:
416 p := s.Prog(v.Op.Asm())
417 p.From.Type = obj.TYPE_REG
418 p.From.Reg = riscv.REG_ZERO
419 p.To.Type = obj.TYPE_MEM
420 p.To.Reg = v.Args[0].Reg()
421 ssagen.AddAux(&p.To, v)
422 case ssa.OpRISCV64SEQZ, ssa.OpRISCV64SNEZ:
423 p := s.Prog(v.Op.Asm())
424 p.From.Type = obj.TYPE_REG
425 p.From.Reg = v.Args[0].Reg()
426 p.To.Type = obj.TYPE_REG
428 case ssa.OpRISCV64CALLstatic, ssa.OpRISCV64CALLclosure, ssa.OpRISCV64CALLinter:
430 case ssa.OpRISCV64CALLtail:
432 case ssa.OpRISCV64LoweredWB:
433 p := s.Prog(obj.ACALL)
434 p.To.Type = obj.TYPE_MEM
435 p.To.Name = obj.NAME_EXTERN
436 // AuxInt encodes how many buffer entries we need.
437 p.To.Sym = ir.Syms.GCWriteBarrier[v.AuxInt-1]
438 case ssa.OpRISCV64LoweredPanicBoundsA, ssa.OpRISCV64LoweredPanicBoundsB, ssa.OpRISCV64LoweredPanicBoundsC:
439 p := s.Prog(obj.ACALL)
440 p.To.Type = obj.TYPE_MEM
441 p.To.Name = obj.NAME_EXTERN
442 p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt]
443 s.UseArgs(16) // space used in callee args area by assembly stubs
445 case ssa.OpRISCV64LoweredAtomicLoad8:
447 p := s.Prog(riscv.AMOVBU)
448 p.From.Type = obj.TYPE_MEM
449 p.From.Reg = v.Args[0].Reg()
450 p.To.Type = obj.TYPE_REG
454 case ssa.OpRISCV64LoweredAtomicLoad32, ssa.OpRISCV64LoweredAtomicLoad64:
456 if v.Op == ssa.OpRISCV64LoweredAtomicLoad64 {
460 p.From.Type = obj.TYPE_MEM
461 p.From.Reg = v.Args[0].Reg()
462 p.To.Type = obj.TYPE_REG
465 case ssa.OpRISCV64LoweredAtomicStore8:
467 p := s.Prog(riscv.AMOVB)
468 p.From.Type = obj.TYPE_REG
469 p.From.Reg = v.Args[1].Reg()
470 p.To.Type = obj.TYPE_MEM
471 p.To.Reg = v.Args[0].Reg()
// Atomic 32/64-bit store via AMOSWAP, discarding the old value into ZERO.
474 case ssa.OpRISCV64LoweredAtomicStore32, ssa.OpRISCV64LoweredAtomicStore64:
475 as := riscv.AAMOSWAPW
476 if v.Op == ssa.OpRISCV64LoweredAtomicStore64 {
480 p.From.Type = obj.TYPE_REG
481 p.From.Reg = v.Args[1].Reg()
482 p.To.Type = obj.TYPE_MEM
483 p.To.Reg = v.Args[0].Reg()
484 p.RegTo2 = riscv.REG_ZERO
486 case ssa.OpRISCV64LoweredAtomicAdd32, ssa.OpRISCV64LoweredAtomicAdd64:
488 if v.Op == ssa.OpRISCV64LoweredAtomicAdd64 {
492 p.From.Type = obj.TYPE_REG
493 p.From.Reg = v.Args[1].Reg()
494 p.To.Type = obj.TYPE_MEM
495 p.To.Reg = v.Args[0].Reg()
// REG_TMP receives the AMO result (presumably the old memory value);
// the ADD below then computes old+delta as v's result — confirm against
// the RISC-V AMO semantics.
496 p.RegTo2 = riscv.REG_TMP
498 p2 := s.Prog(riscv.AADD)
499 p2.From.Type = obj.TYPE_REG
500 p2.From.Reg = riscv.REG_TMP
501 p2.Reg = v.Args[1].Reg()
502 p2.To.Type = obj.TYPE_REG
505 case ssa.OpRISCV64LoweredAtomicExchange32, ssa.OpRISCV64LoweredAtomicExchange64:
506 as := riscv.AAMOSWAPW
507 if v.Op == ssa.OpRISCV64LoweredAtomicExchange64 {
511 p.From.Type = obj.TYPE_REG
512 p.From.Reg = v.Args[1].Reg()
513 p.To.Type = obj.TYPE_MEM
514 p.To.Reg = v.Args[0].Reg()
517 case ssa.OpRISCV64LoweredAtomicCas32, ssa.OpRISCV64LoweredAtomicCas64:
520 // BNE Rtmp, Rarg1, 3(PC)
521 // SC Rarg2, (Rarg0), Rtmp
522 // BNE Rtmp, ZERO, -3(PC)
527 if v.Op == ssa.OpRISCV64LoweredAtomicCas64 {
532 r0 := v.Args[0].Reg()
533 r1 := v.Args[1].Reg()
534 r2 := v.Args[2].Reg()
// Initialize the result register to 0 (CAS failed) before the LL/SC loop.
537 p := s.Prog(riscv.AMOV)
538 p.From.Type = obj.TYPE_REG
539 p.From.Reg = riscv.REG_ZERO
540 p.To.Type = obj.TYPE_REG
544 p1.From.Type = obj.TYPE_MEM
546 p1.To.Type = obj.TYPE_REG
547 p1.To.Reg = riscv.REG_TMP
549 p2 := s.Prog(riscv.ABNE)
550 p2.From.Type = obj.TYPE_REG
552 p2.Reg = riscv.REG_TMP
553 p2.To.Type = obj.TYPE_BRANCH
556 p3.From.Type = obj.TYPE_REG
558 p3.To.Type = obj.TYPE_MEM
560 p3.RegTo2 = riscv.REG_TMP
// Retry the LL/SC loop if the store-conditional failed.
562 p4 := s.Prog(riscv.ABNE)
563 p4.From.Type = obj.TYPE_REG
564 p4.From.Reg = riscv.REG_TMP
565 p4.Reg = riscv.REG_ZERO
566 p4.To.Type = obj.TYPE_BRANCH
// Success path: set the result register to 1.
569 p5 := s.Prog(riscv.AMOV)
570 p5.From.Type = obj.TYPE_CONST
572 p5.To.Type = obj.TYPE_REG
575 p6 := s.Prog(obj.ANOP)
// Atomic AND/OR performed directly by an AMO, old value discarded.
578 case ssa.OpRISCV64LoweredAtomicAnd32, ssa.OpRISCV64LoweredAtomicOr32:
579 p := s.Prog(v.Op.Asm())
580 p.From.Type = obj.TYPE_REG
581 p.From.Reg = v.Args[1].Reg()
582 p.To.Type = obj.TYPE_MEM
583 p.To.Reg = v.Args[0].Reg()
584 p.RegTo2 = riscv.REG_ZERO
// Zeroing loop: store ZERO through Args[0], advance by sz, loop with BGEU
// until the pointer passes the limit in Args[1].
586 case ssa.OpRISCV64LoweredZero:
587 mov, sz := largestMove(v.AuxInt)
591 // BGEU Rarg1, Rarg0, -2(PC)
594 p.From.Type = obj.TYPE_REG
595 p.From.Reg = riscv.REG_ZERO
596 p.To.Type = obj.TYPE_MEM
597 p.To.Reg = v.Args[0].Reg()
599 p2 := s.Prog(riscv.AADD)
600 p2.From.Type = obj.TYPE_CONST
602 p2.To.Type = obj.TYPE_REG
603 p2.To.Reg = v.Args[0].Reg()
605 p3 := s.Prog(riscv.ABGEU)
606 p3.To.Type = obj.TYPE_BRANCH
607 p3.Reg = v.Args[0].Reg()
608 p3.From.Type = obj.TYPE_REG
609 p3.From.Reg = v.Args[1].Reg()
// Copy loop: load from Args[1] into T2, store to Args[0], advance both
// pointers by sz, loop with BGEU against the limit in Args[2].
612 case ssa.OpRISCV64LoweredMove:
613 mov, sz := largestMove(v.AuxInt)
619 // BGEU Rarg2, Rarg0, -4(PC)
622 p.From.Type = obj.TYPE_MEM
623 p.From.Reg = v.Args[1].Reg()
624 p.To.Type = obj.TYPE_REG
625 p.To.Reg = riscv.REG_T2
628 p2.From.Type = obj.TYPE_REG
629 p2.From.Reg = riscv.REG_T2
630 p2.To.Type = obj.TYPE_MEM
631 p2.To.Reg = v.Args[0].Reg()
633 p3 := s.Prog(riscv.AADD)
634 p3.From.Type = obj.TYPE_CONST
636 p3.To.Type = obj.TYPE_REG
637 p3.To.Reg = v.Args[0].Reg()
639 p4 := s.Prog(riscv.AADD)
640 p4.From.Type = obj.TYPE_CONST
642 p4.To.Type = obj.TYPE_REG
643 p4.To.Reg = v.Args[1].Reg()
645 p5 := s.Prog(riscv.ABGEU)
646 p5.To.Type = obj.TYPE_BRANCH
647 p5.Reg = v.Args[1].Reg()
648 p5.From.Type = obj.TYPE_REG
649 p5.From.Reg = v.Args[2].Reg()
652 case ssa.OpRISCV64LoweredNilCheck:
653 // Issue a load which will fault if arg is nil.
654 // TODO: optimizations. See arm and amd64 LoweredNilCheck.
655 p := s.Prog(riscv.AMOVB)
656 p.From.Type = obj.TYPE_MEM
657 p.From.Reg = v.Args[0].Reg()
658 ssagen.AddAux(&p.From, v)
659 p.To.Type = obj.TYPE_REG
660 p.To.Reg = riscv.REG_ZERO
661 if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos == 1 in generated wrappers
662 base.WarnfAt(v.Pos, "generated nil check")
665 case ssa.OpRISCV64LoweredGetClosurePtr:
666 // Closure pointer is S10 (riscv.REG_CTXT).
667 ssagen.CheckLoweredGetClosurePtr(v)
669 case ssa.OpRISCV64LoweredGetCallerSP:
670 // caller's SP is FixedFrameSize below the address of the first arg
671 p := s.Prog(riscv.AMOV)
672 p.From.Type = obj.TYPE_ADDR
673 p.From.Offset = -base.Ctxt.Arch.FixedFrameSize
674 p.From.Name = obj.NAME_PARAM
675 p.To.Type = obj.TYPE_REG
678 case ssa.OpRISCV64LoweredGetCallerPC:
679 p := s.Prog(obj.AGETCALLERPC)
680 p.To.Type = obj.TYPE_REG
// Duff's-device helpers: AuxInt selects the entry offset into the routine.
683 case ssa.OpRISCV64DUFFZERO:
684 p := s.Prog(obj.ADUFFZERO)
685 p.To.Type = obj.TYPE_MEM
686 p.To.Name = obj.NAME_EXTERN
687 p.To.Sym = ir.Syms.Duffzero
688 p.To.Offset = v.AuxInt
690 case ssa.OpRISCV64DUFFCOPY:
691 p := s.Prog(obj.ADUFFCOPY)
692 p.To.Type = obj.TYPE_MEM
693 p.To.Name = obj.NAME_EXTERN
694 p.To.Sym = ir.Syms.Duffcopy
695 p.To.Offset = v.AuxInt
697 case ssa.OpClobber, ssa.OpClobberReg:
698 // TODO: implement for clobberdead experiment. Nop is ok for now.
701 v.Fatalf("Unhandled op %v", v.Op)
// blockBranch maps an SSA block kind to the corresponding RISC-V
// conditional branch instruction.
705 var blockBranch = [...]obj.As{
706 ssa.BlockRISCV64BEQ: riscv.ABEQ,
707 ssa.BlockRISCV64BEQZ: riscv.ABEQZ,
708 ssa.BlockRISCV64BGE: riscv.ABGE,
709 ssa.BlockRISCV64BGEU: riscv.ABGEU,
710 ssa.BlockRISCV64BGEZ: riscv.ABGEZ,
711 ssa.BlockRISCV64BGTZ: riscv.ABGTZ,
712 ssa.BlockRISCV64BLEZ: riscv.ABLEZ,
713 ssa.BlockRISCV64BLT: riscv.ABLT,
714 ssa.BlockRISCV64BLTU: riscv.ABLTU,
715 ssa.BlockRISCV64BLTZ: riscv.ABLTZ,
716 ssa.BlockRISCV64BNE: riscv.ABNE,
717 ssa.BlockRISCV64BNEZ: riscv.ABNEZ,
// ssaGenBlock emits the control-flow instructions that terminate block b.
// next is the block laid out immediately after b, so a jump to it can be
// elided in favor of fallthrough.
720 func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
725 // defer returns in A0:
726 // 0 if we should continue executing
727 // 1 if we should jump to deferreturn call
728 p := s.Prog(riscv.ABNE)
729 p.To.Type = obj.TYPE_BRANCH
730 p.From.Type = obj.TYPE_REG
731 p.From.Reg = riscv.REG_ZERO
733 s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()})
734 if b.Succs[0].Block() != next {
735 p := s.Prog(obj.AJMP)
736 p.To.Type = obj.TYPE_BRANCH
737 s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
// Plain block: jump only when the successor is not the fallthrough block.
740 if b.Succs[0].Block() != next {
741 p := s.Prog(obj.AJMP)
742 p.To.Type = obj.TYPE_BRANCH
743 s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
745 case ssa.BlockExit, ssa.BlockRetJmp:
748 case ssa.BlockRISCV64BEQ, ssa.BlockRISCV64BEQZ, ssa.BlockRISCV64BNE, ssa.BlockRISCV64BNEZ,
749 ssa.BlockRISCV64BLT, ssa.BlockRISCV64BLEZ, ssa.BlockRISCV64BGE, ssa.BlockRISCV64BGEZ,
750 ssa.BlockRISCV64BLTZ, ssa.BlockRISCV64BGTZ, ssa.BlockRISCV64BLTU, ssa.BlockRISCV64BGEU:
752 as := blockBranch[b.Kind]
753 invAs := riscv.InvertBranch(as)
// When one successor is the fallthrough block, only a single (possibly
// inverted) branch to the other successor is needed.
757 case b.Succs[0].Block():
758 p = s.Br(invAs, b.Succs[1].Block())
759 case b.Succs[1].Block():
760 p = s.Br(as, b.Succs[0].Block())
// Neither successor falls through: branch toward the likely side,
// then unconditionally jump to the other.
762 if b.Likely != ssa.BranchUnlikely {
763 p = s.Br(as, b.Succs[0].Block())
764 s.Br(obj.AJMP, b.Succs[1].Block())
766 p = s.Br(invAs, b.Succs[1].Block())
767 s.Br(obj.AJMP, b.Succs[0].Block())
// Attach the block's control value(s) as the branch operands.
771 p.From.Type = obj.TYPE_REG
773 case ssa.BlockRISCV64BEQ, ssa.BlockRISCV64BNE, ssa.BlockRISCV64BLT, ssa.BlockRISCV64BGE, ssa.BlockRISCV64BLTU, ssa.BlockRISCV64BGEU:
774 if b.NumControls() != 2 {
775 b.Fatalf("Unexpected number of controls (%d != 2): %s", b.NumControls(), b.LongString())
777 p.From.Reg = b.Controls[0].Reg()
778 p.Reg = b.Controls[1].Reg()
// Compare-with-zero branches take a single control value.
780 case ssa.BlockRISCV64BEQZ, ssa.BlockRISCV64BNEZ, ssa.BlockRISCV64BGEZ, ssa.BlockRISCV64BLEZ, ssa.BlockRISCV64BLTZ, ssa.BlockRISCV64BGTZ:
781 if b.NumControls() != 1 {
782 b.Fatalf("Unexpected number of controls (%d != 1): %s", b.NumControls(), b.LongString())
784 p.From.Reg = b.Controls[0].Reg()
788 b.Fatalf("Unhandled block: %s", b.LongString())
// loadRegResult generates a load of the stack-resident result value for n
// (at frame offset n.FrameOffset()+off) into register reg, returning the
// generated instruction.
792 func loadRegResult(s *ssagen.State, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog {
793 p := s.Prog(loadByType(t))
794 p.From.Type = obj.TYPE_MEM
795 p.From.Name = obj.NAME_AUTO
796 p.From.Sym = n.Linksym()
797 p.From.Offset = n.FrameOffset() + off
798 p.To.Type = obj.TYPE_REG
// spillArgReg appends, after p, an instruction that stores register reg into
// the parameter stack slot for n at offset off. The instruction is marked as
// not a statement boundary (WithNotStmt) for debug-info purposes.
803 func spillArgReg(pp *objw.Progs, p *obj.Prog, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog {
804 p = pp.Append(p, storeByType(t), obj.TYPE_REG, reg, 0, obj.TYPE_MEM, 0, n.FrameOffset()+off)
805 p.To.Name = obj.NAME_PARAM
806 p.To.Sym = n.Linksym()
807 p.Pos = p.Pos.WithNotStmt()