30 changes: 15 additions & 15 deletions llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -2982,21 +2982,21 @@ multiclass VPseudoVFWALU_WV_WF_RM {
multiclass VPseudoVMRG_VM_XM_IM {
foreach m = MxList in {
defvar mx = m.MX;
def "_VVM" # "_" # m.MX:
VPseudoTiedBinaryCarryIn<GetVRegNoV0<m.vrclass>.R,
m.vrclass, m.vrclass, m>,
SchedBinary<"WriteVIMergeV", "ReadVIMergeV", "ReadVIMergeV", mx,
forcePassthruRead=true>;
def "_VXM" # "_" # m.MX:
VPseudoTiedBinaryCarryIn<GetVRegNoV0<m.vrclass>.R,
m.vrclass, GPR, m>,
SchedBinary<"WriteVIMergeX", "ReadVIMergeV", "ReadVIMergeX", mx,
forcePassthruRead=true>;
def "_VIM" # "_" # m.MX:
VPseudoTiedBinaryCarryIn<GetVRegNoV0<m.vrclass>.R,
m.vrclass, simm5, m>,
SchedUnary<"WriteVIMergeI", "ReadVIMergeV", mx,
forcePassthruRead=true>;
def "_VVM"#"_"#m.MX : VPseudoTiedBinaryCarryIn<GetVRegNoV0<m.vrclass>.R,
GetVRegNoV0<m.vrclass>.R,
GetVRegNoV0<m.vrclass>.R, m>,
SchedBinary<"WriteVIMergeV", "ReadVIMergeV", "ReadVIMergeV", mx,
forcePassthruRead = true>;
def "_VXM"#"_"#m.MX
: VPseudoTiedBinaryCarryIn<GetVRegNoV0<m.vrclass>.R,
GetVRegNoV0<m.vrclass>.R, GPR, m>,
SchedBinary<"WriteVIMergeX", "ReadVIMergeV", "ReadVIMergeX", mx,
forcePassthruRead = true>;
def "_VIM"#"_"#m.MX
: VPseudoTiedBinaryCarryIn<GetVRegNoV0<m.vrclass>.R,
GetVRegNoV0<m.vrclass>.R, simm5, m>,
SchedUnary<"WriteVIMergeI", "ReadVIMergeV", mx,
forcePassthruRead = true>;
}
}

@@ -11,15 +11,15 @@ body: |
bb.0.entry:
; RV32I-LABEL: name: select_nxv1i8
; RV32I: [[DEF:%[0-9]+]]:vmv0 = IMPLICIT_DEF
; RV32I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
; RV32I-NEXT: [[DEF2:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
; RV32I-NEXT: [[PseudoVMERGE_VVM_MF4_:%[0-9]+]]:vrnov0 = PseudoVMERGE_VVM_MF4 [[DEF2]], [[DEF1]], [[DEF1]], [[DEF]], -1, 3 /* e8 */
; RV32I-NEXT: $v8 = COPY [[PseudoVMERGE_VVM_MF4_]]
; RV32I-NEXT: PseudoRET implicit $v8
;
; RV64I-LABEL: name: select_nxv1i8
; RV64I: [[DEF:%[0-9]+]]:vmv0 = IMPLICIT_DEF
; RV64I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
; RV64I-NEXT: [[DEF2:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
; RV64I-NEXT: [[PseudoVMERGE_VVM_MF4_:%[0-9]+]]:vrnov0 = PseudoVMERGE_VVM_MF4 [[DEF2]], [[DEF1]], [[DEF1]], [[DEF]], -1, 3 /* e8 */
; RV64I-NEXT: $v8 = COPY [[PseudoVMERGE_VVM_MF4_]]
@@ -40,15 +40,15 @@ body: |
bb.0.entry:
; RV32I-LABEL: name: select_nxv4i8
; RV32I: [[DEF:%[0-9]+]]:vmv0 = IMPLICIT_DEF
; RV32I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
; RV32I-NEXT: [[DEF2:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
; RV32I-NEXT: [[PseudoVMERGE_VVM_M1_:%[0-9]+]]:vrnov0 = PseudoVMERGE_VVM_M1 [[DEF2]], [[DEF1]], [[DEF1]], [[DEF]], -1, 3 /* e8 */
; RV32I-NEXT: $v8 = COPY [[PseudoVMERGE_VVM_M1_]]
; RV32I-NEXT: PseudoRET implicit $v8
;
; RV64I-LABEL: name: select_nxv4i8
; RV64I: [[DEF:%[0-9]+]]:vmv0 = IMPLICIT_DEF
; RV64I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
; RV64I-NEXT: [[DEF2:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
; RV64I-NEXT: [[PseudoVMERGE_VVM_M1_:%[0-9]+]]:vrnov0 = PseudoVMERGE_VVM_M1 [[DEF2]], [[DEF1]], [[DEF1]], [[DEF]], -1, 3 /* e8 */
; RV64I-NEXT: $v8 = COPY [[PseudoVMERGE_VVM_M1_]]
@@ -69,15 +69,15 @@ body: |
bb.0.entry:
; RV32I-LABEL: name: select_nxv16i8
; RV32I: [[DEF:%[0-9]+]]:vmv0 = IMPLICIT_DEF
; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrm4 = IMPLICIT_DEF
; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrm4nov0 = IMPLICIT_DEF
; RV32I-NEXT: [[DEF2:%[0-9]+]]:vrm4nov0 = IMPLICIT_DEF
; RV32I-NEXT: [[PseudoVMERGE_VVM_M4_:%[0-9]+]]:vrm4nov0 = PseudoVMERGE_VVM_M4 [[DEF2]], [[DEF1]], [[DEF1]], [[DEF]], -1, 3 /* e8 */
; RV32I-NEXT: $v8m4 = COPY [[PseudoVMERGE_VVM_M4_]]
; RV32I-NEXT: PseudoRET implicit $v8m4
;
; RV64I-LABEL: name: select_nxv16i8
; RV64I: [[DEF:%[0-9]+]]:vmv0 = IMPLICIT_DEF
; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrm4 = IMPLICIT_DEF
; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrm4nov0 = IMPLICIT_DEF
; RV64I-NEXT: [[DEF2:%[0-9]+]]:vrm4nov0 = IMPLICIT_DEF
; RV64I-NEXT: [[PseudoVMERGE_VVM_M4_:%[0-9]+]]:vrm4nov0 = PseudoVMERGE_VVM_M4 [[DEF2]], [[DEF1]], [[DEF1]], [[DEF]], -1, 3 /* e8 */
; RV64I-NEXT: $v8m4 = COPY [[PseudoVMERGE_VVM_M4_]]
@@ -98,15 +98,15 @@ body: |
bb.0.entry:
; RV32I-LABEL: name: select_nxv64i8
; RV32I: [[DEF:%[0-9]+]]:vmv0 = IMPLICIT_DEF
; RV32I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
; RV32I-NEXT: [[DEF2:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
; RV32I-NEXT: [[PseudoVMERGE_VVM_MF4_:%[0-9]+]]:vrnov0 = PseudoVMERGE_VVM_MF4 [[DEF2]], [[DEF1]], [[DEF1]], [[DEF]], -1, 4 /* e16 */
; RV32I-NEXT: $v8 = COPY [[PseudoVMERGE_VVM_MF4_]]
; RV32I-NEXT: PseudoRET implicit $v8
;
; RV64I-LABEL: name: select_nxv64i8
; RV64I: [[DEF:%[0-9]+]]:vmv0 = IMPLICIT_DEF
; RV64I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
; RV64I-NEXT: [[DEF2:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
; RV64I-NEXT: [[PseudoVMERGE_VVM_MF4_:%[0-9]+]]:vrnov0 = PseudoVMERGE_VVM_MF4 [[DEF2]], [[DEF1]], [[DEF1]], [[DEF]], -1, 4 /* e16 */
; RV64I-NEXT: $v8 = COPY [[PseudoVMERGE_VVM_MF4_]]
@@ -127,15 +127,15 @@ body: |
bb.0.entry:
; RV32I-LABEL: name: select_nxv2i16
; RV32I: [[DEF:%[0-9]+]]:vmv0 = IMPLICIT_DEF
; RV32I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
; RV32I-NEXT: [[DEF2:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
; RV32I-NEXT: [[PseudoVMERGE_VVM_M1_:%[0-9]+]]:vrnov0 = PseudoVMERGE_VVM_M1 [[DEF2]], [[DEF1]], [[DEF1]], [[DEF]], -1, 4 /* e16 */
; RV32I-NEXT: $v8 = COPY [[PseudoVMERGE_VVM_M1_]]
; RV32I-NEXT: PseudoRET implicit $v8
;
; RV64I-LABEL: name: select_nxv2i16
; RV64I: [[DEF:%[0-9]+]]:vmv0 = IMPLICIT_DEF
; RV64I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
; RV64I-NEXT: [[DEF2:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
; RV64I-NEXT: [[PseudoVMERGE_VVM_M1_:%[0-9]+]]:vrnov0 = PseudoVMERGE_VVM_M1 [[DEF2]], [[DEF1]], [[DEF1]], [[DEF]], -1, 4 /* e16 */
; RV64I-NEXT: $v8 = COPY [[PseudoVMERGE_VVM_M1_]]
@@ -156,15 +156,15 @@ body: |
bb.0.entry:
; RV32I-LABEL: name: select_nxv8i16
; RV32I: [[DEF:%[0-9]+]]:vmv0 = IMPLICIT_DEF
; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrm4 = IMPLICIT_DEF
; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrm4nov0 = IMPLICIT_DEF
; RV32I-NEXT: [[DEF2:%[0-9]+]]:vrm4nov0 = IMPLICIT_DEF
; RV32I-NEXT: [[PseudoVMERGE_VVM_M4_:%[0-9]+]]:vrm4nov0 = PseudoVMERGE_VVM_M4 [[DEF2]], [[DEF1]], [[DEF1]], [[DEF]], -1, 4 /* e16 */
; RV32I-NEXT: $v8m4 = COPY [[PseudoVMERGE_VVM_M4_]]
; RV32I-NEXT: PseudoRET implicit $v8m4
;
; RV64I-LABEL: name: select_nxv8i16
; RV64I: [[DEF:%[0-9]+]]:vmv0 = IMPLICIT_DEF
; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrm4 = IMPLICIT_DEF
; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrm4nov0 = IMPLICIT_DEF
; RV64I-NEXT: [[DEF2:%[0-9]+]]:vrm4nov0 = IMPLICIT_DEF
; RV64I-NEXT: [[PseudoVMERGE_VVM_M4_:%[0-9]+]]:vrm4nov0 = PseudoVMERGE_VVM_M4 [[DEF2]], [[DEF1]], [[DEF1]], [[DEF]], -1, 4 /* e16 */
; RV64I-NEXT: $v8m4 = COPY [[PseudoVMERGE_VVM_M4_]]
@@ -185,15 +185,15 @@ body: |
bb.0.entry:
; RV32I-LABEL: name: select_nxv32i16
; RV32I: [[DEF:%[0-9]+]]:vmv0 = IMPLICIT_DEF
; RV32I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
; RV32I-NEXT: [[DEF2:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
; RV32I-NEXT: [[PseudoVMERGE_VVM_MF2_:%[0-9]+]]:vrnov0 = PseudoVMERGE_VVM_MF2 [[DEF2]], [[DEF1]], [[DEF1]], [[DEF]], -1, 5 /* e32 */
; RV32I-NEXT: $v8 = COPY [[PseudoVMERGE_VVM_MF2_]]
; RV32I-NEXT: PseudoRET implicit $v8
;
; RV64I-LABEL: name: select_nxv32i16
; RV64I: [[DEF:%[0-9]+]]:vmv0 = IMPLICIT_DEF
; RV64I-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
; RV64I-NEXT: [[DEF2:%[0-9]+]]:vrnov0 = IMPLICIT_DEF
; RV64I-NEXT: [[PseudoVMERGE_VVM_MF2_:%[0-9]+]]:vrnov0 = PseudoVMERGE_VVM_MF2 [[DEF2]], [[DEF1]], [[DEF1]], [[DEF]], -1, 5 /* e32 */
; RV64I-NEXT: $v8 = COPY [[PseudoVMERGE_VVM_MF2_]]
@@ -214,15 +214,15 @@ body: |
bb.0.entry:
; RV32I-LABEL: name: select_nxv2i32
; RV32I: [[DEF:%[0-9]+]]:vmv0 = IMPLICIT_DEF
; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrm2 = IMPLICIT_DEF
; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrm2nov0 = IMPLICIT_DEF
; RV32I-NEXT: [[DEF2:%[0-9]+]]:vrm2nov0 = IMPLICIT_DEF
; RV32I-NEXT: [[PseudoVMERGE_VVM_M2_:%[0-9]+]]:vrm2nov0 = PseudoVMERGE_VVM_M2 [[DEF2]], [[DEF1]], [[DEF1]], [[DEF]], -1, 5 /* e32 */
; RV32I-NEXT: $v8m2 = COPY [[PseudoVMERGE_VVM_M2_]]
; RV32I-NEXT: PseudoRET implicit $v8m2
;
; RV64I-LABEL: name: select_nxv2i32
; RV64I: [[DEF:%[0-9]+]]:vmv0 = IMPLICIT_DEF
; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrm2 = IMPLICIT_DEF
; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrm2nov0 = IMPLICIT_DEF
; RV64I-NEXT: [[DEF2:%[0-9]+]]:vrm2nov0 = IMPLICIT_DEF
; RV64I-NEXT: [[PseudoVMERGE_VVM_M2_:%[0-9]+]]:vrm2nov0 = PseudoVMERGE_VVM_M2 [[DEF2]], [[DEF1]], [[DEF1]], [[DEF]], -1, 5 /* e32 */
; RV64I-NEXT: $v8m2 = COPY [[PseudoVMERGE_VVM_M2_]]
@@ -243,15 +243,15 @@ body: |
bb.0.entry:
; RV32I-LABEL: name: select_nxv8i32
; RV32I: [[DEF:%[0-9]+]]:vmv0 = IMPLICIT_DEF
; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrm8 = IMPLICIT_DEF
; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrm8nov0 = IMPLICIT_DEF
; RV32I-NEXT: [[DEF2:%[0-9]+]]:vrm8nov0 = IMPLICIT_DEF
; RV32I-NEXT: [[PseudoVMERGE_VVM_M8_:%[0-9]+]]:vrm8nov0 = PseudoVMERGE_VVM_M8 [[DEF2]], [[DEF1]], [[DEF1]], [[DEF]], -1, 5 /* e32 */
; RV32I-NEXT: $v8m8 = COPY [[PseudoVMERGE_VVM_M8_]]
; RV32I-NEXT: PseudoRET implicit $v8m8
;
; RV64I-LABEL: name: select_nxv8i32
; RV64I: [[DEF:%[0-9]+]]:vmv0 = IMPLICIT_DEF
; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrm8 = IMPLICIT_DEF
; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrm8nov0 = IMPLICIT_DEF
; RV64I-NEXT: [[DEF2:%[0-9]+]]:vrm8nov0 = IMPLICIT_DEF
; RV64I-NEXT: [[PseudoVMERGE_VVM_M8_:%[0-9]+]]:vrm8nov0 = PseudoVMERGE_VVM_M8 [[DEF2]], [[DEF1]], [[DEF1]], [[DEF]], -1, 5 /* e32 */
; RV64I-NEXT: $v8m8 = COPY [[PseudoVMERGE_VVM_M8_]]
@@ -272,15 +272,15 @@ body: |
bb.0.entry:
; RV32I-LABEL: name: select_nxv1i64
; RV32I: [[DEF:%[0-9]+]]:vmv0 = IMPLICIT_DEF
; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrm2 = IMPLICIT_DEF
; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrm2nov0 = IMPLICIT_DEF
; RV32I-NEXT: [[DEF2:%[0-9]+]]:vrm2nov0 = IMPLICIT_DEF
; RV32I-NEXT: [[PseudoVMERGE_VVM_M2_:%[0-9]+]]:vrm2nov0 = PseudoVMERGE_VVM_M2 [[DEF2]], [[DEF1]], [[DEF1]], [[DEF]], -1, 6 /* e64 */
; RV32I-NEXT: $v8m2 = COPY [[PseudoVMERGE_VVM_M2_]]
; RV32I-NEXT: PseudoRET implicit $v8m2
;
; RV64I-LABEL: name: select_nxv1i64
; RV64I: [[DEF:%[0-9]+]]:vmv0 = IMPLICIT_DEF
; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrm2 = IMPLICIT_DEF
; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrm2nov0 = IMPLICIT_DEF
; RV64I-NEXT: [[DEF2:%[0-9]+]]:vrm2nov0 = IMPLICIT_DEF
; RV64I-NEXT: [[PseudoVMERGE_VVM_M2_:%[0-9]+]]:vrm2nov0 = PseudoVMERGE_VVM_M2 [[DEF2]], [[DEF1]], [[DEF1]], [[DEF]], -1, 6 /* e64 */
; RV64I-NEXT: $v8m2 = COPY [[PseudoVMERGE_VVM_M2_]]
@@ -301,15 +301,15 @@ body: |
bb.0.entry:
; RV32I-LABEL: name: select_nxv4i64
; RV32I: [[DEF:%[0-9]+]]:vmv0 = IMPLICIT_DEF
; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrm8 = IMPLICIT_DEF
; RV32I-NEXT: [[DEF1:%[0-9]+]]:vrm8nov0 = IMPLICIT_DEF
; RV32I-NEXT: [[DEF2:%[0-9]+]]:vrm8nov0 = IMPLICIT_DEF
; RV32I-NEXT: [[PseudoVMERGE_VVM_M8_:%[0-9]+]]:vrm8nov0 = PseudoVMERGE_VVM_M8 [[DEF2]], [[DEF1]], [[DEF1]], [[DEF]], -1, 6 /* e64 */
; RV32I-NEXT: $v8m8 = COPY [[PseudoVMERGE_VVM_M8_]]
; RV32I-NEXT: PseudoRET implicit $v8m8
;
; RV64I-LABEL: name: select_nxv4i64
; RV64I: [[DEF:%[0-9]+]]:vmv0 = IMPLICIT_DEF
; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrm8 = IMPLICIT_DEF
; RV64I-NEXT: [[DEF1:%[0-9]+]]:vrm8nov0 = IMPLICIT_DEF
; RV64I-NEXT: [[DEF2:%[0-9]+]]:vrm8nov0 = IMPLICIT_DEF
; RV64I-NEXT: [[PseudoVMERGE_VVM_M8_:%[0-9]+]]:vrm8nov0 = PseudoVMERGE_VVM_M8 [[DEF2]], [[DEF1]], [[DEF1]], [[DEF]], -1, 6 /* e64 */
; RV64I-NEXT: $v8m8 = COPY [[PseudoVMERGE_VVM_M8_]]