
Conversation

lukel97 (Contributor) commented Sep 19, 2023

This is the VP equivalent of #65674. We already combine unit-stride MGATHER loads to MLOAD, so this extends that combine to EXPERIMENTAL_VP_STRIDED_LOAD.

I've added the tests in a separate commit in this PR so you can see the test diff.
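For illustration, here is the shape of the transform on one of the new tests below (the function name and comments are ours, not part of the patch): a strided load whose constant stride equals the element store size is a contiguous load in disguise, so it can be emitted as a plain (still masked and EVL-predicated) vp.load and selected to a unit-stride vle instead of a strided vlse:

    ; Stride of 1 byte on <8 x i8> equals the element store size.
    define <8 x i8> @unit_stride_example(ptr %ptr, <8 x i1> %m, i32 zeroext %evl) {
      ; Previously this selected to a strided "vlse8.v v8, (a0), a2, v0.t" with
      ; the stride 1 materialized into a register; with this combine it selects
      ; to the unit-stride "vle8.v v8, (a0), v0.t".
      %load = call <8 x i8> @llvm.experimental.vp.strided.load.v8i8.p0.i32(ptr %ptr, i32 1, <8 x i1> %m, i32 %evl)
      ret <8 x i8> %load
    }
    declare <8 x i8> @llvm.experimental.vp.strided.load.v8i8.p0.i32(ptr, i32, <8 x i1>, i32)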

llvmbot added the llvm:SelectionDAG label on Sep 19, 2023
llvmbot (Member) commented Sep 19, 2023

@llvm/pr-subscribers-llvm-selectiondag

Changes


Patch is 38.09 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/66766.diff

5 Files Affected:

  • (modified) llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp (+21)
  • (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpload.ll (+87-16)
  • (modified) llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpstore.ll (+81-4)
  • (modified) llvm/test/CodeGen/RISCV/rvv/strided-vpload.ll (+90-20)
  • (modified) llvm/test/CodeGen/RISCV/rvv/strided-vpstore.ll (+87-10)
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 484a6231b7f65fe..df69dbb16042f7e 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -539,6 +539,7 @@ namespace {
     SDValue visitMSCATTER(SDNode *N);
     SDValue visitVPGATHER(SDNode *N);
     SDValue visitVPSCATTER(SDNode *N);
+    SDValue visitVP_STRIDED_LOAD(SDNode *N);
     SDValue visitFP_TO_FP16(SDNode *N);
     SDValue visitFP16_TO_FP(SDNode *N);
     SDValue visitFP_TO_BF16(SDNode *N);
@@ -11959,6 +11960,22 @@ SDValue DAGCombiner::visitMLOAD(SDNode *N) {
   return SDValue();
 }
 
+SDValue DAGCombiner::visitVP_STRIDED_LOAD(SDNode *N) {
+  auto *SLD = cast<VPStridedLoadSDNode>(N);
+  EVT EltVT = SLD->getValueType(0).getVectorElementType();
+  // Combine strided loads with unit-stride to a regular load.
+  if (auto *CStride = dyn_cast<ConstantSDNode>(SLD->getStride());
+      CStride && CStride->getZExtValue() == EltVT.getStoreSize()) {
+    SDValue NewLd = DAG.getLoadVP(
+        SLD->getAddressingMode(), SLD->getExtensionType(), SLD->getValueType(0),
+        SDLoc(N), SLD->getChain(), SLD->getBasePtr(), SLD->getOffset(),
+        SLD->getMask(), SLD->getVectorLength(), SLD->getMemoryVT(),
+        SLD->getMemOperand(), SLD->isExpandingLoad());
+    return CombineTo(N, NewLd, NewLd.getValue(1));
+  }
+  return SDValue();
+}
+
 /// A vector select of 2 constant vectors can be simplified to math/logic to
 /// avoid a variable select instruction and possibly avoid constant loads.
 SDValue DAGCombiner::foldVSelectOfConstants(SDNode *N) {
@@ -25976,6 +25993,10 @@ SDValue DAGCombiner::visitVPOp(SDNode *N) {
     if (SDValue SD = visitVPSCATTER(N))
       return SD;
 
+  if (N->getOpcode() == ISD::EXPERIMENTAL_VP_STRIDED_LOAD)
+    if (SDValue SD = visitVP_STRIDED_LOAD(N))
+      return SD;
+
   // VP operations in which all vector elements are disabled - either by
   // determining that the mask is all false or that the EVL is 0 - can be
   // eliminated.
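To make the guard in visitVP_STRIDED_LOAD concrete (an illustrative pair of our own, not taken from the patch): the combine fires only for a constant stride exactly equal to the element's store size; any other stride, or a non-constant stride, leaves the strided node alone.

    declare <2 x i64> @llvm.experimental.vp.strided.load.v2i64.p0.i32(ptr, i32, <2 x i1>, i32)

    ; <2 x i64> has an 8-byte element store size, so stride 8 satisfies
    ; CStride->getZExtValue() == EltVT.getStoreSize() and is rewritten to a
    ; vp.load (vle64.v on RISC-V).
    define <2 x i64> @stride_is_unit(ptr %p, <2 x i1> %m, i32 zeroext %evl) {
      %v = call <2 x i64> @llvm.experimental.vp.strided.load.v2i64.p0.i32(ptr %p, i32 8, <2 x i1> %m, i32 %evl)
      ret <2 x i64> %v
    }

    ; Stride 16 fails the guard and stays a strided load (vlse64.v).
    define <2 x i64> @stride_is_not_unit(ptr %p, <2 x i1> %m, i32 zeroext %evl) {
      %v = call <2 x i64> @llvm.experimental.vp.strided.load.v2i64.p0.i32(ptr %p, i32 16, <2 x i1> %m, i32 %evl)
      ret <2 x i64> %v
    }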
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpload.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpload.ll
index 96100d2b62e41b2..2ae031798f5bd6c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpload.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpload.ll
@@ -96,6 +96,16 @@ define <8 x i8> @strided_vpload_v8i8(ptr %ptr, i32 signext %stride, <8 x i1> %m,
   ret <8 x i8> %load
 }
 
+define <8 x i8> @strided_vpload_v8i8_unit_stride(ptr %ptr, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: strided_vpload_v8i8_unit_stride:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT:    vle8.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  %load = call <8 x i8> @llvm.experimental.vp.strided.load.v8i8.p0.i32(ptr %ptr, i32 1, <8 x i1> %m, i32 %evl)
+  ret <8 x i8> %load
+}
+
 declare <2 x i16> @llvm.experimental.vp.strided.load.v2i16.p0.i32(ptr, i32, <2 x i1>, i32)
 
 define <2 x i16> @strided_vpload_v2i16(ptr %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) {
@@ -132,6 +142,16 @@ define <8 x i16> @strided_vpload_v8i16(ptr %ptr, i32 signext %stride, <8 x i1> %
   ret <8 x i16> %load
 }
 
+define <8 x i16> @strided_vpload_v8i16_unit_stride(ptr %ptr, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: strided_vpload_v8i16_unit_stride:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    vle16.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  %load = call <8 x i16> @llvm.experimental.vp.strided.load.v8i16.p0.i32(ptr %ptr, i32 2, <8 x i1> %m, i32 %evl)
+  ret <8 x i16> %load
+}
+
 define <8 x i16> @strided_vpload_v8i16_allones_mask(ptr %ptr, i32 signext %stride, i32 zeroext %evl) {
 ; CHECK-LABEL: strided_vpload_v8i16_allones_mask:
 ; CHECK:       # %bb.0:
@@ -168,6 +188,16 @@ define <4 x i32> @strided_vpload_v4i32(ptr %ptr, i32 signext %stride, <4 x i1> %
   ret <4 x i32> %load
 }
 
+define <4 x i32> @strided_vpload_v4i32_unit_stride(ptr %ptr, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: strided_vpload_v4i32_unit_stride:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT:    vle32.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  %load = call <4 x i32> @llvm.experimental.vp.strided.load.v4i32.p0.i32(ptr %ptr, i32 4, <4 x i1> %m, i32 %evl)
+  ret <4 x i32> %load
+}
+
 declare <8 x i32> @llvm.experimental.vp.strided.load.v8i32.p0.i32(ptr, i32, <8 x i1>, i32)
 
 define <8 x i32> @strided_vpload_v8i32(ptr %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) {
@@ -204,6 +234,16 @@ define <2 x i64> @strided_vpload_v2i64(ptr %ptr, i32 signext %stride, <2 x i1> %
   ret <2 x i64> %load
 }
 
+define <2 x i64> @strided_vpload_v2i64_unit_stride(ptr %ptr, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: strided_vpload_v2i64_unit_stride:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT:    vle64.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  %load = call <2 x i64> @llvm.experimental.vp.strided.load.v2i64.p0.i32(ptr %ptr, i32 8, <2 x i1> %m, i32 %evl)
+  ret <2 x i64> %load
+}
+
 declare <4 x i64> @llvm.experimental.vp.strided.load.v4i64.p0.i32(ptr, i32, <4 x i1>, i32)
 
 define <4 x i64> @strided_vpload_v4i64(ptr %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) {
@@ -288,6 +328,16 @@ define <8 x half> @strided_vpload_v8f16(ptr %ptr, i32 signext %stride, <8 x i1>
   ret <8 x half> %load
 }
 
+define <8 x half> @strided_vpload_v8f16_unit_stride(ptr %ptr, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: strided_vpload_v8f16_unit_stride:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    vle16.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  %load = call <8 x half> @llvm.experimental.vp.strided.load.v8f16.p0.i32(ptr %ptr, i32 2, <8 x i1> %m, i32 %evl)
+  ret <8 x half> %load
+}
+
 declare <2 x float> @llvm.experimental.vp.strided.load.v2f32.p0.i32(ptr, i32, <2 x i1>, i32)
 
 define <2 x float> @strided_vpload_v2f32(ptr %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) {
@@ -312,6 +362,16 @@ define <4 x float> @strided_vpload_v4f32(ptr %ptr, i32 signext %stride, <4 x i1>
   ret <4 x float> %load
 }
 
+define <4 x float> @strided_vpload_v4f32_unit_stride(ptr %ptr, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: strided_vpload_v4f32_unit_stride:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT:    vle32.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  %load = call <4 x float> @llvm.experimental.vp.strided.load.v4f32.p0.i32(ptr %ptr, i32 4, <4 x i1> %m, i32 %evl)
+  ret <4 x float> %load
+}
+
 declare <8 x float> @llvm.experimental.vp.strided.load.v8f32.p0.i32(ptr, i32, <8 x i1>, i32)
 
 define <8 x float> @strided_vpload_v8f32(ptr %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) {
@@ -348,6 +408,17 @@ define <2 x double> @strided_vpload_v2f64(ptr %ptr, i32 signext %stride, <2 x i1
   ret <2 x double> %load
 }
 
+define <2 x double> @strided_vpload_v2f64_unit_stride(ptr %ptr, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: strided_vpload_v2f64_unit_stride:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT:    vle64.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  %load = call <2 x double> @llvm.experimental.vp.strided.load.v2f64.p0.i32(ptr %ptr, i32 8, <2 x i1> %m, i32 %evl)
+  ret <2 x double> %load
+}
+
+
 declare <4 x double> @llvm.experimental.vp.strided.load.v4f64.p0.i32(ptr, i32, <4 x i1>, i32)
 
 define <4 x double> @strided_vpload_v4f64(ptr %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) {
@@ -416,10 +487,10 @@ define <32 x double> @strided_vpload_v32f64(ptr %ptr, i32 signext %stride, <32 x
 ; CHECK-NEXT:    li a4, 16
 ; CHECK-NEXT:    vmv1r.v v9, v0
 ; CHECK-NEXT:    mv a3, a2
-; CHECK-NEXT:    bltu a2, a4, .LBB33_2
+; CHECK-NEXT:    bltu a2, a4, .LBB40_2
 ; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    li a3, 16
-; CHECK-NEXT:  .LBB33_2:
+; CHECK-NEXT:  .LBB40_2:
 ; CHECK-NEXT:    mul a4, a3, a1
 ; CHECK-NEXT:    add a4, a0, a4
 ; CHECK-NEXT:    addi a5, a2, -16
@@ -444,10 +515,10 @@ define <32 x double> @strided_vpload_v32f64_allones_mask(ptr %ptr, i32 signext %
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    li a4, 16
 ; CHECK-NEXT:    mv a3, a2
-; CHECK-NEXT:    bltu a2, a4, .LBB34_2
+; CHECK-NEXT:    bltu a2, a4, .LBB41_2
 ; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    li a3, 16
-; CHECK-NEXT:  .LBB34_2:
+; CHECK-NEXT:  .LBB41_2:
 ; CHECK-NEXT:    mul a4, a3, a1
 ; CHECK-NEXT:    add a4, a0, a4
 ; CHECK-NEXT:    addi a5, a2, -16
@@ -474,10 +545,10 @@ define <33 x double> @strided_load_v33f64(ptr %ptr, i64 %stride, <33 x i1> %mask
 ; CHECK-RV32-NEXT:    li a5, 32
 ; CHECK-RV32-NEXT:    vmv1r.v v8, v0
 ; CHECK-RV32-NEXT:    mv a3, a4
-; CHECK-RV32-NEXT:    bltu a4, a5, .LBB35_2
+; CHECK-RV32-NEXT:    bltu a4, a5, .LBB42_2
 ; CHECK-RV32-NEXT:  # %bb.1:
 ; CHECK-RV32-NEXT:    li a3, 32
-; CHECK-RV32-NEXT:  .LBB35_2:
+; CHECK-RV32-NEXT:  .LBB42_2:
 ; CHECK-RV32-NEXT:    mul a5, a3, a2
 ; CHECK-RV32-NEXT:    addi a6, a4, -32
 ; CHECK-RV32-NEXT:    sltu a4, a4, a6
@@ -485,10 +556,10 @@ define <33 x double> @strided_load_v33f64(ptr %ptr, i64 %stride, <33 x i1> %mask
 ; CHECK-RV32-NEXT:    and a6, a4, a6
 ; CHECK-RV32-NEXT:    li a4, 16
 ; CHECK-RV32-NEXT:    add a5, a1, a5
-; CHECK-RV32-NEXT:    bltu a6, a4, .LBB35_4
+; CHECK-RV32-NEXT:    bltu a6, a4, .LBB42_4
 ; CHECK-RV32-NEXT:  # %bb.3:
 ; CHECK-RV32-NEXT:    li a6, 16
-; CHECK-RV32-NEXT:  .LBB35_4:
+; CHECK-RV32-NEXT:  .LBB42_4:
 ; CHECK-RV32-NEXT:    vsetivli zero, 4, e8, mf2, ta, ma
 ; CHECK-RV32-NEXT:    vslidedown.vi v0, v8, 4
 ; CHECK-RV32-NEXT:    vsetvli zero, a6, e64, m8, ta, ma
@@ -497,10 +568,10 @@ define <33 x double> @strided_load_v33f64(ptr %ptr, i64 %stride, <33 x i1> %mask
 ; CHECK-RV32-NEXT:    sltu a6, a3, a5
 ; CHECK-RV32-NEXT:    addi a6, a6, -1
 ; CHECK-RV32-NEXT:    and a5, a6, a5
-; CHECK-RV32-NEXT:    bltu a3, a4, .LBB35_6
+; CHECK-RV32-NEXT:    bltu a3, a4, .LBB42_6
 ; CHECK-RV32-NEXT:  # %bb.5:
 ; CHECK-RV32-NEXT:    li a3, 16
-; CHECK-RV32-NEXT:  .LBB35_6:
+; CHECK-RV32-NEXT:  .LBB42_6:
 ; CHECK-RV32-NEXT:    mul a4, a3, a2
 ; CHECK-RV32-NEXT:    add a4, a1, a4
 ; CHECK-RV32-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
@@ -524,10 +595,10 @@ define <33 x double> @strided_load_v33f64(ptr %ptr, i64 %stride, <33 x i1> %mask
 ; CHECK-RV64-NEXT:    li a5, 32
 ; CHECK-RV64-NEXT:    vmv1r.v v8, v0
 ; CHECK-RV64-NEXT:    mv a4, a3
-; CHECK-RV64-NEXT:    bltu a3, a5, .LBB35_2
+; CHECK-RV64-NEXT:    bltu a3, a5, .LBB42_2
 ; CHECK-RV64-NEXT:  # %bb.1:
 ; CHECK-RV64-NEXT:    li a4, 32
-; CHECK-RV64-NEXT:  .LBB35_2:
+; CHECK-RV64-NEXT:  .LBB42_2:
 ; CHECK-RV64-NEXT:    mul a5, a4, a2
 ; CHECK-RV64-NEXT:    addi a6, a3, -32
 ; CHECK-RV64-NEXT:    sltu a3, a3, a6
@@ -535,10 +606,10 @@ define <33 x double> @strided_load_v33f64(ptr %ptr, i64 %stride, <33 x i1> %mask
 ; CHECK-RV64-NEXT:    and a6, a3, a6
 ; CHECK-RV64-NEXT:    li a3, 16
 ; CHECK-RV64-NEXT:    add a5, a1, a5
-; CHECK-RV64-NEXT:    bltu a6, a3, .LBB35_4
+; CHECK-RV64-NEXT:    bltu a6, a3, .LBB42_4
 ; CHECK-RV64-NEXT:  # %bb.3:
 ; CHECK-RV64-NEXT:    li a6, 16
-; CHECK-RV64-NEXT:  .LBB35_4:
+; CHECK-RV64-NEXT:  .LBB42_4:
 ; CHECK-RV64-NEXT:    vsetivli zero, 4, e8, mf2, ta, ma
 ; CHECK-RV64-NEXT:    vslidedown.vi v0, v8, 4
 ; CHECK-RV64-NEXT:    vsetvli zero, a6, e64, m8, ta, ma
@@ -547,10 +618,10 @@ define <33 x double> @strided_load_v33f64(ptr %ptr, i64 %stride, <33 x i1> %mask
 ; CHECK-RV64-NEXT:    sltu a6, a4, a5
 ; CHECK-RV64-NEXT:    addi a6, a6, -1
 ; CHECK-RV64-NEXT:    and a5, a6, a5
-; CHECK-RV64-NEXT:    bltu a4, a3, .LBB35_6
+; CHECK-RV64-NEXT:    bltu a4, a3, .LBB42_6
 ; CHECK-RV64-NEXT:  # %bb.5:
 ; CHECK-RV64-NEXT:    li a4, 16
-; CHECK-RV64-NEXT:  .LBB35_6:
+; CHECK-RV64-NEXT:  .LBB42_6:
 ; CHECK-RV64-NEXT:    mul a3, a4, a2
 ; CHECK-RV64-NEXT:    add a3, a1, a3
 ; CHECK-RV64-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpstore.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpstore.ll
index 781be5f607da162..c8abb69837875b8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpstore.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpstore.ll
@@ -84,6 +84,17 @@ define void @strided_vpstore_v8i8(<8 x i8> %val, ptr %ptr, i32 signext %stride,
   ret void
 }
 
+define void @strided_vpstore_v8i8_unit_stride(<8 x i8> %val, ptr %ptr, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: strided_vpstore_v8i8_unit_stride:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a2, 1
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
+; CHECK-NEXT:    vsse8.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    ret
+  call void @llvm.experimental.vp.strided.store.v8i8.p0.i32(<8 x i8> %val, ptr %ptr, i32 1, <8 x i1> %m, i32 %evl)
+  ret void
+}
+
 declare void @llvm.experimental.vp.strided.store.v2i16.p0.i32(<2 x i16>, ptr, i32, <2 x i1>, i32)
 
 define void @strided_vpstore_v2i16(<2 x i16> %val, ptr %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) {
@@ -120,6 +131,17 @@ define void @strided_vpstore_v8i16(<8 x i16> %val, ptr %ptr, i32 signext %stride
   ret void
 }
 
+define void @strided_vpstore_v8i16_unit_stride(<8 x i16> %val, ptr %ptr, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: strided_vpstore_v8i16_unit_stride:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a2, 2
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    vsse16.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    ret
+  call void @llvm.experimental.vp.strided.store.v8i16.p0.i32(<8 x i16> %val, ptr %ptr, i32 2, <8 x i1> %m, i32 %evl)
+  ret void
+}
+
 declare void @llvm.experimental.vp.strided.store.v2i32.p0.i32(<2 x i32>, ptr, i32, <2 x i1>, i32)
 
 define void @strided_vpstore_v2i32(<2 x i32> %val, ptr %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) {
@@ -144,6 +166,17 @@ define void @strided_vpstore_v4i32(<4 x i32> %val, ptr %ptr, i32 signext %stride
   ret void
 }
 
+define void @strided_vpstore_v4i32_unit_stride(<4 x i32> %val, ptr %ptr, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: strided_vpstore_v4i32_unit_stride:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a2, 4
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT:    vsse32.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    ret
+  call void @llvm.experimental.vp.strided.store.v4i32.p0.i32(<4 x i32> %val, ptr %ptr, i32 4, <4 x i1> %m, i32 %evl)
+  ret void
+}
+
 declare void @llvm.experimental.vp.strided.store.v8i32.p0.i32(<8 x i32>, ptr, i32, <8 x i1>, i32)
 
 define void @strided_vpstore_v8i32(<8 x i32> %val, ptr %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) {
@@ -168,6 +201,17 @@ define void @strided_vpstore_v2i64(<2 x i64> %val, ptr %ptr, i32 signext %stride
   ret void
 }
 
+define void @strided_vpstore_v2i64_unit_stride(<2 x i64> %val, ptr %ptr, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: strided_vpstore_v2i64_unit_stride:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a2, 8
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT:    vsse64.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    ret
+  call void @llvm.experimental.vp.strided.store.v2i64.p0.i32(<2 x i64> %val, ptr %ptr, i32 8, <2 x i1> %m, i32 %evl)
+  ret void
+}
+
 declare void @llvm.experimental.vp.strided.store.v4i64.p0.i32(<4 x i64>, ptr, i32, <4 x i1>, i32)
 
 define void @strided_vpstore_v4i64(<4 x i64> %val, ptr %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) {
@@ -228,6 +272,17 @@ define void @strided_vpstore_v8f16(<8 x half> %val, ptr %ptr, i32 signext %strid
   ret void
 }
 
+define void @strided_vpstore_v8f16_unit_stride(<8 x half> %val, ptr %ptr, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: strided_vpstore_v8f16_unit_stride:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a2, 2
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    vsse16.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    ret
+  call void @llvm.experimental.vp.strided.store.v8f16.p0.i32(<8 x half> %val, ptr %ptr, i32 2, <8 x i1> %m, i32 %evl)
+  ret void
+}
+
 declare void @llvm.experimental.vp.strided.store.v2f32.p0.i32(<2 x float>, ptr, i32, <2 x i1>, i32)
 
 define void @strided_vpstore_v2f32(<2 x float> %val, ptr %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) {
@@ -252,6 +307,17 @@ define void @strided_vpstore_v4f32(<4 x float> %val, ptr %ptr, i32 signext %stri
   ret void
 }
 
+define void @strided_vpstore_v4f32_unit_stride(<4 x float> %val, ptr %ptr, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: strided_vpstore_v4f32_unit_stride:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a2, 4
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT:    vsse32.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    ret
+  call void @llvm.experimental.vp.strided.store.v4f32.p0.i32(<4 x float> %val, ptr %ptr, i32 4, <4 x i1> %m, i32 %evl)
+  ret void
+}
+
 declare void @llvm.experimental.vp.strided.store.v8f32.p0.i32(<8 x float>, ptr, i32, <8 x i1>, i32)
 
 define void @strided_vpstore_v8f32(<8 x float> %val, ptr %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) {
@@ -276,6 +342,17 @@ define void @strided_vpstore_v2f64(<2 x double> %val, ptr %ptr, i32 signext %str
   ret void
 }
 
+define void @strided_vpstore_v2f64_unit_stride(<2 x double> %val, ptr %ptr, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: strided_vpstore_v2f64_unit_stride:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a2, 8
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT:    vsse64.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    ret
+  call void @llvm.experimental.vp.strided.store.v2f64.p0.i32(<2 x double> %val, ptr %ptr, i32 8, <2 x i1> %m, i32 %evl)
+  ret void
+}
+
 declare void @llvm.experimental.vp.strided.store.v4f64.p0.i32(<4 x double>, ptr, i32, <4 x i1>, i32)
 
 define void @strided_vpstore_v4f64(<4 x double> %val, ptr %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) {
@@ -343,10 +420,10 @@ define void @strided_store_v32f64(<32 x double> %v, ptr %ptr, i32 signext %strid
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    li a4, 16
 ; CHECK-NEXT:    mv a3, a2
-; CHECK-NEXT:    bltu a2, a4, .LBB27_2
+; CHECK-NEXT:    bltu a2, a4, .LBB34_2
 ; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    li a3, 16
-; CHECK-NEXT:  .LBB27_2:
+; CHECK-NEXT:  .LBB34_2:
 ; CHECK-NEXT:    vsetvli zero, a3, e64, m8, ta, ma
 ; CHECK-NEXT:    vsse64.v v8, (a0), a1, v0.t
 ; CHECK-NEXT:    mul a3, a3, a1
@@ -369,10 +446,10 @@ define void @strided_store_v32f64_allones_mask(<32 x double> %v, ptr %ptr, i32 s
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    li a4, 16
 ; CHECK-NEXT:    mv a3, a2
-; CHECK-NEXT:    bltu a2, a4, .LBB28_2
+; CHECK-NEXT:    bltu a2, a4, .LBB35_2
 ; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    li a3, 16
-; CHECK-NEXT:  .LBB28_2:
+; CHECK-NEXT:  .LBB35_2:
 ; CHECK-NEXT:    vsetvli zero, a3, e64, m8, ta, ma
 ; CHECK-NEXT:    vsse64.v v8, (a0), a1
 ; CHECK-NEXT:    mul a3, a3, a1
diff --git a/llvm/test/CodeGen/RISCV/rvv/strided-vpload.ll b/llvm/test/CodeGen/RISCV/rvv/strided-vpload.ll
index d8431ad7662d923..47074d612bb646d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/strided-vpload.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/strided-vpload.ll
@@ -126,6 +126,16 @@ define <vscale x 8 x i8> @strided_vpload_nxv8i8(ptr %ptr, i32 signext %stride, <
   ret <vscale x 8 x i8> %load
 }
 
+define <vscale x 8 x i8> @strided_vpload_nxv8i8_unit_stride(ptr %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: strided_vpload_nxv8i8_unit_stride:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT:    vle8.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  %load = call <vscale x 8 x i8> @llvm.experimental.vp.strided.load.nxv8i8.p0.i32(ptr %ptr, i32 1, <vscale x 8 x i1> %m, i32 %evl)
+  ret <vscale x 8 x i8> %load
+}
+
 define <vscale x 8 x i8> @strided_vpload_nxv8i8_allones_mask(ptr %ptr, i32 signext %stride, i32 zeroext %evl) {
 ; CHECK-LABEL: strided_vpload_nxv8i8_allones_mask:
 ; CHECK:       # %bb.0:
@@ -186,6 +196,16 @@ define <vscale x 4 x i16> @strided_vpload_nxv4i16(ptr %ptr, i32 signext %stride,
   ret <vscale x 4 x i16> %load
 }
 
+define <vscale x 4 x i16> @strided_vpload_nxv4i16_unit_stride(ptr %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: strided_vpload_nxv4i16_unit_stride:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
+; CHECK-NEXT:    vle16.v v8, (a0), v0.t
+; CHECK-NEXT:    ret
+  %load = call <vscale x 4 x i... [truncated]
lukel97 force-pushed the unit-strided-vp-load branch from 4945618 to 6ef5440 on September 19, 2023 at 12:54
lukel97 requested a review from asb on September 19, 2023 at 13:10
    SDValue DAGCombiner::visitVP_STRIDED_LOAD(SDNode *N) {
      auto *SLD = cast<VPStridedLoadSDNode>(N);
      EVT EltVT = SLD->getValueType(0).getVectorElementType();
      // Combine strided loads with unit-stride to a regular load.
Review comment (Collaborator):

Misleading comment - the result is a vp.load not a regular load. i.e. it is still predicated.
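One possible rewording along those lines (a sketch, not necessarily the wording that landed):

    // Combine a strided load with a unit stride into a regular vp.load: the
    // result is still masked and EVL-predicated, just no longer strided.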

preames (Collaborator) left a comment:


LGTM w/comment addressed.

