From 61dc235f20fa7822478b64554473cf917f67e11e Mon Sep 17 00:00:00 2001
From: Matt Arsenault <Matthew.Arsenault@amd.com>
Date: Mon, 12 Oct 2015 23:59:50 +0000
Subject: [PATCH] DAGCombiner: Combine extract_vector_elt from build_vector

This basic combine was surprisingly missing. AMDGPU legalizes many
operations in terms of 32-bit vector components, so not doing this
results in many extra copies and subregister extracts that need to be
cleaned up later.

InstCombine already does this for the hasOneUse case. The target hook
exists to fix a handful of tests which would otherwise break
(e.g. ARM/vmov.ll), turning from a single instruction that materializes
a repeated immediate vector into a constant vector load with more
scalar copies from it.

llvm-svn: 250129
---
 llvm/include/llvm/Target/TargetLowering.h     |  6 ++++
 llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp | 25 +++++++++++---
 llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp | 12 +++++++
 llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h   |  1 +
 llvm/test/CodeGen/AArch64/fold-constants.ll   |  3 --
 llvm/test/CodeGen/AMDGPU/ds_read2.ll          |  6 ++--
 llvm/test/CodeGen/AMDGPU/fceil64.ll           | 12 +++----
 llvm/test/CodeGen/AMDGPU/ftrunc.f64.ll        | 12 +++----
 llvm/test/CodeGen/AMDGPU/gep-address-space.ll | 34 ++++++++++++++-----
 9 files changed, 78 insertions(+), 33 deletions(-)

diff --git a/llvm/include/llvm/Target/TargetLowering.h b/llvm/include/llvm/Target/TargetLowering.h
index ae83833fd637..10194e37fef4 100644
--- a/llvm/include/llvm/Target/TargetLowering.h
+++ b/llvm/include/llvm/Target/TargetLowering.h
@@ -1720,6 +1720,12 @@ public:
     return false;
   }
 
+  // Return true if it is profitable to use a scalar input to a BUILD_VECTOR
+  // even if the vector itself has multiple uses.
+  virtual bool aggressivelyPreferBuildVectorSources(EVT VecVT) const {
+    return false;
+  }
+
   //===--------------------------------------------------------------------===//
   // Runtime Library hooks
   //
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 8f745a149ac9..a26f378b49aa 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -11886,7 +11886,24 @@ SDValue DAGCombiner::visitEXTRACT_VECTOR_ELT(SDNode *N) {
   }
 
   SDValue EltNo = N->getOperand(1);
-  bool ConstEltNo = isa<ConstantSDNode>(EltNo);
+  ConstantSDNode *ConstEltNo = dyn_cast<ConstantSDNode>(EltNo);
+
+  // extract_vector_elt (build_vector x, y), 1 -> y
+  if (ConstEltNo &&
+      InVec.getOpcode() == ISD::BUILD_VECTOR &&
+      TLI.isTypeLegal(VT) &&
+      (InVec.hasOneUse() ||
+       TLI.aggressivelyPreferBuildVectorSources(VT))) {
+    SDValue Elt = InVec.getOperand(ConstEltNo->getZExtValue());
+    EVT InEltVT = Elt.getValueType();
+
+    // Sometimes build_vector's scalar input types do not match result type.
+    if (NVT == InEltVT)
+      return Elt;
+
+    // TODO: It may be useful to truncate if free if the build_vector implicitly
+    // converts.
+  }
 
   // Transform: (EXTRACT_VECTOR_ELT( VECTOR_SHUFFLE )) -> EXTRACT_VECTOR_ELT.
   // We only perform this optimization before the op legalization phase because
@@ -11894,13 +11911,11 @@ SDValue DAGCombiner::visitEXTRACT_VECTOR_ELT(SDNode *N) {
   // patterns. For example on AVX, extracting elements from a wide vector
   // without using extract_subvector. However, if we can find an underlying
   // scalar value, then we can always use that.
-  if (InVec.getOpcode() == ISD::VECTOR_SHUFFLE
-      && ConstEltNo) {
-    int Elt = cast<ConstantSDNode>(EltNo)->getZExtValue();
+  if (ConstEltNo && InVec.getOpcode() == ISD::VECTOR_SHUFFLE) {
     int NumElem = VT.getVectorNumElements();
     ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(InVec);
     // Find the new index to extract from.
-    int OrigElt = SVOp->getMaskElt(Elt);
+    int OrigElt = SVOp->getMaskElt(ConstEltNo->getZExtValue());
 
     // Extracting an undef index is undef.
     if (OrigElt == -1)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index 8300431ec01d..a8af7ec75f04 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -533,6 +533,18 @@ bool AMDGPUTargetLowering::storeOfVectorConstantIsCheap(EVT MemVT,
   return true;
 }
 
+bool AMDGPUTargetLowering::aggressivelyPreferBuildVectorSources(EVT VecVT) const {
+  // There are few operations which truly have vector input operands. Any vector
+  // operation is going to involve operations on each component, and a
+  // build_vector will be a copy per element, so it always makes sense to use a
+  // build_vector input in place of the extracted element to avoid a copy into a
+  // super register.
+  //
+  // We should probably only do this if all users are extracts only, but this
+  // should be the common case.
+  return true;
+}
+
 bool AMDGPUTargetLowering::isTruncateFree(EVT Source, EVT Dest) const {
   // Truncate is just accessing a subregister.
   return Dest.bitsLT(Source) && (Dest.getSizeInBits() % 32 == 0);
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h
index 3f5b1f59e068..1e060c4d7087 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h
@@ -138,6 +138,7 @@ public:
   bool storeOfVectorConstantIsCheap(EVT MemVT,
                                     unsigned NumElem,
                                     unsigned AS) const override;
+  bool aggressivelyPreferBuildVectorSources(EVT VecVT) const override;
   bool isCheapToSpeculateCttz() const override;
   bool isCheapToSpeculateCtlz() const override;
diff --git a/llvm/test/CodeGen/AArch64/fold-constants.ll b/llvm/test/CodeGen/AArch64/fold-constants.ll
index 2dd0d1245930..3f70f0a7e9f9 100644
--- a/llvm/test/CodeGen/AArch64/fold-constants.ll
+++ b/llvm/test/CodeGen/AArch64/fold-constants.ll
@@ -3,9 +3,6 @@
 define i64 @dotests_616() {
 ; CHECK-LABEL: dotests_616
 ; CHECK: movi d0, #0000000000000000
-; CHECK-NEXT: umov w8, v0.b[2]
-; CHECK-NEXT: sbfx w8, w8, #0, #1
-; CHECK-NEXT: fmov s0, w8
 ; CHECK-NEXT: fmov x0, d0
 ; CHECK-NEXT: ret
 entry:
diff --git a/llvm/test/CodeGen/AMDGPU/ds_read2.ll b/llvm/test/CodeGen/AMDGPU/ds_read2.ll
index ec04f8b1acd6..579f989faeb5 100644
--- a/llvm/test/CodeGen/AMDGPU/ds_read2.ll
+++ b/llvm/test/CodeGen/AMDGPU/ds_read2.ll
@@ -216,10 +216,8 @@ define void @read2_ptr_is_subreg_arg_offset_f32(float addrspace(1)* %out, <2 x f
   ret void
 }
 
-; We should be able to merge in this case, but probably not worth the effort.
-; SI-NOT: ds_read2_b32
-; SI: ds_read_b32
-; SI: ds_read_b32
+; SI-LABEL: {{^}}read2_ptr_is_subreg_f32:
+; SI: ds_read2_b32 {{v\[[0-9]+:[0-9]+\]}}, {{v[0-9]+}} offset1:8{{$}}
 ; SI: s_endpgm
 define void @read2_ptr_is_subreg_f32(float addrspace(1)* %out) #0 {
   %x.i = tail call i32 @llvm.r600.read.tidig.x() #1
diff --git a/llvm/test/CodeGen/AMDGPU/fceil64.ll b/llvm/test/CodeGen/AMDGPU/fceil64.ll
index e8c34f0141e4..c8ef5b101c4d 100644
--- a/llvm/test/CodeGen/AMDGPU/fceil64.ll
+++ b/llvm/test/CodeGen/AMDGPU/fceil64.ll
@@ -17,12 +17,12 @@ declare <16 x double> @llvm.ceil.v16f64(<16 x double>) nounwind readnone
 ; SI: s_lshr_b64
 ; SI: s_not_b64
 ; SI: s_and_b64
-; SI: cmp_gt_i32
-; SI: cndmask_b32
-; SI: cndmask_b32
-; SI: cmp_lt_i32
-; SI: cndmask_b32
-; SI: cndmask_b32
+; SI-DAG: cmp_gt_i32
+; SI-DAG: cndmask_b32
+; SI-DAG: cndmask_b32
+; SI-DAG: cmp_lt_i32
+; SI-DAG: cndmask_b32
+; SI-DAG: cndmask_b32
 ; SI-DAG: v_cmp_lt_f64
 ; SI-DAG: v_cmp_lg_f64
 ; SI: s_and_b64
diff --git a/llvm/test/CodeGen/AMDGPU/ftrunc.f64.ll b/llvm/test/CodeGen/AMDGPU/ftrunc.f64.ll
index 6618d8b5e57e..83a8ad8901d2 100644
--- a/llvm/test/CodeGen/AMDGPU/ftrunc.f64.ll
+++ b/llvm/test/CodeGen/AMDGPU/ftrunc.f64.ll
@@ -29,12 +29,12 @@ define void @v_ftrunc_f64(double addrspace(1)* %out, double addrspace(1)* %in) {
 ; SI: s_lshr_b64
 ; SI: s_not_b64
 ; SI: s_and_b64
-; SI: cmp_gt_i32
-; SI: cndmask_b32
-; SI: cndmask_b32
-; SI: cmp_lt_i32
-; SI: cndmask_b32
-; SI: cndmask_b32
+; SI-DAG: cmp_gt_i32
+; SI-DAG: cndmask_b32
+; SI-DAG: cndmask_b32
+; SI-DAG: cmp_lt_i32
+; SI-DAG: cndmask_b32
+; SI-DAG: cndmask_b32
 ; SI: s_endpgm
 define void @ftrunc_f64(double addrspace(1)* %out, double %x) {
   %y = call double @llvm.trunc.f64(double %x) nounwind readnone
diff --git a/llvm/test/CodeGen/AMDGPU/gep-address-space.ll b/llvm/test/CodeGen/AMDGPU/gep-address-space.ll
index 471b0f6b13e7..f5ab390ce686 100644
--- a/llvm/test/CodeGen/AMDGPU/gep-address-space.ll
+++ b/llvm/test/CodeGen/AMDGPU/gep-address-space.ll
@@ -11,24 +11,35 @@ define void @use_gep_address_space([1024 x i32] addrspace(3)* %array) nounwind {
   ret void
 }
 
-define void @use_gep_address_space_large_offset([1024 x i32] addrspace(3)* %array) nounwind {
 ; CHECK-LABEL: {{^}}use_gep_address_space_large_offset:
 ; The LDS offset will be 65536 bytes, which is larger than the size of LDS on
 ; SI, which is why it is being OR'd with the base pointer.
 ; SI: s_or_b32
 ; CI: s_add_i32
 ; CHECK: ds_write_b32
+define void @use_gep_address_space_large_offset([1024 x i32] addrspace(3)* %array) nounwind {
   %p = getelementptr [1024 x i32], [1024 x i32] addrspace(3)* %array, i16 0, i16 16384
   store i32 99, i32 addrspace(3)* %p
   ret void
 }
 
-define void @gep_as_vector_v4(<4 x [1024 x i32] addrspace(3)*> %array) nounwind {
 ; CHECK-LABEL: {{^}}gep_as_vector_v4:
-; CHECK: s_add_i32
-; CHECK: s_add_i32
-; CHECK: s_add_i32
-; CHECK: s_add_i32
+; SI: s_add_i32
+; SI: s_add_i32
+; SI: s_add_i32
+; SI: s_add_i32
+
+; CHECK-DAG: v_mov_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}
+; CHECK-DAG: v_mov_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}
+; CHECK-DAG: v_mov_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}
+; CHECK-DAG: v_mov_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}
+
+; CI-DAG: ds_write_b32 v{{[0-9]+}}, v{{[0-9]+}} offset:64
+; CI-DAG: ds_write_b32 v{{[0-9]+}}, v{{[0-9]+}} offset:64
+; CI-DAG: ds_write_b32 v{{[0-9]+}}, v{{[0-9]+}} offset:64
+; CI-DAG: ds_write_b32 v{{[0-9]+}}, v{{[0-9]+}} offset:64
+; CHECK: s_endpgm
+define void @gep_as_vector_v4(<4 x [1024 x i32] addrspace(3)*> %array) nounwind {
   %p = getelementptr [1024 x i32], <4 x [1024 x i32] addrspace(3)*> %array, <4 x i16> zeroinitializer, <4 x i16> <i16 16, i16 16, i16 16, i16 16>
   %p0 = extractelement <4 x i32 addrspace(3)*> %p, i32 0
   %p1 = extractelement <4 x i32 addrspace(3)*> %p, i32 1
@@ -41,10 +52,15 @@ define void @gep_as_vector_v4(<4 x [1024 x i32] addrspace(3)*> %array) nounwind
   ret void
 }
 
-define void @gep_as_vector_v2(<2 x [1024 x i32] addrspace(3)*> %array) nounwind {
 ; CHECK-LABEL: {{^}}gep_as_vector_v2:
-; CHECK: s_add_i32
-; CHECK: s_add_i32
+; SI: s_add_i32
+; SI: s_add_i32
+; CHECK-DAG: v_mov_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}
+; CHECK-DAG: v_mov_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}
+; CI-DAG: ds_write_b32 v{{[0-9]+}}, v{{[0-9]+}} offset:64
+; CI-DAG: ds_write_b32 v{{[0-9]+}}, v{{[0-9]+}} offset:64
+; CHECK: s_endpgm
+define void @gep_as_vector_v2(<2 x [1024 x i32] addrspace(3)*> %array) nounwind {
   %p = getelementptr [1024 x i32], <2 x [1024 x i32] addrspace(3)*> %array, <2 x i16> zeroinitializer, <2 x i16> <i16 16, i16 16>
   %p0 = extractelement <2 x i32 addrspace(3)*> %p, i32 0
   %p1 = extractelement <2 x i32 addrspace(3)*> %p, i32 1
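
For illustration, a minimal IR sketch of the pattern the new combine handles;
the function and its names are hypothetical, not taken from the patch's test
suite. The insertelement chain becomes a BUILD_VECTOR node in the
SelectionDAG, and the extract of a constant index can then fold directly to
the scalar source instead of a subregister copy out of a vector register:

    define <2 x i32> @extract_from_build_vector(i32 %x, i32 %y, i32* %out) {
      ; lowers to (build_vector %x, %y) in the DAG
      %v0 = insertelement <2 x i32> undef, i32 %x, i32 0
      %v1 = insertelement <2 x i32> %v0, i32 %y, i32 1
      ; %v1 has a second use (the ret below), so InstCombine leaves this
      ; extract alone; the DAG combine still folds it to %y on targets where
      ; aggressivelyPreferBuildVectorSources() returns true
      %elt = extractelement <2 x i32> %v1, i32 1
      store i32 %elt, i32* %out
      ret <2 x i32> %v1
    }

Per the DAGCombiner change above, the fold fires only for a constant index and
a legal vector type, and only when the build_vector has a single use or the
target opts in via the new hook, as AMDGPU does.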