diff llvm/test/Transforms/CodeGenPrepare/X86/x86-shuffle-sink.ll @ 252:1f2b6ac9f198 llvm-original
LLVM16-1
author   | Shinji KONO <kono@ie.u-ryukyu.ac.jp>
date     | Fri, 18 Aug 2023 09:04:13 +0900
parents  | c4bab56944e8
children |
--- a/llvm/test/Transforms/CodeGenPrepare/X86/x86-shuffle-sink.ll	Wed Nov 09 17:47:54 2022 +0900
+++ b/llvm/test/Transforms/CodeGenPrepare/X86/x86-shuffle-sink.ll	Fri Aug 18 09:04:13 2023 +0900
@@ -121,17 +121,17 @@
 define <4 x i32> @test_32bit(<4 x i32> %lhs, <4 x i32> %tmp, i1 %tst) {
 ; CHECK-SSE2-LABEL: @test_32bit(
-; CHECK-SSE2-NEXT: [[MASK:%.*]] = shufflevector <4 x i32> [[TMP:%.*]], <4 x i32> undef, <4 x i32> <i32 0, i32 undef, i32 0, i32 0>
+; CHECK-SSE2-NEXT: [[MASK:%.*]] = shufflevector <4 x i32> [[TMP:%.*]], <4 x i32> undef, <4 x i32> <i32 0, i32 poison, i32 0, i32 0>
 ; CHECK-SSE2-NEXT: br i1 [[TST:%.*]], label [[IF_TRUE:%.*]], label [[IF_FALSE:%.*]]
 ; CHECK-SSE2: if_true:
 ; CHECK-SSE2-NEXT: ret <4 x i32> [[MASK]]
 ; CHECK-SSE2: if_false:
-; CHECK-SSE2-NEXT: [[TMP1:%.*]] = shufflevector <4 x i32> [[TMP]], <4 x i32> undef, <4 x i32> <i32 0, i32 undef, i32 0, i32 0>
+; CHECK-SSE2-NEXT: [[TMP1:%.*]] = shufflevector <4 x i32> [[TMP]], <4 x i32> undef, <4 x i32> <i32 0, i32 poison, i32 0, i32 0>
 ; CHECK-SSE2-NEXT: [[RES:%.*]] = ashr <4 x i32> [[LHS:%.*]], [[TMP1]]
 ; CHECK-SSE2-NEXT: ret <4 x i32> [[RES]]
 ;
 ; CHECK-XOP-LABEL: @test_32bit(
-; CHECK-XOP-NEXT: [[MASK:%.*]] = shufflevector <4 x i32> [[TMP:%.*]], <4 x i32> undef, <4 x i32> <i32 0, i32 undef, i32 0, i32 0>
+; CHECK-XOP-NEXT: [[MASK:%.*]] = shufflevector <4 x i32> [[TMP:%.*]], <4 x i32> undef, <4 x i32> <i32 0, i32 poison, i32 0, i32 0>
 ; CHECK-XOP-NEXT: br i1 [[TST:%.*]], label [[IF_TRUE:%.*]], label [[IF_FALSE:%.*]]
 ; CHECK-XOP: if_true:
 ; CHECK-XOP-NEXT: ret <4 x i32> [[MASK]]
@@ -140,7 +140,7 @@
 ; CHECK-XOP-NEXT: ret <4 x i32> [[RES]]
 ;
 ; CHECK-AVX-LABEL: @test_32bit(
-; CHECK-AVX-NEXT: [[MASK:%.*]] = shufflevector <4 x i32> [[TMP:%.*]], <4 x i32> undef, <4 x i32> <i32 0, i32 undef, i32 0, i32 0>
+; CHECK-AVX-NEXT: [[MASK:%.*]] = shufflevector <4 x i32> [[TMP:%.*]], <4 x i32> undef, <4 x i32> <i32 0, i32 poison, i32 0, i32 0>
 ; CHECK-AVX-NEXT: br i1 [[TST:%.*]], label [[IF_TRUE:%.*]], label [[IF_FALSE:%.*]]
 ; CHECK-AVX: if_true:
 ; CHECK-AVX-NEXT: ret <4 x i32> [[MASK]]
@@ -199,19 +199,18 @@
   ret <2 x i64> %res
 }
 
-define void @funnel_splatvar(i32* nocapture %arr, i32 %rot) {
+define void @funnel_splatvar(ptr nocapture %arr, i32 %rot) {
 ; CHECK-SSE2-LABEL: @funnel_splatvar(
 ; CHECK-SSE2-NEXT: entry:
 ; CHECK-SSE2-NEXT: [[BROADCAST_SPLATINSERT15:%.*]] = insertelement <8 x i32> undef, i32 [[ROT:%.*]], i32 0
 ; CHECK-SSE2-NEXT: br label [[VECTOR_BODY:%.*]]
 ; CHECK-SSE2: vector.body:
 ; CHECK-SSE2-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-SSE2-NEXT: [[T0:%.*]] = getelementptr inbounds i32, i32* [[ARR:%.*]], i64 [[INDEX]]
-; CHECK-SSE2-NEXT: [[T1:%.*]] = bitcast i32* [[T0]] to <8 x i32>*
-; CHECK-SSE2-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i32>, <8 x i32>* [[T1]], align 4
+; CHECK-SSE2-NEXT: [[T0:%.*]] = getelementptr inbounds i32, ptr [[ARR:%.*]], i64 [[INDEX]]
+; CHECK-SSE2-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i32>, ptr [[T0]], align 4
 ; CHECK-SSE2-NEXT: [[TMP0:%.*]] = shufflevector <8 x i32> [[BROADCAST_SPLATINSERT15]], <8 x i32> undef, <8 x i32> zeroinitializer
 ; CHECK-SSE2-NEXT: [[T2:%.*]] = call <8 x i32> @llvm.fshl.v8i32(<8 x i32> [[WIDE_LOAD]], <8 x i32> [[WIDE_LOAD]], <8 x i32> [[TMP0]])
-; CHECK-SSE2-NEXT: store <8 x i32> [[T2]], <8 x i32>* [[T1]], align 4
+; CHECK-SSE2-NEXT: store <8 x i32> [[T2]], ptr [[T0]], align 4
 ; CHECK-SSE2-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 8
 ; CHECK-SSE2-NEXT: [[T3:%.*]] = icmp eq i64 [[INDEX_NEXT]], 65536
 ; CHECK-SSE2-NEXT: br i1 [[T3]], label [[FOR_COND_CLEANUP:%.*]], label [[VECTOR_BODY]]
@@ -225,11 +224,10 @@
 ; CHECK-XOP-NEXT: br label [[VECTOR_BODY:%.*]]
 ; CHECK-XOP: vector.body:
 ; CHECK-XOP-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-XOP-NEXT: [[T0:%.*]] = getelementptr inbounds i32, i32* [[ARR:%.*]], i64 [[INDEX]]
-; CHECK-XOP-NEXT: [[T1:%.*]] = bitcast i32* [[T0]] to <8 x i32>*
-; CHECK-XOP-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i32>, <8 x i32>* [[T1]], align 4
+; CHECK-XOP-NEXT: [[T0:%.*]] = getelementptr inbounds i32, ptr [[ARR:%.*]], i64 [[INDEX]]
+; CHECK-XOP-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i32>, ptr [[T0]], align 4
 ; CHECK-XOP-NEXT: [[T2:%.*]] = call <8 x i32> @llvm.fshl.v8i32(<8 x i32> [[WIDE_LOAD]], <8 x i32> [[WIDE_LOAD]], <8 x i32> [[BROADCAST_SPLAT16]])
-; CHECK-XOP-NEXT: store <8 x i32> [[T2]], <8 x i32>* [[T1]], align 4
+; CHECK-XOP-NEXT: store <8 x i32> [[T2]], ptr [[T0]], align 4
 ; CHECK-XOP-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 8
 ; CHECK-XOP-NEXT: [[T3:%.*]] = icmp eq i64 [[INDEX_NEXT]], 65536
 ; CHECK-XOP-NEXT: br i1 [[T3]], label [[FOR_COND_CLEANUP:%.*]], label [[VECTOR_BODY]]
@@ -243,11 +241,10 @@
 ; CHECK-AVX-NEXT: br label [[VECTOR_BODY:%.*]]
 ; CHECK-AVX: vector.body:
 ; CHECK-AVX-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
-; CHECK-AVX-NEXT: [[T0:%.*]] = getelementptr inbounds i32, i32* [[ARR:%.*]], i64 [[INDEX]]
-; CHECK-AVX-NEXT: [[T1:%.*]] = bitcast i32* [[T0]] to <8 x i32>*
-; CHECK-AVX-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i32>, <8 x i32>* [[T1]], align 4
+; CHECK-AVX-NEXT: [[T0:%.*]] = getelementptr inbounds i32, ptr [[ARR:%.*]], i64 [[INDEX]]
+; CHECK-AVX-NEXT: [[WIDE_LOAD:%.*]] = load <8 x i32>, ptr [[T0]], align 4
 ; CHECK-AVX-NEXT: [[T2:%.*]] = call <8 x i32> @llvm.fshl.v8i32(<8 x i32> [[WIDE_LOAD]], <8 x i32> [[WIDE_LOAD]], <8 x i32> [[BROADCAST_SPLAT16]])
-; CHECK-AVX-NEXT: store <8 x i32> [[T2]], <8 x i32>* [[T1]], align 4
+; CHECK-AVX-NEXT: store <8 x i32> [[T2]], ptr [[T0]], align 4
 ; CHECK-AVX-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 8
 ; CHECK-AVX-NEXT: [[T3:%.*]] = icmp eq i64 [[INDEX_NEXT]], 65536
 ; CHECK-AVX-NEXT: br i1 [[T3]], label [[FOR_COND_CLEANUP:%.*]], label [[VECTOR_BODY]]
@@ -261,11 +258,10 @@
 vector.body:
   %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
-  %t0 = getelementptr inbounds i32, i32* %arr, i64 %index
-  %t1 = bitcast i32* %t0 to <8 x i32>*
-  %wide.load = load <8 x i32>, <8 x i32>* %t1, align 4
+  %t0 = getelementptr inbounds i32, ptr %arr, i64 %index
+  %wide.load = load <8 x i32>, ptr %t0, align 4
   %t2 = call <8 x i32> @llvm.fshl.v8i32(<8 x i32> %wide.load, <8 x i32> %wide.load, <8 x i32> %broadcast.splat16)
-  store <8 x i32> %t2, <8 x i32>* %t1, align 4
+  store <8 x i32> %t2, ptr %t0, align 4
   %index.next = add i64 %index, 8
   %t3 = icmp eq i64 %index.next, 65536
   br i1 %t3, label %for.cond.cleanup, label %vector.body