diff test/CodeGen/X86/sse41.ll @ 95:afa8332a0e37 LLVM3.8
LLVM 3.8
author: Kaito Tokumori <e105711@ie.u-ryukyu.ac.jp>
date: Tue, 13 Oct 2015 17:48:58 +0900
parents: 60c9769439b8
children: 7d135dc70f03
--- a/test/CodeGen/X86/sse41.ll	Wed Feb 18 14:56:07 2015 +0900
+++ b/test/CodeGen/X86/sse41.ll	Tue Oct 13 17:48:58 2015 +0900
@@ -31,49 +31,6 @@
 ret <16 x i8> %tmp1
 }
 
-define <2 x i64> @pmovsxbd_1(i32* %p) nounwind {
-; X32-LABEL: pmovsxbd_1:
-; X32: ## BB#0: ## %entry
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: pmovsxbd (%eax), %xmm0
-; X32-NEXT: retl
-;
-; X64-LABEL: pmovsxbd_1:
-; X64: ## BB#0: ## %entry
-; X64-NEXT: pmovsxbd (%rdi), %xmm0
-; X64-NEXT: retq
-entry:
- %0 = load i32* %p, align 4
- %1 = insertelement <4 x i32> undef, i32 %0, i32 0
- %2 = insertelement <4 x i32> %1, i32 0, i32 1
- %3 = insertelement <4 x i32> %2, i32 0, i32 2
- %4 = insertelement <4 x i32> %3, i32 0, i32 3
- %5 = bitcast <4 x i32> %4 to <16 x i8>
- %6 = tail call <4 x i32> @llvm.x86.sse41.pmovsxbd(<16 x i8> %5) nounwind readnone
- %7 = bitcast <4 x i32> %6 to <2 x i64>
- ret <2 x i64> %7
-}
-
-define <2 x i64> @pmovsxwd_1(i64* %p) nounwind readonly {
-; X32-LABEL: pmovsxwd_1:
-; X32: ## BB#0: ## %entry
-; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: pmovsxwd (%eax), %xmm0
-; X32-NEXT: retl
-;
-; X64-LABEL: pmovsxwd_1:
-; X64: ## BB#0: ## %entry
-; X64-NEXT: pmovsxwd (%rdi), %xmm0
-; X64-NEXT: retq
-entry:
- %0 = load i64* %p ; <i64> [#uses=1]
- %tmp2 = insertelement <2 x i64> zeroinitializer, i64 %0, i32 0 ; <<2 x i64>> [#uses=1]
- %1 = bitcast <2 x i64> %tmp2 to <8 x i16> ; <<8 x i16>> [#uses=1]
- %2 = tail call <4 x i32> @llvm.x86.sse41.pmovsxwd(<8 x i16> %1) nounwind readnone ; <<4 x i32>> [#uses=1]
- %3 = bitcast <4 x i32> %2 to <2 x i64> ; <<2 x i64>> [#uses=1]
- ret <2 x i64> %3
-}
-
 define <2 x i64> @pmovzxbq_1() nounwind {
 ; X32-LABEL: pmovzxbq_1:
 ; X32: ## BB#0: ## %entry
@@ -87,15 +44,13 @@
 ; X64-NEXT: pmovzxbq {{.*#+}} xmm0 = mem[0],zero,zero,zero,zero,zero,zero,zero,mem[1],zero,zero,zero,zero,zero,zero,zero
 ; X64-NEXT: retq
 entry:
- %0 = load i16* @g16, align 2 ; <i16> [#uses=1]
+ %0 = load i16, i16* @g16, align 2 ; <i16> [#uses=1]
 %1 = insertelement <8 x i16> undef, i16 %0, i32 0 ; <<8 x i16>> [#uses=1]
 %2 = bitcast <8 x i16> %1 to <16 x i8> ; <<16 x i8>> [#uses=1]
 %3 = tail call <2 x i64> @llvm.x86.sse41.pmovzxbq(<16 x i8> %2) nounwind readnone ; <<2 x i64>> [#uses=1]
 ret <2 x i64> %3
 }
 
-declare <4 x i32> @llvm.x86.sse41.pmovsxbd(<16 x i8>) nounwind readnone
-declare <4 x i32> @llvm.x86.sse41.pmovsxwd(<8 x i16>) nounwind readnone
 declare <2 x i64> @llvm.x86.sse41.pmovzxbq(<16 x i8>) nounwind readnone
 
 define i32 @extractps_1(<4 x float> %v) nounwind {
@@ -137,7 +92,7 @@
 ; X32: ## BB#0:
 ; X32-NEXT: pushl %eax
 ; X32-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
-; X32-NEXT: addss LCPI7_0, %xmm0
+; X32-NEXT: addss LCPI5_0, %xmm0
 ; X32-NEXT: movss %xmm0, (%esp)
 ; X32-NEXT: flds (%esp)
 ; X32-NEXT: popl %eax
@@ -199,28 +154,51 @@
 declare <4 x float> @llvm.x86.sse41.insertps(<4 x float>, <4 x float>, i32) nounwind readnone
 
-define <4 x float> @insertps_2(<4 x float> %t1, float %t2) nounwind {
-; X32-LABEL: insertps_2:
+; When optimizing for speed, prefer blendps over insertps even if it means we have to
+; generate a separate movss to load the scalar operand.
+define <4 x float> @blendps_not_insertps_1(<4 x float> %t1, float %t2) nounwind {
+; X32-LABEL: blendps_not_insertps_1:
+; X32: ## BB#0:
+; X32-NEXT: movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X32-NEXT: blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
+; X32-NEXT: retl
+;
+; X64-LABEL: blendps_not_insertps_1:
+; X64: ## BB#0:
+; X64-NEXT: blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
+; X64-NEXT: retq
+ %tmp1 = insertelement <4 x float> %t1, float %t2, i32 0
+ ret <4 x float> %tmp1
+}
+
+; When optimizing for size, generate an insertps if there's a load fold opportunity.
+; The difference between i386 and x86-64 ABIs for the float operand means we should
+; generate an insertps for X32 but not for X64!
+define <4 x float> @insertps_or_blendps(<4 x float> %t1, float %t2) minsize nounwind {
+; X32-LABEL: insertps_or_blendps:
 ; X32: ## BB#0:
 ; X32-NEXT: insertps {{.*#+}} xmm0 = mem[0],xmm0[1,2,3]
 ; X32-NEXT: retl
 ;
-; X64-LABEL: insertps_2:
+; X64-LABEL: insertps_or_blendps:
 ; X64: ## BB#0:
-; X64-NEXT: insertps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
+; X64-NEXT: blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
 ; X64-NEXT: retq
 %tmp1 = insertelement <4 x float> %t1, float %t2, i32 0
 ret <4 x float> %tmp1
 }
 
-define <4 x float> @insertps_3(<4 x float> %t1, <4 x float> %t2) nounwind {
-; X32-LABEL: insertps_3:
+
+; An insert into the low 32-bits of a vector from the low 32-bits of another vector
+; is always just a blendps because blendps is never more expensive than insertps.
+define <4 x float> @blendps_not_insertps_2(<4 x float> %t1, <4 x float> %t2) nounwind {
+; X32-LABEL: blendps_not_insertps_2:
 ; X32: ## BB#0:
-; X32-NEXT: insertps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
+; X32-NEXT: blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
 ; X32-NEXT: retl
 ;
-; X64-LABEL: insertps_3:
+; X64-LABEL: blendps_not_insertps_2:
 ; X64: ## BB#0:
-; X64-NEXT: insertps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
+; X64-NEXT: blendps {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
 ; X64-NEXT: retq
 %tmp2 = extractelement <4 x float> %t2, i32 0
 %tmp1 = insertelement <4 x float> %t1, float %tmp2, i32 0
@@ -330,7 +308,7 @@
 ; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0]
 ; X64-NEXT: retq
 entry:
- %0 = load <4 x float>* %pb, align 16
+ %0 = load <4 x float>, <4 x float>* %pb, align 16
 %vecinit6 = shufflevector <4 x float> %a, <4 x float> %0, <4 x i32> <i32 0, i32 1, i32 2, i32 4>
 ret <4 x float> %vecinit6
 }
@@ -366,7 +344,7 @@
 ; X64-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5],xmm1[6,7]
 ; X64-NEXT: retq
 entry:
- %0 = load <4 x i32>* %pb, align 16
+ %0 = load <4 x i32>, <4 x i32>* %pb, align 16
 %vecinit6 = shufflevector <4 x i32> %a, <4 x i32> %0, <4 x i32> <i32 0, i32 1, i32 2, i32 4>
 ret <4 x i32> %vecinit6
 }
@@ -399,7 +377,7 @@
 ; X64: ## BB#0:
 ; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[2,3]
 ; X64-NEXT: retq
- %1 = load float* %b, align 4
+ %1 = load float, float* %b, align 4
 %2 = insertelement <4 x float> undef, float %1, i32 0
 %result = shufflevector <4 x float> %a, <4 x float> %2, <4 x i32> <i32 0, i32 4, i32 2, i32 3>
 ret <4 x float> %result
@@ -421,7 +399,7 @@
 ; X64-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,1]
 ; X64-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5],xmm0[6,7]
 ; X64-NEXT: retq
- %1 = load i32* %b, align 4
+ %1 = load i32, i32* %b, align 4
 %2 = insertelement <4 x i32> undef, i32 %1, i32 0
 %result = shufflevector <4 x i32> %a, <4 x i32> %2, <4 x i32> <i32 0, i32 1, i32 4, i32 3>
 ret <4 x i32> %result
@@ -551,18 +529,12 @@
 define <4 x float> @shuf_X00X(<4 x float> %x, <4 x float> %a) {
 ; X32-LABEL: shuf_X00X:
 ; X32: ## BB#0:
-; X32-NEXT: xorps %xmm1, %xmm1
-; X32-NEXT: blendps {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
-; X32-NEXT: insertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm0[0]
-; X32-NEXT: movaps %xmm1, %xmm0
+; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],zero,zero,xmm0[0]
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: shuf_X00X:
 ; X64: ## BB#0:
-; X64-NEXT: xorps %xmm1, %xmm1
-; X64-NEXT: blendps {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
-; X64-NEXT: insertps {{.*#+}} xmm1 = xmm1[0,1,2],xmm0[0]
-; X64-NEXT: movaps %xmm1, %xmm0
+; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],zero,zero,xmm0[0]
 ; X64-NEXT: retq
 %vecext = extractelement <4 x float> %x, i32 0
 %vecinit = insertelement <4 x float> undef, float %vecext, i32 0
@@ -575,20 +547,14 @@
 define <4 x float> @shuf_X0YC(<4 x float> %x, <4 x float> %a) {
 ; X32-LABEL: shuf_X0YC:
 ; X32: ## BB#0:
-; X32-NEXT: xorps %xmm2, %xmm2
-; X32-NEXT: blendps {{.*#+}} xmm2 = xmm0[0],xmm2[1],xmm0[2,3]
-; X32-NEXT: insertps {{.*#+}} xmm2 = xmm2[0,1],xmm0[1],zero
-; X32-NEXT: insertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm1[2]
-; X32-NEXT: movaps %xmm2, %xmm0
+; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
+; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[2]
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: shuf_X0YC:
 ; X64: ## BB#0:
-; X64-NEXT: xorps %xmm2, %xmm2
-; X64-NEXT: blendps {{.*#+}} xmm2 = xmm0[0],xmm2[1],xmm0[2,3]
-; X64-NEXT: insertps {{.*#+}} xmm2 = xmm2[0,1],xmm0[1],zero
-; X64-NEXT: insertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm1[2]
-; X64-NEXT: movaps %xmm2, %xmm0
+; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
+; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[2]
 ; X64-NEXT: retq
 %vecext = extractelement <4 x float> %x, i32 0
 %vecinit = insertelement <4 x float> undef, float %vecext, i32 0
@@ -642,12 +608,16 @@
 define <4 x i32> @i32_shuf_XYY0(<4 x i32> %x, <4 x i32> %a) {
 ; X32-LABEL: i32_shuf_XYY0:
 ; X32: ## BB#0:
-; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,1],zero
+; X32-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,1,1,3]
+; X32-NEXT: pxor %xmm0, %xmm0
+; X32-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4,5],xmm0[6,7]
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: i32_shuf_XYY0:
 ; X64: ## BB#0:
-; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,1],zero
+; X64-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,1,1,3]
+; X64-NEXT: pxor %xmm0, %xmm0
+; X64-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4,5],xmm0[6,7]
 ; X64-NEXT: retq
 %vecext = extractelement <4 x i32> %x, i32 0
 %vecinit = insertelement <4 x i32> undef, i32 %vecext, i32 0
@@ -661,12 +631,16 @@
 define <4 x i32> @i32_shuf_XYW0(<4 x i32> %x, <4 x i32> %a) {
 ; X32-LABEL: i32_shuf_XYW0:
 ; X32: ## BB#0:
-; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,3],zero
+; X32-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,1,3,3]
+; X32-NEXT: pxor %xmm0, %xmm0
+; X32-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4,5],xmm0[6,7]
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: i32_shuf_XYW0:
 ; X64: ## BB#0:
-; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,3],zero
+; X64-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,1,3,3]
+; X64-NEXT: pxor %xmm0, %xmm0
+; X64-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4,5],xmm0[6,7]
 ; X64-NEXT: retq
 %vecext = extractelement <4 x i32> %x, i32 0
 %vecinit = insertelement <4 x i32> undef, i32 %vecext, i32 0
@@ -681,12 +655,16 @@
 define <4 x i32> @i32_shuf_W00W(<4 x i32> %x, <4 x i32> %a) {
 ; X32-LABEL: i32_shuf_W00W:
 ; X32: ## BB#0:
-; X32-NEXT: insertps {{.*#+}} xmm0 = xmm0[3],zero,zero,xmm0[3]
+; X32-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
+; X32-NEXT: pxor %xmm0, %xmm0
+; X32-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3,4,5],xmm1[6,7]
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: i32_shuf_W00W:
 ; X64: ## BB#0:
-; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[3],zero,zero,xmm0[3]
+; X64-NEXT: pshufd {{.*#+}} xmm1 = xmm0[3,1,2,3]
+; X64-NEXT: pxor %xmm0, %xmm0
+; X64-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3,4,5],xmm1[6,7]
 ; X64-NEXT: retq
 %vecext = extractelement <4 x i32> %x, i32 3
 %vecinit = insertelement <4 x i32> undef, i32 %vecext, i32 0
@@ -723,18 +701,16 @@
 define <4 x i32> @i32_shuf_X00X(<4 x i32> %x, <4 x i32> %a) {
 ; X32-LABEL: i32_shuf_X00X:
 ; X32: ## BB#0:
-; X32-NEXT: pxor %xmm1, %xmm1
-; X32-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3,4,5,6,7]
-; X32-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,0]
-; X32-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4,5],xmm0[6,7]
+; X32-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,1,2,0]
+; X32-NEXT: pxor %xmm0, %xmm0
+; X32-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3,4,5],xmm1[6,7]
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: i32_shuf_X00X:
 ; X64: ## BB#0:
-; X64-NEXT: pxor %xmm1, %xmm1
-; X64-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3,4,5,6,7]
-; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,0]
-; X64-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1,2,3,4,5],xmm0[6,7]
+; X64-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,1,2,0]
+; X64-NEXT: pxor %xmm0, %xmm0
+; X64-NEXT: pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3,4,5],xmm1[6,7]
 ; X64-NEXT: retq
 %vecext = extractelement <4 x i32> %x, i32 0
 %vecinit = insertelement <4 x i32> undef, i32 %vecext, i32 0
@@ -748,7 +724,6 @@
 ; X32-LABEL: i32_shuf_X0YC:
 ; X32: ## BB#0:
 ; X32-NEXT: pmovzxdq {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero
-; X32-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,6,7]
 ; X32-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,1,2,2]
 ; X32-NEXT: pblendw {{.*#+}} xmm0 = xmm2[0,1,2,3,4,5],xmm0[6,7]
 ; X32-NEXT: retl
@@ -756,7 +731,6 @@
 ; X64-LABEL: i32_shuf_X0YC:
 ; X64: ## BB#0:
 ; X64-NEXT: pmovzxdq {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero
-; X64-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,6,7]
 ; X64-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,1,2,2]
 ; X64-NEXT: pblendw {{.*#+}} xmm0 = xmm2[0,1,2,3,4,5],xmm0[6,7]
 ; X64-NEXT: retq
@@ -827,7 +801,7 @@
 ; X64: ## BB#0:
 ; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],mem[0]
 ; X64-NEXT: retq
- %1 = load <4 x float>* %pb, align 16
+ %1 = load <4 x float>, <4 x float>* %pb, align 16
 %2 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> %1, i32 48)
 ret <4 x float> %2
 }
@@ -845,7 +819,7 @@
 ; X64: ## BB#0:
 ; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1],mem[1],xmm0[3]
 ; X64-NEXT: retq
- %1 = load <4 x float>* %pb, align 16
+ %1 = load <4 x float>, <4 x float>* %pb, align 16
 %2 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> %1, i32 96)
 ret <4 x float> %2
 }
@@ -865,8 +839,8 @@
 ; X64-NEXT: shlq $4, %rsi
 ; X64-NEXT: insertps {{.*#+}} xmm0 = mem[3],xmm0[1,2,3]
 ; X64-NEXT: retq
- %1 = getelementptr inbounds <4 x float>* %pb, i64 %index
- %2 = load <4 x float>* %1, align 16
+ %1 = getelementptr inbounds <4 x float>, <4 x float>* %pb, i64 %index
+ %2 = load <4 x float>, <4 x float>* %1, align 16
 %3 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> %2, i32 192)
 ret <4 x float> %3
 }
@@ -887,8 +861,8 @@
 ; X64-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0,0,0]
 ; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
 ; X64-NEXT: retq
- %1 = getelementptr inbounds float* %fb, i64 %index
- %2 = load float* %1, align 4
+ %1 = getelementptr inbounds float, float* %fb, i64 %index
+ %2 = load float, float* %1, align 4
 %3 = insertelement <4 x float> undef, float %2, i32 0
 %4 = insertelement <4 x float> %3, float %2, i32 1
 %5 = insertelement <4 x float> %4, float %2, i32 2
@@ -912,7 +886,7 @@
 ; X64-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,0,0,0]
 ; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0,1,2],xmm1[0]
 ; X64-NEXT: retq
- %1 = load <4 x float>* %b, align 4
+ %1 = load <4 x float>, <4 x float>* %b, align 4
 %2 = extractelement <4 x float> %1, i32 0
 %3 = insertelement <4 x float> undef, float %2, i32 0
 %4 = insertelement <4 x float> %3, float %2, i32 1
@@ -951,8 +925,8 @@
 ; X64-NEXT: addps %xmm2, %xmm3
 ; X64-NEXT: addps %xmm3, %xmm0
 ; X64-NEXT: retq
- %1 = getelementptr inbounds float* %fb, i64 %index
- %2 = load float* %1, align 4
+ %1 = getelementptr inbounds float, float* %fb, i64 %index
+ %2 = load float, float* %1, align 4
 %3 = insertelement <4 x float> undef, float %2, i32 0
 %4 = insertelement <4 x float> %3, float %2, i32 1
 %5 = insertelement <4 x float> %4, float %2, i32 2
@@ -982,7 +956,7 @@
 ; X64-NEXT: unpcklpd {{.*#+}} xmm1 = xmm1[0],xmm0[0]
 ; X64-NEXT: movapd %xmm1, %xmm0
 ; X64-NEXT: retq
- %1 = load float* %b, align 4
+ %1 = load float, float* %b, align 4
 %2 = insertelement <4 x float> undef, float %1, i32 0
 %result = shufflevector <4 x float> %a, <4 x float> %2, <4 x i32> <i32 4, i32 undef, i32 0, i32 7>
 ret <4 x float> %result
@@ -1001,31 +975,28 @@
 ; X64: ## BB#0:
 ; X64-NEXT: insertps {{.*#+}} xmm0 = xmm0[0],zero,xmm0[2],mem[2]
 ; X64-NEXT: retq
- %load = load <4 x float> *%ptr
+ %load = load <4 x float> , <4 x float> *%ptr
 %ret = shufflevector <4 x float> %load, <4 x float> %a, <4 x i32> <i32 4, i32 undef, i32 6, i32 2>
 ret <4 x float> %ret
 }
 
 ; Edge case for insertps where we end up with a shuffle with mask=<0, 7, -1, -1>
-define void @insertps_pr20411(i32* noalias nocapture %RET) #1 {
+define void @insertps_pr20411(<4 x i32> %shuffle109, <4 x i32> %shuffle116, i32* noalias nocapture %RET) #1 {
 ; X32-LABEL: insertps_pr20411:
 ; X32: ## BB#0:
 ; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X32-NEXT: pshufd {{.*#+}} xmm0 = mem[2,3,0,1]
-; X32-NEXT: pblendw {{.*#+}} xmm0 = mem[0,1],xmm0[2,3],mem[4,5,6,7]
-; X32-NEXT: movdqu %xmm0, (%eax)
+; X32-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; X32-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3],xmm0[4,5,6,7]
+; X32-NEXT: movdqu %xmm1, (%eax)
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: insertps_pr20411:
 ; X64: ## BB#0:
-; X64-NEXT: pshufd {{.*#+}} xmm0 = mem[2,3,0,1]
-; X64-NEXT: pblendw {{.*#+}} xmm0 = mem[0,1],xmm0[2,3],mem[4,5,6,7]
-; X64-NEXT: movdqu %xmm0, (%rdi)
+; X64-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,3,0,1]
+; X64-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3],xmm0[4,5,6,7]
+; X64-NEXT: movdqu %xmm1, (%rdi)
 ; X64-NEXT: retq
- %gather_load = shufflevector <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>, <8 x i32> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
- %shuffle109 = shufflevector <4 x i32> <i32 4, i32 5, i32 6, i32 7>, <4 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3> ; 4 5 6 7
- %shuffle116 = shufflevector <8 x i32> %gather_load, <8 x i32> undef, <4 x i32> <i32 3, i32 undef, i32 undef, i32 undef> ; 3 x x x
- %shuffle117 = shufflevector <4 x i32> %shuffle109, <4 x i32> %shuffle116, <4 x i32> <i32 4, i32 3, i32 undef, i32 undef> ; 3 7 x x
+ %shuffle117 = shufflevector <4 x i32> %shuffle109, <4 x i32> %shuffle116, <4 x i32> <i32 0, i32 7, i32 undef, i32 undef>
 %ptrcast = bitcast i32* %RET to <4 x i32>*
 store <4 x i32> %shuffle117, <4 x i32>* %ptrcast, align 4
 ret void
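
Aside from the regenerated CHECK lines and the renamed blendps/insertps tests, the recurring IR change in these hunks is mechanical: load and getelementptr now spell out the pointee type explicitly, so "load <ty>* %p" becomes "load <ty>, <ty>* %p". A minimal sketch of the two spellings side by side; the function and value names here are illustrative only and are not taken from sse41.ll:

; Old form, as removed in the hunks above (pre-3.7 syntax):
;   %v = load <4 x float>* %p, align 16
;   %q = getelementptr inbounds float* %base, i64 %idx
; New form, as added in the hunks above (result type written before the pointer operand).
; @explicit_type_example is a hypothetical function used only for illustration.
define <4 x float> @explicit_type_example(<4 x float>* %p, float* %base, i64 %idx) {
  %v = load <4 x float>, <4 x float>* %p, align 16           ; explicit result type
  %q = getelementptr inbounds float, float* %base, i64 %idx  ; explicit element type
  %s = load float, float* %q, align 4
  %r = insertelement <4 x float> %v, float %s, i32 0
  ret <4 x float> %r
}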