comparison: test/CodeGen/X86/vshift-2.ll @ 120:1172e4bd9c6f

summary:  update 4.0.0
author:   mir3636
date:     Fri, 25 Nov 2016 19:14:25 +0900
parents:  95c75e76d11b
children: 803732b1fca8
comparing 101:34baf5011add with 120:1172e4bd9c6f
--- test/CodeGen/X86/vshift-2.ll (101:34baf5011add)
+++ test/CodeGen/X86/vshift-2.ll (120:1172e4bd9c6f)
-; RUN: llc < %s -march=x86 -mattr=+sse2 | FileCheck %s
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse2 | FileCheck %s --check-prefix=CHECK --check-prefix=X32
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse2 | FileCheck %s --check-prefix=CHECK --check-prefix=X64
 
 ; test vector shifts converted to proper SSE2 vector shifts when the shift
 ; amounts are the same.
 
 define void @shift1a(<2 x i64> %val, <2 x i64>* %dst) nounwind {
+; X32-LABEL: shift1a:
+; X32: # BB#0: # %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: psrlq $32, %xmm0
+; X32-NEXT: movdqa %xmm0, (%eax)
+; X32-NEXT: retl
+;
+; X64-LABEL: shift1a:
+; X64: # BB#0: # %entry
+; X64-NEXT: psrlq $32, %xmm0
+; X64-NEXT: movdqa %xmm0, (%rdi)
+; X64-NEXT: retq
 entry:
-; CHECK-LABEL: shift1a:
-; CHECK: psrlq
   %lshr = lshr <2 x i64> %val, < i64 32, i64 32 >
   store <2 x i64> %lshr, <2 x i64>* %dst
   ret void
 }
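When every lane shifts by the same constant, as in @shift1a above, the backend can select the immediate form of the SSE2 shift (the psrlq $32 the new checks pin down). A minimal C sketch of the same operation, assuming SSE2 intrinsics from <emmintrin.h>; the function name shift1a_c is hypothetical:

#include <emmintrin.h>

/* Hypothetical C analogue of @shift1a: shift both 64-bit lanes right
 * by the constant 32. With -msse2 this maps to the immediate form of
 * the instruction the test checks for (psrlq $32, %xmm0). */
void shift1a_c(__m128i val, __m128i *dst) {
    *dst = _mm_srli_epi64(val, 32); /* lshr <2 x i64> %val, <i64 32, i64 32> */
}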
 
 define void @shift1b(<2 x i64> %val, <2 x i64>* %dst, i64 %amt) nounwind {
+; X32-LABEL: shift1b:
+; X32: # BB#0: # %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X32-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
+; X32-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
+; X32-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,1,1]
+; X32-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
+; X32-NEXT: psrlq %xmm2, %xmm0
+; X32-NEXT: movdqa %xmm0, (%eax)
+; X32-NEXT: retl
+;
+; X64-LABEL: shift1b:
+; X64: # BB#0: # %entry
+; X64-NEXT: movd %rsi, %xmm1
+; X64-NEXT: psrlq %xmm1, %xmm0
+; X64-NEXT: movdqa %xmm0, (%rdi)
+; X64-NEXT: retq
 entry:
-; CHECK-LABEL: shift1b:
-; CHECK: movd
-; CHECK: psrlq
   %0 = insertelement <2 x i64> undef, i64 %amt, i32 0
   %1 = insertelement <2 x i64> %0, i64 %amt, i32 1
   %lshr = lshr <2 x i64> %val, %1
   store <2 x i64> %lshr, <2 x i64>* %dst
   ret void
 }
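@shift1b covers the run-time-amount pattern: on x86-64 the scalar amount is moved into an XMM register (movd %rsi, %xmm1) and the register form psrlq %xmm1, %xmm0 shifts every lane by it, while on x86-32 the 64-bit amount has to be assembled from two 32-bit loads (the pshufd/punpckldq sequence above). A rough C equivalent of the x86-64 path, again a sketch with a hypothetical name:

#include <emmintrin.h>

/* Hypothetical C analogue of @shift1b (x86-64 only, since
 * _mm_cvtsi64_si128 requires a 64-bit target). The register form of
 * psrlq shifts both lanes by the low 64 bits of the count register. */
void shift1b_c(__m128i val, __m128i *dst, long long amt) {
    __m128i count = _mm_cvtsi64_si128(amt); /* movd %rsi, %xmm1 */
    *dst = _mm_srl_epi64(val, count);       /* psrlq %xmm1, %xmm0 */
}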
 
 define void @shift2a(<4 x i32> %val, <4 x i32>* %dst) nounwind {
+; X32-LABEL: shift2a:
+; X32: # BB#0: # %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: psrld $17, %xmm0
+; X32-NEXT: movdqa %xmm0, (%eax)
+; X32-NEXT: retl
+;
+; X64-LABEL: shift2a:
+; X64: # BB#0: # %entry
+; X64-NEXT: psrld $17, %xmm0
+; X64-NEXT: movdqa %xmm0, (%rdi)
+; X64-NEXT: retq
 entry:
-; CHECK-LABEL: shift2a:
-; CHECK: psrld
   %lshr = lshr <4 x i32> %val, < i32 17, i32 17, i32 17, i32 17 >
   store <4 x i32> %lshr, <4 x i32>* %dst
   ret void
 }
 
 define void @shift2b(<4 x i32> %val, <4 x i32>* %dst, i32 %amt) nounwind {
+; X32-LABEL: shift2b:
+; X32: # BB#0: # %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
+; X32-NEXT: psrld %xmm1, %xmm0
+; X32-NEXT: movdqa %xmm0, (%eax)
+; X32-NEXT: retl
+;
+; X64-LABEL: shift2b:
+; X64: # BB#0: # %entry
+; X64-NEXT: movd %esi, %xmm1
+; X64-NEXT: psrld %xmm1, %xmm0
+; X64-NEXT: movdqa %xmm0, (%rdi)
+; X64-NEXT: retq
 entry:
-; CHECK-LABEL: shift2b:
-; CHECK: movd
-; CHECK: psrld
   %0 = insertelement <4 x i32> undef, i32 %amt, i32 0
   %1 = insertelement <4 x i32> %0, i32 %amt, i32 1
   %2 = insertelement <4 x i32> %1, i32 %amt, i32 2
   %3 = insertelement <4 x i32> %2, i32 %amt, i32 3
   %lshr = lshr <4 x i32> %val, %3
   store <4 x i32> %lshr, <4 x i32>* %dst
   ret void
 }
 
 
 define void @shift3a(<8 x i16> %val, <8 x i16>* %dst) nounwind {
+; X32-LABEL: shift3a:
+; X32: # BB#0: # %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: psrlw $5, %xmm0
+; X32-NEXT: movdqa %xmm0, (%eax)
+; X32-NEXT: retl
+;
+; X64-LABEL: shift3a:
+; X64: # BB#0: # %entry
+; X64-NEXT: psrlw $5, %xmm0
+; X64-NEXT: movdqa %xmm0, (%rdi)
+; X64-NEXT: retq
 entry:
-; CHECK-LABEL: shift3a:
-; CHECK: psrlw
   %lshr = lshr <8 x i16> %val, < i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5 >
   store <8 x i16> %lshr, <8 x i16>* %dst
   ret void
 }
 
 ; properly zero extend the shift amount
 define void @shift3b(<8 x i16> %val, <8 x i16>* %dst, i16 %amt) nounwind {
+; X32-LABEL: shift3b:
+; X32: # BB#0: # %entry
+; X32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; X32-NEXT: movzwl {{[0-9]+}}(%esp), %ecx
+; X32-NEXT: movd %ecx, %xmm1
+; X32-NEXT: psrlw %xmm1, %xmm0
+; X32-NEXT: movdqa %xmm0, (%eax)
+; X32-NEXT: retl
+;
+; X64-LABEL: shift3b:
+; X64: # BB#0: # %entry
+; X64-NEXT: movzwl %si, %eax
+; X64-NEXT: movd %eax, %xmm1
+; X64-NEXT: psrlw %xmm1, %xmm0
+; X64-NEXT: movdqa %xmm0, (%rdi)
+; X64-NEXT: retq
 entry:
-; CHECK-LABEL: shift3b:
-; CHECK: movzwl
-; CHECK: movd
-; CHECK: psrlw
   %0 = insertelement <8 x i16> undef, i16 %amt, i32 0
   %1 = insertelement <8 x i16> %0, i16 %amt, i32 1
   %2 = insertelement <8 x i16> %1, i16 %amt, i32 2
   %3 = insertelement <8 x i16> %2, i16 %amt, i32 3
   %4 = insertelement <8 x i16> %3, i16 %amt, i32 4
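The @shift3b checks above pin down why the i16 amount must be "properly zero extended": psrlw reads its count from the low 64 bits of an XMM register, so stray upper bits of a 16-bit value would be interpreted as part of the count. A hedged C sketch of the same idea (shift3b_c is a hypothetical name; in C the integer promotion of an unsigned short argument already performs the zero extension the test checks for as movzwl):

#include <emmintrin.h>

/* Hypothetical C analogue of @shift3b: zero-extend the 16-bit amount
 * before moving it into the count register, because psrlw uses the
 * whole low 64 bits of that register as the shift count. */
void shift3b_c(__m128i val, __m128i *dst, unsigned short amt) {
    __m128i count = _mm_cvtsi32_si128(amt); /* movzwl %si, %eax; movd %eax, %xmm1 */
    *dst = _mm_srl_epi16(val, count);       /* psrlw %xmm1, %xmm0 */
}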