comparison: test/Transforms/InstCombine/x86-xop.ll @ 120:1172e4bd9c6f (base 101:34baf5011add)

update 4.0.0
author mir3636
date Fri, 25 Nov 2016 19:14:25 +0900
parents afa8332a0e37
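The regex-capture CHECK lines in the new revision were regenerated by the script named in the NOTE line below. A typical invocation is sketched here; the build directory is an assumption, and --opt-binary is the flag the script uses to locate opt:

    $ python utils/update_test_checks.py --opt-binary=build/bin/opt \
        test/Transforms/InstCombine/x86-xop.ll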
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -instcombine -S | FileCheck %s

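; The vfrcz.sd/ss scalar intrinsics only compute a result in element 0 and pass
; the remaining source elements through, so InstCombine can drop insertions
; into lanes it never reads and fold extracts of pass-through lanes to the
; inserted constants, as the tests below check.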
define double @test_vfrcz_sd_0(double %a) {
; CHECK-LABEL: @test_vfrcz_sd_0(
; CHECK-NEXT:    [[TMP1:%.*]] = insertelement <2 x double> undef, double %a, i32 0
; CHECK-NEXT:    [[TMP2:%.*]] = tail call <2 x double> @llvm.x86.xop.vfrcz.sd(<2 x double> [[TMP1]])
; CHECK-NEXT:    [[TMP3:%.*]] = extractelement <2 x double> [[TMP2]], i32 0
; CHECK-NEXT:    ret double [[TMP3]]
;
  %1 = insertelement <2 x double> undef, double %a, i32 0
  %2 = insertelement <2 x double> %1, double 1.000000e+00, i32 1
  %3 = tail call <2 x double> @llvm.x86.xop.vfrcz.sd(<2 x double> %2)
  %4 = extractelement <2 x double> %3, i32 0
  ret double %4
}

define double @test_vfrcz_sd_1(double %a) {
; CHECK-LABEL: @test_vfrcz_sd_1(
; CHECK-NEXT:    ret double 1.000000e+00
;
  %1 = insertelement <2 x double> undef, double %a, i32 0
  %2 = insertelement <2 x double> %1, double 1.000000e+00, i32 1
  %3 = tail call <2 x double> @llvm.x86.xop.vfrcz.sd(<2 x double> %2)
  %4 = extractelement <2 x double> %3, i32 1
  ret double %4
}

define float @test_vfrcz_ss_0(float %a) {
; CHECK-LABEL: @test_vfrcz_ss_0(
; CHECK-NEXT:    [[TMP1:%.*]] = insertelement <4 x float> undef, float %a, i32 0
; CHECK-NEXT:    [[TMP2:%.*]] = tail call <4 x float> @llvm.x86.xop.vfrcz.ss(<4 x float> [[TMP1]])
; CHECK-NEXT:    [[TMP3:%.*]] = extractelement <4 x float> [[TMP2]], i32 0
; CHECK-NEXT:    ret float [[TMP3]]
;
  %1 = insertelement <4 x float> undef, float %a, i32 0
  %2 = insertelement <4 x float> %1, float 1.000000e+00, i32 1
  %3 = insertelement <4 x float> %2, float 2.000000e+00, i32 2
  %4 = insertelement <4 x float> %3, float 3.000000e+00, i32 3
  %5 = tail call <4 x float> @llvm.x86.xop.vfrcz.ss(<4 x float> %4)
  %6 = extractelement <4 x float> %5, i32 0
  ret float %6
}

define float @test_vfrcz_ss_3(float %a) {
; CHECK-LABEL: @test_vfrcz_ss_3(
; CHECK-NEXT:    ret float 3.000000e+00
;
  %1 = insertelement <4 x float> undef, float %a, i32 0
  %2 = insertelement <4 x float> %1, float 1.000000e+00, i32 1
  %3 = insertelement <4 x float> %2, float 2.000000e+00, i32 2
  %4 = insertelement <4 x float> %3, float 3.000000e+00, i32 3
  %5 = tail call <4 x float> @llvm.x86.xop.vfrcz.ss(<4 x float> %4)
  %6 = extractelement <4 x float> %5, i32 3
  ret float %6
}

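; Each fixed-condition XOP vpcom* integer comparison intrinsic below is
; simplified to a generic icmp of the matching predicate, followed by a sign
; extension of the <N x i1> mask back to the original vector type.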
define <2 x i64> @cmp_slt_v2i64(<2 x i64> %a, <2 x i64> %b) {
; CHECK-LABEL: @cmp_slt_v2i64(
; CHECK-NEXT:    [[TMP1:%.*]] = icmp slt <2 x i64> %a, %b
; CHECK-NEXT:    [[TMP2:%.*]] = sext <2 x i1> [[TMP1]] to <2 x i64>
; CHECK-NEXT:    ret <2 x i64> [[TMP2]]
;
  %1 = tail call <2 x i64> @llvm.x86.xop.vpcomltq(<2 x i64> %a, <2 x i64> %b)
  ret <2 x i64> %1
}

define <2 x i64> @cmp_ult_v2i64(<2 x i64> %a, <2 x i64> %b) {
; CHECK-LABEL: @cmp_ult_v2i64(
; CHECK-NEXT:    [[TMP1:%.*]] = icmp ult <2 x i64> %a, %b
; CHECK-NEXT:    [[TMP2:%.*]] = sext <2 x i1> [[TMP1]] to <2 x i64>
; CHECK-NEXT:    ret <2 x i64> [[TMP2]]
;
  %1 = tail call <2 x i64> @llvm.x86.xop.vpcomltuq(<2 x i64> %a, <2 x i64> %b)
  ret <2 x i64> %1
}

define <2 x i64> @cmp_sle_v2i64(<2 x i64> %a, <2 x i64> %b) {
; CHECK-LABEL: @cmp_sle_v2i64(
; CHECK-NEXT:    [[TMP1:%.*]] = icmp sle <2 x i64> %a, %b
; CHECK-NEXT:    [[TMP2:%.*]] = sext <2 x i1> [[TMP1]] to <2 x i64>
; CHECK-NEXT:    ret <2 x i64> [[TMP2]]
;
  %1 = tail call <2 x i64> @llvm.x86.xop.vpcomleq(<2 x i64> %a, <2 x i64> %b)
  ret <2 x i64> %1
}

define <2 x i64> @cmp_ule_v2i64(<2 x i64> %a, <2 x i64> %b) {
; CHECK-LABEL: @cmp_ule_v2i64(
; CHECK-NEXT:    [[TMP1:%.*]] = icmp ule <2 x i64> %a, %b
; CHECK-NEXT:    [[TMP2:%.*]] = sext <2 x i1> [[TMP1]] to <2 x i64>
; CHECK-NEXT:    ret <2 x i64> [[TMP2]]
;
  %1 = tail call <2 x i64> @llvm.x86.xop.vpcomleuq(<2 x i64> %a, <2 x i64> %b)
  ret <2 x i64> %1
}

define <4 x i32> @cmp_sgt_v4i32(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: @cmp_sgt_v4i32(
; CHECK-NEXT:    [[TMP1:%.*]] = icmp sgt <4 x i32> %a, %b
; CHECK-NEXT:    [[TMP2:%.*]] = sext <4 x i1> [[TMP1]] to <4 x i32>
; CHECK-NEXT:    ret <4 x i32> [[TMP2]]
;
  %1 = tail call <4 x i32> @llvm.x86.xop.vpcomgtd(<4 x i32> %a, <4 x i32> %b)
  ret <4 x i32> %1
}

define <4 x i32> @cmp_ugt_v4i32(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: @cmp_ugt_v4i32(
; CHECK-NEXT:    [[TMP1:%.*]] = icmp ugt <4 x i32> %a, %b
; CHECK-NEXT:    [[TMP2:%.*]] = sext <4 x i1> [[TMP1]] to <4 x i32>
; CHECK-NEXT:    ret <4 x i32> [[TMP2]]
;
  %1 = tail call <4 x i32> @llvm.x86.xop.vpcomgtud(<4 x i32> %a, <4 x i32> %b)
  ret <4 x i32> %1
}

define <4 x i32> @cmp_sge_v4i32(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: @cmp_sge_v4i32(
; CHECK-NEXT:    [[TMP1:%.*]] = icmp sge <4 x i32> %a, %b
; CHECK-NEXT:    [[TMP2:%.*]] = sext <4 x i1> [[TMP1]] to <4 x i32>
; CHECK-NEXT:    ret <4 x i32> [[TMP2]]
;
  %1 = tail call <4 x i32> @llvm.x86.xop.vpcomged(<4 x i32> %a, <4 x i32> %b)
  ret <4 x i32> %1
}

define <4 x i32> @cmp_uge_v4i32(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: @cmp_uge_v4i32(
; CHECK-NEXT:    [[TMP1:%.*]] = icmp uge <4 x i32> %a, %b
; CHECK-NEXT:    [[TMP2:%.*]] = sext <4 x i1> [[TMP1]] to <4 x i32>
; CHECK-NEXT:    ret <4 x i32> [[TMP2]]
;
  %1 = tail call <4 x i32> @llvm.x86.xop.vpcomgeud(<4 x i32> %a, <4 x i32> %b)
  ret <4 x i32> %1
}

define <8 x i16> @cmp_seq_v8i16(<8 x i16> %a, <8 x i16> %b) {
; CHECK-LABEL: @cmp_seq_v8i16(
; CHECK-NEXT:    [[TMP1:%.*]] = icmp eq <8 x i16> %a, %b
; CHECK-NEXT:    [[TMP2:%.*]] = sext <8 x i1> [[TMP1]] to <8 x i16>
; CHECK-NEXT:    ret <8 x i16> [[TMP2]]
;
  %1 = tail call <8 x i16> @llvm.x86.xop.vpcomeqw(<8 x i16> %a, <8 x i16> %b)
  ret <8 x i16> %1
}

define <8 x i16> @cmp_ueq_v8i16(<8 x i16> %a, <8 x i16> %b) {
; CHECK-LABEL: @cmp_ueq_v8i16(
; CHECK-NEXT:    [[TMP1:%.*]] = icmp eq <8 x i16> %a, %b
; CHECK-NEXT:    [[TMP2:%.*]] = sext <8 x i1> [[TMP1]] to <8 x i16>
; CHECK-NEXT:    ret <8 x i16> [[TMP2]]
;
  %1 = tail call <8 x i16> @llvm.x86.xop.vpcomequw(<8 x i16> %a, <8 x i16> %b)
  ret <8 x i16> %1
}

define <8 x i16> @cmp_sne_v8i16(<8 x i16> %a, <8 x i16> %b) {
; CHECK-LABEL: @cmp_sne_v8i16(
; CHECK-NEXT:    [[TMP1:%.*]] = icmp ne <8 x i16> %a, %b
; CHECK-NEXT:    [[TMP2:%.*]] = sext <8 x i1> [[TMP1]] to <8 x i16>
; CHECK-NEXT:    ret <8 x i16> [[TMP2]]
;
  %1 = tail call <8 x i16> @llvm.x86.xop.vpcomnew(<8 x i16> %a, <8 x i16> %b)
  ret <8 x i16> %1
}

define <8 x i16> @cmp_une_v8i16(<8 x i16> %a, <8 x i16> %b) {
; CHECK-LABEL: @cmp_une_v8i16(
; CHECK-NEXT:    [[TMP1:%.*]] = icmp ne <8 x i16> %a, %b
; CHECK-NEXT:    [[TMP2:%.*]] = sext <8 x i1> [[TMP1]] to <8 x i16>
; CHECK-NEXT:    ret <8 x i16> [[TMP2]]
;
  %1 = tail call <8 x i16> @llvm.x86.xop.vpcomneuw(<8 x i16> %a, <8 x i16> %b)
  ret <8 x i16> %1
}

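; The "true" and "false" comparison variants ignore their operands entirely,
; so they fold straight to an all-ones or all-zeros vector constant.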
define <16 x i8> @cmp_strue_v16i8(<16 x i8> %a, <16 x i8> %b) {
; CHECK-LABEL: @cmp_strue_v16i8(
; CHECK-NEXT:    ret <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
;
  %1 = tail call <16 x i8> @llvm.x86.xop.vpcomtrueb(<16 x i8> %a, <16 x i8> %b)
  ret <16 x i8> %1
}

define <16 x i8> @cmp_utrue_v16i8(<16 x i8> %a, <16 x i8> %b) {
; CHECK-LABEL: @cmp_utrue_v16i8(
; CHECK-NEXT:    ret <16 x i8> <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
;
  %1 = tail call <16 x i8> @llvm.x86.xop.vpcomtrueub(<16 x i8> %a, <16 x i8> %b)
  ret <16 x i8> %1
}

define <16 x i8> @cmp_sfalse_v16i8(<16 x i8> %a, <16 x i8> %b) {
; CHECK-LABEL: @cmp_sfalse_v16i8(
; CHECK-NEXT:    ret <16 x i8> zeroinitializer
;
  %1 = tail call <16 x i8> @llvm.x86.xop.vpcomfalseb(<16 x i8> %a, <16 x i8> %b)
  ret <16 x i8> %1
}

define <16 x i8> @cmp_ufalse_v16i8(<16 x i8> %a, <16 x i8> %b) {
; CHECK-LABEL: @cmp_ufalse_v16i8(
; CHECK-NEXT:    ret <16 x i8> zeroinitializer
;
  %1 = tail call <16 x i8> @llvm.x86.xop.vpcomfalseub(<16 x i8> %a, <16 x i8> %b)
  ret <16 x i8> %1
}

declare <2 x double> @llvm.x86.xop.vfrcz.sd(<2 x double>) nounwind readnone
declare <4 x float> @llvm.x86.xop.vfrcz.ss(<4 x float>) nounwind readnone

declare <16 x i8> @llvm.x86.xop.vpcomltb(<16 x i8>, <16 x i8>) nounwind readnone
declare <8 x i16> @llvm.x86.xop.vpcomltw(<8 x i16>, <8 x i16>) nounwind readnone
declare <4 x i32> @llvm.x86.xop.vpcomltd(<4 x i32>, <4 x i32>) nounwind readnone
declare <2 x i64> @llvm.x86.xop.vpcomltq(<2 x i64>, <2 x i64>) nounwind readnone
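To check the updated assertions, the test can be run on its own through lit; the build directory below is an assumption about an in-tree CMake build:

    $ build/bin/llvm-lit -v test/Transforms/InstCombine/x86-xop.ll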