CbC_llvm (LLVM 10): test/Transforms/InstCombine/masked-merge-xor.ll @ 147:c2174574ed3a
author: Shinji KONO <kono@ie.u-ryukyu.ac.jp>
date:   Wed, 14 Aug 2019 16:55:33 +0900
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -instcombine -S | FileCheck %s

; https://bugs.llvm.org/show_bug.cgi?id=6773

; Patterns:
;   (x & m) | (y & ~m)
;   (x & m) ^ (y & ~m)
;   (x & m) + (y & ~m)
; Should be transformed into:
;   (x & m) | (y & ~m)
; And then into:
;   ((x ^ y) & m) ^ y

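; A worked example for illustration (8-bit values, not part of the original
; test): with x = 0b10101010, y = 0b01010101 and m = 0b11110000,
;   x & m  = 0b10100000
;   y & ~m = 0b00000101
; The two halves share no set bit, so or, xor and add all produce 0b10100101,
; and ((x ^ y) & m) ^ y = (0b11111111 & 0b11110000) ^ 0b01010101 = 0b10100101
; gives the same value with one fewer instruction.
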
; ============================================================================ ;
; Most basic positive tests
; ============================================================================ ;

define i32 @p(i32 %x, i32 %y, i32 %m) {
; CHECK-LABEL: @p(
; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], [[M:%.*]]
; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M]], -1
; CHECK-NEXT: [[AND1:%.*]] = and i32 [[NEG]], [[Y:%.*]]
; CHECK-NEXT: [[RET:%.*]] = or i32 [[AND]], [[AND1]]
; CHECK-NEXT: ret i32 [[RET]]
;
  %and = and i32 %x, %m
  %neg = xor i32 %m, -1
  %and1 = and i32 %neg, %y
  %ret = xor i32 %and, %and1
  ret i32 %ret
}

define <2 x i32> @p_splatvec(<2 x i32> %x, <2 x i32> %y, <2 x i32> %m) {
; CHECK-LABEL: @p_splatvec(
; CHECK-NEXT: [[AND:%.*]] = and <2 x i32> [[X:%.*]], [[M:%.*]]
; CHECK-NEXT: [[NEG:%.*]] = xor <2 x i32> [[M]], <i32 -1, i32 -1>
; CHECK-NEXT: [[AND1:%.*]] = and <2 x i32> [[NEG]], [[Y:%.*]]
; CHECK-NEXT: [[RET:%.*]] = or <2 x i32> [[AND]], [[AND1]]
; CHECK-NEXT: ret <2 x i32> [[RET]]
;
  %and = and <2 x i32> %x, %m
  %neg = xor <2 x i32> %m, <i32 -1, i32 -1>
  %and1 = and <2 x i32> %neg, %y
  %ret = xor <2 x i32> %and, %and1
  ret <2 x i32> %ret
}

define <3 x i32> @p_vec_undef(<3 x i32> %x, <3 x i32> %y, <3 x i32> %m) {
; CHECK-LABEL: @p_vec_undef(
; CHECK-NEXT: [[AND:%.*]] = and <3 x i32> [[X:%.*]], [[M:%.*]]
; CHECK-NEXT: [[NEG:%.*]] = xor <3 x i32> [[M]], <i32 -1, i32 undef, i32 -1>
; CHECK-NEXT: [[AND1:%.*]] = and <3 x i32> [[NEG]], [[Y:%.*]]
; CHECK-NEXT: [[RET:%.*]] = or <3 x i32> [[AND]], [[AND1]]
; CHECK-NEXT: ret <3 x i32> [[RET]]
;
  %and = and <3 x i32> %x, %m
  %neg = xor <3 x i32> %m, <i32 -1, i32 undef, i32 -1>
  %and1 = and <3 x i32> %neg, %y
  %ret = xor <3 x i32> %and, %and1
  ret <3 x i32> %ret
}

; ============================================================================ ;
; Constant mask.
; ============================================================================ ;

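; (Note added for clarity: 65280 is 0xFF00 and -65281 is 0xFFFF00FF, its
; bitwise complement, so the two masked values below still cannot share a
; set bit.)
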
define i32 @p_constmask(i32 %x, i32 %y) {
; CHECK-LABEL: @p_constmask(
; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], 65280
; CHECK-NEXT: [[AND1:%.*]] = and i32 [[Y:%.*]], -65281
; CHECK-NEXT: [[RET1:%.*]] = or i32 [[AND]], [[AND1]]
; CHECK-NEXT: ret i32 [[RET1]]
;
  %and = and i32 %x, 65280
  %and1 = and i32 %y, -65281
  %ret = xor i32 %and, %and1
  ret i32 %ret
}

define <2 x i32> @p_constmask_splatvec(<2 x i32> %x, <2 x i32> %y) {
; CHECK-LABEL: @p_constmask_splatvec(
; CHECK-NEXT: [[AND:%.*]] = and <2 x i32> [[X:%.*]], <i32 65280, i32 65280>
; CHECK-NEXT: [[AND1:%.*]] = and <2 x i32> [[Y:%.*]], <i32 -65281, i32 -65281>
; CHECK-NEXT: [[RET1:%.*]] = or <2 x i32> [[AND]], [[AND1]]
; CHECK-NEXT: ret <2 x i32> [[RET1]]
;
  %and = and <2 x i32> %x, <i32 65280, i32 65280>
  %and1 = and <2 x i32> %y, <i32 -65281, i32 -65281>
  %ret = xor <2 x i32> %and, %and1
  ret <2 x i32> %ret
}

define <2 x i32> @p_constmask_vec(<2 x i32> %x, <2 x i32> %y) {
; CHECK-LABEL: @p_constmask_vec(
; CHECK-NEXT: [[AND:%.*]] = and <2 x i32> [[X:%.*]], <i32 65280, i32 16776960>
; CHECK-NEXT: [[AND1:%.*]] = and <2 x i32> [[Y:%.*]], <i32 -65281, i32 -16776961>
; CHECK-NEXT: [[RET:%.*]] = xor <2 x i32> [[AND]], [[AND1]]
; CHECK-NEXT: ret <2 x i32> [[RET]]
;
  %and = and <2 x i32> %x, <i32 65280, i32 16776960>
  %and1 = and <2 x i32> %y, <i32 -65281, i32 -16776961>
  %ret = xor <2 x i32> %and, %and1
  ret <2 x i32> %ret
}

define <3 x i32> @p_constmask_vec_undef(<3 x i32> %x, <3 x i32> %y) {
; CHECK-LABEL: @p_constmask_vec_undef(
; CHECK-NEXT: [[AND:%.*]] = and <3 x i32> [[X:%.*]], <i32 65280, i32 undef, i32 65280>
; CHECK-NEXT: [[AND1:%.*]] = and <3 x i32> [[Y:%.*]], <i32 -65281, i32 undef, i32 -65281>
; CHECK-NEXT: [[RET:%.*]] = xor <3 x i32> [[AND]], [[AND1]]
; CHECK-NEXT: ret <3 x i32> [[RET]]
;
  %and = and <3 x i32> %x, <i32 65280, i32 undef, i32 65280>
  %and1 = and <3 x i32> %y, <i32 -65281, i32 undef, i32 -65281>
  %ret = xor <3 x i32> %and, %and1
  ret <3 x i32> %ret
}

; ============================================================================ ;
; Constant mask with no common bits set, but common unset bits.
; ============================================================================ ;

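; (Note added for clarity: 61440 is 0xF000 and -65281 is 0xFFFF00FF. The two
; constants have no set bit in common, so the xor of the masked values can
; still become an or, but they are not complementary either: both leave the
; 0x0F00 bits clear.)
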
define i32 @p_constmask2(i32 %x, i32 %y) {
; CHECK-LABEL: @p_constmask2(
; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], 61440
; CHECK-NEXT: [[AND1:%.*]] = and i32 [[Y:%.*]], -65281
; CHECK-NEXT: [[RET1:%.*]] = or i32 [[AND]], [[AND1]]
; CHECK-NEXT: ret i32 [[RET1]]
;
  %and = and i32 %x, 61440
  %and1 = and i32 %y, -65281
  %ret = xor i32 %and, %and1
  ret i32 %ret
}

define <2 x i32> @p_constmask2_splatvec(<2 x i32> %x, <2 x i32> %y) {
; CHECK-LABEL: @p_constmask2_splatvec(
; CHECK-NEXT: [[AND:%.*]] = and <2 x i32> [[X:%.*]], <i32 61440, i32 61440>
; CHECK-NEXT: [[AND1:%.*]] = and <2 x i32> [[Y:%.*]], <i32 -65281, i32 -65281>
; CHECK-NEXT: [[RET1:%.*]] = or <2 x i32> [[AND]], [[AND1]]
; CHECK-NEXT: ret <2 x i32> [[RET1]]
;
  %and = and <2 x i32> %x, <i32 61440, i32 61440>
  %and1 = and <2 x i32> %y, <i32 -65281, i32 -65281>
  %ret = xor <2 x i32> %and, %and1
  ret <2 x i32> %ret
}

define <2 x i32> @p_constmask2_vec(<2 x i32> %x, <2 x i32> %y) {
; CHECK-LABEL: @p_constmask2_vec(
; CHECK-NEXT: [[AND:%.*]] = and <2 x i32> [[X:%.*]], <i32 61440, i32 16711680>
; CHECK-NEXT: [[AND1:%.*]] = and <2 x i32> [[Y:%.*]], <i32 -65281, i32 -16776961>
; CHECK-NEXT: [[RET:%.*]] = xor <2 x i32> [[AND]], [[AND1]]
; CHECK-NEXT: ret <2 x i32> [[RET]]
;
  %and = and <2 x i32> %x, <i32 61440, i32 16711680>
  %and1 = and <2 x i32> %y, <i32 -65281, i32 -16776961>
  %ret = xor <2 x i32> %and, %and1
  ret <2 x i32> %ret
}

define <3 x i32> @p_constmask2_vec_undef(<3 x i32> %x, <3 x i32> %y) {
; CHECK-LABEL: @p_constmask2_vec_undef(
; CHECK-NEXT: [[AND:%.*]] = and <3 x i32> [[X:%.*]], <i32 61440, i32 undef, i32 61440>
; CHECK-NEXT: [[AND1:%.*]] = and <3 x i32> [[Y:%.*]], <i32 -65281, i32 undef, i32 -65281>
; CHECK-NEXT: [[RET:%.*]] = xor <3 x i32> [[AND]], [[AND1]]
; CHECK-NEXT: ret <3 x i32> [[RET]]
;
  %and = and <3 x i32> %x, <i32 61440, i32 undef, i32 61440>
  %and1 = and <3 x i32> %y, <i32 -65281, i32 undef, i32 -65281>
  %ret = xor <3 x i32> %and, %and1
  ret <3 x i32> %ret
}

; ============================================================================ ;
; Commutativity.
; ============================================================================ ;

; Used to make sure that the IR complexity sorting does not interfere.
declare i32 @gen32()

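; (Note added for clarity: instcombine sorts the operands of commutative
; instructions by "complexity", and a call result ranks differently from a
; plain function argument, so routing one operand through @gen32() below is
; presumably what keeps the swapped-operand variants from being canonicalized
; into the same shape before the fold is exercised.)
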
define i32 @p_commutative0(i32 %x, i32 %y, i32 %m) {
; CHECK-LABEL: @p_commutative0(
; CHECK-NEXT: [[AND:%.*]] = and i32 [[M:%.*]], [[X:%.*]]
; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M]], -1
; CHECK-NEXT: [[AND1:%.*]] = and i32 [[NEG]], [[Y:%.*]]
; CHECK-NEXT: [[RET:%.*]] = or i32 [[AND]], [[AND1]]
; CHECK-NEXT: ret i32 [[RET]]
;
  %and = and i32 %m, %x ; swapped order
  %neg = xor i32 %m, -1
  %and1 = and i32 %neg, %y
  %ret = xor i32 %and, %and1
  ret i32 %ret
}

define i32 @p_commutative1(i32 %x, i32 %m) {
; CHECK-LABEL: @p_commutative1(
; CHECK-NEXT: [[Y:%.*]] = call i32 @gen32()
; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], [[M:%.*]]
; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M]], -1
; CHECK-NEXT: [[AND1:%.*]] = and i32 [[Y]], [[NEG]]
; CHECK-NEXT: [[RET:%.*]] = or i32 [[AND]], [[AND1]]
; CHECK-NEXT: ret i32 [[RET]]
;
  %y = call i32 @gen32()
  %and = and i32 %x, %m
  %neg = xor i32 %m, -1
  %and1 = and i32 %y, %neg ; swapped order
  %ret = xor i32 %and, %and1
  ret i32 %ret
}

define i32 @p_commutative2(i32 %x, i32 %y, i32 %m) {
; CHECK-LABEL: @p_commutative2(
; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], [[M:%.*]]
; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M]], -1
; CHECK-NEXT: [[AND1:%.*]] = and i32 [[NEG]], [[Y:%.*]]
; CHECK-NEXT: [[RET:%.*]] = or i32 [[AND1]], [[AND]]
; CHECK-NEXT: ret i32 [[RET]]
;
  %and = and i32 %x, %m
  %neg = xor i32 %m, -1
  %and1 = and i32 %neg, %y
  %ret = xor i32 %and1, %and ; swapped order
  ret i32 %ret
}

define i32 @p_commutative3(i32 %x, i32 %m) {
; CHECK-LABEL: @p_commutative3(
; CHECK-NEXT: [[Y:%.*]] = call i32 @gen32()
; CHECK-NEXT: [[AND:%.*]] = and i32 [[M:%.*]], [[X:%.*]]
; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M]], -1
; CHECK-NEXT: [[AND1:%.*]] = and i32 [[Y]], [[NEG]]
; CHECK-NEXT: [[RET:%.*]] = or i32 [[AND]], [[AND1]]
; CHECK-NEXT: ret i32 [[RET]]
;
  %y = call i32 @gen32()
  %and = and i32 %m, %x ; swapped order
  %neg = xor i32 %m, -1
  %and1 = and i32 %y, %neg ; swapped order
  %ret = xor i32 %and, %and1
  ret i32 %ret
}

define i32 @p_commutative4(i32 %x, i32 %y, i32 %m) {
; CHECK-LABEL: @p_commutative4(
; CHECK-NEXT: [[AND:%.*]] = and i32 [[M:%.*]], [[X:%.*]]
; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M]], -1
; CHECK-NEXT: [[AND1:%.*]] = and i32 [[NEG]], [[Y:%.*]]
; CHECK-NEXT: [[RET:%.*]] = or i32 [[AND1]], [[AND]]
; CHECK-NEXT: ret i32 [[RET]]
;
  %and = and i32 %m, %x ; swapped order
  %neg = xor i32 %m, -1
  %and1 = and i32 %neg, %y
  %ret = xor i32 %and1, %and ; swapped order
  ret i32 %ret
}

define i32 @p_commutative5(i32 %x, i32 %m) {
; CHECK-LABEL: @p_commutative5(
; CHECK-NEXT: [[Y:%.*]] = call i32 @gen32()
; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], [[M:%.*]]
; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M]], -1
; CHECK-NEXT: [[AND1:%.*]] = and i32 [[Y]], [[NEG]]
; CHECK-NEXT: [[RET:%.*]] = or i32 [[AND1]], [[AND]]
; CHECK-NEXT: ret i32 [[RET]]
;
  %y = call i32 @gen32()
  %and = and i32 %x, %m
  %neg = xor i32 %m, -1
  %and1 = and i32 %y, %neg ; swapped order
  %ret = xor i32 %and1, %and ; swapped order
  ret i32 %ret
}

define i32 @p_commutative6(i32 %x, i32 %m) {
; CHECK-LABEL: @p_commutative6(
; CHECK-NEXT: [[Y:%.*]] = call i32 @gen32()
; CHECK-NEXT: [[AND:%.*]] = and i32 [[M:%.*]], [[X:%.*]]
; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M]], -1
; CHECK-NEXT: [[AND1:%.*]] = and i32 [[Y]], [[NEG]]
; CHECK-NEXT: [[RET:%.*]] = or i32 [[AND1]], [[AND]]
; CHECK-NEXT: ret i32 [[RET]]
;
  %y = call i32 @gen32()
  %and = and i32 %m, %x ; swapped order
  %neg = xor i32 %m, -1
  %and1 = and i32 %y, %neg ; swapped order
  %ret = xor i32 %and1, %and ; swapped order
  ret i32 %ret
}

define i32 @p_constmask_commutative(i32 %x, i32 %y) {
; CHECK-LABEL: @p_constmask_commutative(
; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], 65280
; CHECK-NEXT: [[AND1:%.*]] = and i32 [[Y:%.*]], -65281
; CHECK-NEXT: [[RET1:%.*]] = or i32 [[AND1]], [[AND]]
; CHECK-NEXT: ret i32 [[RET1]]
;
  %and = and i32 %x, 65280
  %and1 = and i32 %y, -65281
  %ret = xor i32 %and1, %and ; swapped order
  ret i32 %ret
}

; ============================================================================ ;
; Negative tests. Should not be folded.
; ============================================================================ ;

; One use only.

declare void @use32(i32)

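; (Note added for clarity: in the two tests below every intermediate value is
; also passed to @use32, so those instructions stay live regardless. The xor
; of the two disjoint and results may still be canonicalized to an or, as the
; CHECK lines show, but the further rewrite into ((x ^ y) & m) ^ y would not
; remove any instructions here, so it is expected not to fire.)
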
define i32 @n0_oneuse(i32 %x, i32 %y, i32 %m) {
; CHECK-LABEL: @n0_oneuse(
; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], [[M:%.*]]
; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M]], -1
; CHECK-NEXT: [[AND1:%.*]] = and i32 [[NEG]], [[Y:%.*]]
; CHECK-NEXT: [[RET:%.*]] = or i32 [[AND]], [[AND1]]
; CHECK-NEXT: call void @use32(i32 [[AND]])
; CHECK-NEXT: call void @use32(i32 [[NEG]])
; CHECK-NEXT: call void @use32(i32 [[AND1]])
; CHECK-NEXT: ret i32 [[RET]]
;
  %and = and i32 %x, %m
  %neg = xor i32 %m, -1
  %and1 = and i32 %neg, %y
  %ret = xor i32 %and, %and1
  call void @use32(i32 %and)
  call void @use32(i32 %neg)
  call void @use32(i32 %and1)
  ret i32 %ret
}

define i32 @n0_constmask_oneuse(i32 %x, i32 %y) {
; CHECK-LABEL: @n0_constmask_oneuse(
; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], 65280
; CHECK-NEXT: [[AND1:%.*]] = and i32 [[Y:%.*]], -65281
; CHECK-NEXT: [[RET1:%.*]] = or i32 [[AND]], [[AND1]]
; CHECK-NEXT: call void @use32(i32 [[AND]])
; CHECK-NEXT: call void @use32(i32 [[AND1]])
; CHECK-NEXT: ret i32 [[RET1]]
;
  %and = and i32 %x, 65280
  %and1 = and i32 %y, -65281
  %ret = xor i32 %and, %and1
  call void @use32(i32 %and)
  call void @use32(i32 %and1)
  ret i32 %ret
}

; Bad xor constant

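; (Note added for clarity: xor with 1 only flips the lowest bit, so %neg below
; is not the bitwise complement of %m; the two and results may share set bits
; and the masked-merge fold must not apply.)
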
define i32 @n1_badxor(i32 %x, i32 %y, i32 %m) {
; CHECK-LABEL: @n1_badxor(
; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], [[M:%.*]]
; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M]], 1
; CHECK-NEXT: [[AND1:%.*]] = and i32 [[NEG]], [[Y:%.*]]
; CHECK-NEXT: [[RET:%.*]] = xor i32 [[AND]], [[AND1]]
; CHECK-NEXT: ret i32 [[RET]]
;
  %and = and i32 %x, %m
  %neg = xor i32 %m, 1 ; not -1
  %and1 = and i32 %neg, %y
  %ret = xor i32 %and, %and1
  ret i32 %ret
}

; Different mask is used

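; (Note added for clarity: %m1 and %m2 are unrelated values, so nothing
; guarantees that %m1 & %x and ~%m2 & %y are disjoint, and the xor cannot be
; treated as an or.)
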
define i32 @n2_badmask(i32 %x, i32 %y, i32 %m1, i32 %m2) {
; CHECK-LABEL: @n2_badmask(
; CHECK-NEXT: [[AND:%.*]] = and i32 [[M1:%.*]], [[X:%.*]]
; CHECK-NEXT: [[NEG:%.*]] = xor i32 [[M2:%.*]], -1
; CHECK-NEXT: [[AND1:%.*]] = and i32 [[NEG]], [[Y:%.*]]
; CHECK-NEXT: [[RET:%.*]] = xor i32 [[AND]], [[AND1]]
; CHECK-NEXT: ret i32 [[RET]]
;
  %and = and i32 %m1, %x
  %neg = xor i32 %m2, -1 ; different mask, not %m1
  %and1 = and i32 %neg, %y
  %ret = xor i32 %and, %and1
  ret i32 %ret
}

; Different const mask is used

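; (Worked numbers, added for clarity: 65280 is 0xFF00 and -65280 is 0xFFFF0100
; in 32-bit two's complement. The masks share bit 8 (0x100), so both and
; results may have that bit set and the xor is not equivalent to an or.)
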
define i32 @n3_constmask_badmask(i32 %x, i32 %y) {
; CHECK-LABEL: @n3_constmask_badmask(
; CHECK-NEXT: [[AND:%.*]] = and i32 [[X:%.*]], 65280
; CHECK-NEXT: [[AND1:%.*]] = and i32 [[Y:%.*]], -65280
; CHECK-NEXT: [[RET:%.*]] = xor i32 [[AND]], [[AND1]]
; CHECK-NEXT: ret i32 [[RET]]
;
  %and = and i32 %x, 65280
  %and1 = and i32 %y, -65280 ; not -65281, so they have one common bit
  %ret = xor i32 %and, %and1
  ret i32 %ret
}

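; (Note added for clarity: when both operands use the same constant mask, as
; in @n3_constmask_samemask below, a different fold applies instead:
; (x & 65280) ^ (y & 65280) becomes (x ^ y) & 65280, which is what the CHECK
; lines expect.)
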
define i32 @n3_constmask_samemask(i32 %x, i32 %y) {
; CHECK-LABEL: @n3_constmask_samemask(
; CHECK-NEXT: [[AND2:%.*]] = xor i32 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: [[RET:%.*]] = and i32 [[AND2]], 65280
; CHECK-NEXT: ret i32 [[RET]]
;
  %and = and i32 %x, 65280
  %and1 = and i32 %y, 65280 ; both masks are the same
  %ret = xor i32 %and, %and1
  ret i32 %ret
}