Mercurial > hg > CbC > CbC_llvm
comparison llvm/test/Transforms/LoopDistribute/scev-inserted-runtime-check.ll @ 150:1d019706d866
LLVM10
author | anatofuz |
---|---|
date | Thu, 13 Feb 2020 15:10:13 +0900 |
parents | |
children | 2e18cbf3894f |
comparison
equal
deleted
inserted
replaced
147:c2174574ed3a | 150:1d019706d866 |
---|---|
; (review) Test preamble: the RUN line drives `opt` with LoopDistribute
; enabled and memory-access versioning disabled, then pipes the output
; to FileCheck. The CHECK lines in this file were autogenerated by
; utils/update_test_checks.py — regenerate them with that script rather
; than editing by hand. NOTE(review): this page is a Mercurial web dump;
; the leading line numbers and trailing "| |" are scraping artifacts,
; not part of the real .ll file — confirm against the repository copy.
1 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py | |
2 ; RUN: opt -basicaa -loop-distribute -enable-loop-distribute -S -enable-mem-access-versioning=0 < %s | FileCheck %s | |
3 | |
4 target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128" | |
5 | |
6 ; PredicatedScalarEvolution decides it needs to insert a bounds check | |
7 ; not based on memory access. | |
8 | |
; (review) @f: a single loop whose body has two independent halves
; (a/b computation feeding stores to %a, and d/e computation feeding
; stores to %c). The CHECK lines assert that LoopDistribute versions
; the loop: a runtime-check block (for.body.lver.check) built from
; PredicatedScalarEvolution — llvm.umul.with.overflow checks on the
; i32 induction stride and on the byte offset into %a, plus a trunc
; range check — branches to either the unmodified original loop
; (.lver.orig) or the distributed pair (.ldist1 then for.body).
; All CHECK lines are autogenerated; do not hand-edit them.
9 define void @f(i32* noalias %a, i32* noalias %b, i32* noalias %c, i32* noalias %d, i32* noalias %e, i64 %N) { | |
10 ; CHECK-LABEL: @f( | |
11 ; CHECK-NEXT: entry: | |
12 ; CHECK-NEXT: [[A2:%.*]] = ptrtoint i32* [[A:%.*]] to i64 | |
13 ; CHECK-NEXT: br label [[FOR_BODY_LVER_CHECK:%.*]] | |
14 ; CHECK: for.body.lver.check: | |
15 ; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[N:%.*]], -1 | |
16 ; CHECK-NEXT: [[TMP1:%.*]] = trunc i64 [[TMP0]] to i32 | |
17 ; CHECK-NEXT: [[MUL1:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 2, i32 [[TMP1]]) | |
18 ; CHECK-NEXT: [[MUL_RESULT:%.*]] = extractvalue { i32, i1 } [[MUL1]], 0 | |
19 ; CHECK-NEXT: [[MUL_OVERFLOW:%.*]] = extractvalue { i32, i1 } [[MUL1]], 1 | |
20 ; CHECK-NEXT: [[TMP2:%.*]] = add i32 0, [[MUL_RESULT]] | |
21 ; CHECK-NEXT: [[TMP3:%.*]] = sub i32 0, [[MUL_RESULT]] | |
22 ; CHECK-NEXT: [[TMP4:%.*]] = icmp ugt i32 [[TMP3]], 0 | |
23 ; CHECK-NEXT: [[TMP5:%.*]] = icmp ult i32 [[TMP2]], 0 | |
24 ; CHECK-NEXT: [[TMP6:%.*]] = select i1 false, i1 [[TMP4]], i1 [[TMP5]] | |
25 ; CHECK-NEXT: [[TMP7:%.*]] = icmp ugt i64 [[TMP0]], 4294967295 | |
26 ; CHECK-NEXT: [[TMP8:%.*]] = or i1 [[TMP6]], [[TMP7]] | |
27 ; CHECK-NEXT: [[TMP9:%.*]] = or i1 [[TMP8]], [[MUL_OVERFLOW]] | |
28 ; CHECK-NEXT: [[TMP10:%.*]] = or i1 false, [[TMP9]] | |
29 ; CHECK-NEXT: [[MUL3:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 8, i64 [[TMP0]]) | |
30 ; CHECK-NEXT: [[MUL_RESULT4:%.*]] = extractvalue { i64, i1 } [[MUL3]], 0 | |
31 ; CHECK-NEXT: [[MUL_OVERFLOW5:%.*]] = extractvalue { i64, i1 } [[MUL3]], 1 | |
32 ; CHECK-NEXT: [[TMP11:%.*]] = add i64 [[A2]], [[MUL_RESULT4]] | |
33 ; CHECK-NEXT: [[TMP12:%.*]] = sub i64 [[A2]], [[MUL_RESULT4]] | |
34 ; CHECK-NEXT: [[TMP13:%.*]] = icmp ugt i64 [[TMP12]], [[A2]] | |
35 ; CHECK-NEXT: [[TMP14:%.*]] = icmp ult i64 [[TMP11]], [[A2]] | |
36 ; CHECK-NEXT: [[TMP15:%.*]] = select i1 false, i1 [[TMP13]], i1 [[TMP14]] | |
37 ; CHECK-NEXT: [[TMP16:%.*]] = or i1 [[TMP15]], [[MUL_OVERFLOW5]] | |
38 ; CHECK-NEXT: [[TMP17:%.*]] = or i1 [[TMP10]], [[TMP16]] | |
39 ; CHECK-NEXT: br i1 [[TMP17]], label [[FOR_BODY_PH_LVER_ORIG:%.*]], label [[FOR_BODY_PH_LDIST1:%.*]] | |
40 ; CHECK: for.body.ph.lver.orig: | |
41 ; CHECK-NEXT: br label [[FOR_BODY_LVER_ORIG:%.*]] | |
42 ; CHECK: for.body.lver.orig: | |
43 ; CHECK-NEXT: [[IND_LVER_ORIG:%.*]] = phi i64 [ 0, [[FOR_BODY_PH_LVER_ORIG]] ], [ [[ADD_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ] | |
44 ; CHECK-NEXT: [[IND1_LVER_ORIG:%.*]] = phi i32 [ 0, [[FOR_BODY_PH_LVER_ORIG]] ], [ [[INC1_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ] | |
45 ; CHECK-NEXT: [[MUL_LVER_ORIG:%.*]] = mul i32 [[IND1_LVER_ORIG]], 2 | |
46 ; CHECK-NEXT: [[MUL_EXT_LVER_ORIG:%.*]] = zext i32 [[MUL_LVER_ORIG]] to i64 | |
47 ; CHECK-NEXT: [[ARRAYIDXA_LVER_ORIG:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[MUL_EXT_LVER_ORIG]] | |
48 ; CHECK-NEXT: [[LOADA_LVER_ORIG:%.*]] = load i32, i32* [[ARRAYIDXA_LVER_ORIG]], align 4 | |
49 ; CHECK-NEXT: [[ARRAYIDXB_LVER_ORIG:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i64 [[MUL_EXT_LVER_ORIG]] | |
50 ; CHECK-NEXT: [[LOADB_LVER_ORIG:%.*]] = load i32, i32* [[ARRAYIDXB_LVER_ORIG]], align 4 | |
51 ; CHECK-NEXT: [[MULA_LVER_ORIG:%.*]] = mul i32 [[LOADB_LVER_ORIG]], [[LOADA_LVER_ORIG]] | |
52 ; CHECK-NEXT: [[ADD_LVER_ORIG]] = add nuw nsw i64 [[IND_LVER_ORIG]], 1 | |
53 ; CHECK-NEXT: [[INC1_LVER_ORIG]] = add i32 [[IND1_LVER_ORIG]], 1 | |
54 ; CHECK-NEXT: [[ARRAYIDXA_PLUS_4_LVER_ORIG:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[ADD_LVER_ORIG]] | |
55 ; CHECK-NEXT: store i32 [[MULA_LVER_ORIG]], i32* [[ARRAYIDXA_PLUS_4_LVER_ORIG]], align 4 | |
56 ; CHECK-NEXT: [[ARRAYIDXD_LVER_ORIG:%.*]] = getelementptr inbounds i32, i32* [[D:%.*]], i64 [[MUL_EXT_LVER_ORIG]] | |
57 ; CHECK-NEXT: [[LOADD_LVER_ORIG:%.*]] = load i32, i32* [[ARRAYIDXD_LVER_ORIG]], align 4 | |
58 ; CHECK-NEXT: [[ARRAYIDXE_LVER_ORIG:%.*]] = getelementptr inbounds i32, i32* [[E:%.*]], i64 [[MUL_EXT_LVER_ORIG]] | |
59 ; CHECK-NEXT: [[LOADE_LVER_ORIG:%.*]] = load i32, i32* [[ARRAYIDXE_LVER_ORIG]], align 4 | |
60 ; CHECK-NEXT: [[MULC_LVER_ORIG:%.*]] = mul i32 [[LOADD_LVER_ORIG]], [[LOADE_LVER_ORIG]] | |
61 ; CHECK-NEXT: [[ARRAYIDXC_LVER_ORIG:%.*]] = getelementptr inbounds i32, i32* [[C:%.*]], i64 [[MUL_EXT_LVER_ORIG]] | |
62 ; CHECK-NEXT: store i32 [[MULC_LVER_ORIG]], i32* [[ARRAYIDXC_LVER_ORIG]], align 4 | |
63 ; CHECK-NEXT: [[EXITCOND_LVER_ORIG:%.*]] = icmp eq i64 [[ADD_LVER_ORIG]], [[N]] | |
64 ; CHECK-NEXT: br i1 [[EXITCOND_LVER_ORIG]], label [[FOR_END:%.*]], label [[FOR_BODY_LVER_ORIG]] | |
65 ; CHECK: for.body.ph.ldist1: | |
66 ; CHECK-NEXT: br label [[FOR_BODY_LDIST1:%.*]] | |
67 ; CHECK: for.body.ldist1: | |
68 ; CHECK-NEXT: [[IND_LDIST1:%.*]] = phi i64 [ 0, [[FOR_BODY_PH_LDIST1]] ], [ [[ADD_LDIST1:%.*]], [[FOR_BODY_LDIST1]] ] | |
69 ; CHECK-NEXT: [[IND1_LDIST1:%.*]] = phi i32 [ 0, [[FOR_BODY_PH_LDIST1]] ], [ [[INC1_LDIST1:%.*]], [[FOR_BODY_LDIST1]] ] | |
70 ; CHECK-NEXT: [[MUL_LDIST1:%.*]] = mul i32 [[IND1_LDIST1]], 2 | |
71 ; CHECK-NEXT: [[MUL_EXT_LDIST1:%.*]] = zext i32 [[MUL_LDIST1]] to i64 | |
72 ; CHECK-NEXT: [[ARRAYIDXA_LDIST1:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[MUL_EXT_LDIST1]] | |
73 ; CHECK-NEXT: [[LOADA_LDIST1:%.*]] = load i32, i32* [[ARRAYIDXA_LDIST1]], align 4 | |
74 ; CHECK-NEXT: [[ARRAYIDXB_LDIST1:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 [[MUL_EXT_LDIST1]] | |
75 ; CHECK-NEXT: [[LOADB_LDIST1:%.*]] = load i32, i32* [[ARRAYIDXB_LDIST1]], align 4 | |
76 ; CHECK-NEXT: [[MULA_LDIST1:%.*]] = mul i32 [[LOADB_LDIST1]], [[LOADA_LDIST1]] | |
77 ; CHECK-NEXT: [[ADD_LDIST1]] = add nuw nsw i64 [[IND_LDIST1]], 1 | |
78 ; CHECK-NEXT: [[INC1_LDIST1]] = add i32 [[IND1_LDIST1]], 1 | |
79 ; CHECK-NEXT: [[ARRAYIDXA_PLUS_4_LDIST1:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[ADD_LDIST1]] | |
80 ; CHECK-NEXT: store i32 [[MULA_LDIST1]], i32* [[ARRAYIDXA_PLUS_4_LDIST1]], align 4 | |
81 ; CHECK-NEXT: [[EXITCOND_LDIST1:%.*]] = icmp eq i64 [[ADD_LDIST1]], [[N]] | |
82 ; CHECK-NEXT: br i1 [[EXITCOND_LDIST1]], label [[FOR_BODY_PH:%.*]], label [[FOR_BODY_LDIST1]] | |
83 ; CHECK: for.body.ph: | |
84 ; CHECK-NEXT: br label [[FOR_BODY:%.*]] | |
85 ; CHECK: for.body: | |
86 ; CHECK-NEXT: [[IND:%.*]] = phi i64 [ 0, [[FOR_BODY_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ] | |
87 ; CHECK-NEXT: [[IND1:%.*]] = phi i32 [ 0, [[FOR_BODY_PH]] ], [ [[INC1:%.*]], [[FOR_BODY]] ] | |
88 ; CHECK-NEXT: [[MUL:%.*]] = mul i32 [[IND1]], 2 | |
89 ; CHECK-NEXT: [[MUL_EXT:%.*]] = zext i32 [[MUL]] to i64 | |
90 ; CHECK-NEXT: [[ADD]] = add nuw nsw i64 [[IND]], 1 | |
91 ; CHECK-NEXT: [[INC1]] = add i32 [[IND1]], 1 | |
92 ; CHECK-NEXT: [[ARRAYIDXD:%.*]] = getelementptr inbounds i32, i32* [[D]], i64 [[MUL_EXT]] | |
93 ; CHECK-NEXT: [[LOADD:%.*]] = load i32, i32* [[ARRAYIDXD]], align 4 | |
94 ; CHECK-NEXT: [[ARRAYIDXE:%.*]] = getelementptr inbounds i32, i32* [[E]], i64 [[MUL_EXT]] | |
95 ; CHECK-NEXT: [[LOADE:%.*]] = load i32, i32* [[ARRAYIDXE]], align 4 | |
96 ; CHECK-NEXT: [[MULC:%.*]] = mul i32 [[LOADD]], [[LOADE]] | |
97 ; CHECK-NEXT: [[ARRAYIDXC:%.*]] = getelementptr inbounds i32, i32* [[C]], i64 [[MUL_EXT]] | |
98 ; CHECK-NEXT: store i32 [[MULC]], i32* [[ARRAYIDXC]], align 4 | |
99 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[ADD]], [[N]] | |
100 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END]], label [[FOR_BODY]] | |
101 ; CHECK: for.end: | |
102 ; CHECK-NEXT: ret void | |
103 ; | |
; (review) Input IR below: one loop combining two computations; the
; i32 induction %ind1 is zero-extended for addressing, which is what
; forces PredicatedScalarEvolution to emit the overflow predicates
; checked above.
104 entry: | |
105 br label %for.body | |
106 | |
107 for.body: ; preds = %for.body, %entry | |
108 %ind = phi i64 [ 0, %entry ], [ %add, %for.body ] | |
109 %ind1 = phi i32 [ 0, %entry ], [ %inc1, %for.body ] | |
110 | |
111 %mul = mul i32 %ind1, 2 | |
112 %mul_ext = zext i32 %mul to i64 | |
113 | |
114 | |
115 %arrayidxA = getelementptr inbounds i32, i32* %a, i64 %mul_ext | |
116 %loadA = load i32, i32* %arrayidxA, align 4 | |
117 | |
118 %arrayidxB = getelementptr inbounds i32, i32* %b, i64 %mul_ext | |
119 %loadB = load i32, i32* %arrayidxB, align 4 | |
120 | |
121 %mulA = mul i32 %loadB, %loadA | |
122 | |
123 %add = add nuw nsw i64 %ind, 1 | |
124 %inc1 = add i32 %ind1, 1 | |
125 | |
126 %arrayidxA_plus_4 = getelementptr inbounds i32, i32* %a, i64 %add | |
127 store i32 %mulA, i32* %arrayidxA_plus_4, align 4 | |
128 | |
129 %arrayidxD = getelementptr inbounds i32, i32* %d, i64 %mul_ext | |
130 %loadD = load i32, i32* %arrayidxD, align 4 | |
131 | |
132 %arrayidxE = getelementptr inbounds i32, i32* %e, i64 %mul_ext | |
133 %loadE = load i32, i32* %arrayidxE, align 4 | |
134 | |
135 %mulC = mul i32 %loadD, %loadE | |
136 | |
137 %arrayidxC = getelementptr inbounds i32, i32* %c, i64 %mul_ext | |
138 store i32 %mulC, i32* %arrayidxC, align 4 | |
139 | |
140 %exitcond = icmp eq i64 %add, %N | |
141 br i1 %exitcond, label %for.end, label %for.body | |
142 | |
143 for.end: ; preds = %for.body | |
144 ret void | |
145 } | |
146 | |
147 ; Can't add control dependency with convergent in loop body. | |
; (review) @f_with_convergent: identical loop to @f except the second
; computation goes through a call to the convergent intrinsic
; @llvm.convergent (attribute group #0/#1). The CHECK lines assert the
; IR comes out of the pass UNCHANGED — no .lver.check / .ldist1 blocks —
; because loop versioning would place the convergent call under a new
; control dependency, which is not allowed. CHECK lines are
; autogenerated; do not hand-edit.
148 define void @f_with_convergent(i32* noalias %a, i32* noalias %b, i32* noalias %c, i32* noalias %d, i32* noalias %e, i64 %N) #1 { | |
149 ; CHECK-LABEL: @f_with_convergent( | |
150 ; CHECK-NEXT: entry: | |
151 ; CHECK-NEXT: br label [[FOR_BODY:%.*]] | |
152 ; CHECK: for.body: | |
153 ; CHECK-NEXT: [[IND:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[ADD:%.*]], [[FOR_BODY]] ] | |
154 ; CHECK-NEXT: [[IND1:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[INC1:%.*]], [[FOR_BODY]] ] | |
155 ; CHECK-NEXT: [[MUL:%.*]] = mul i32 [[IND1]], 2 | |
156 ; CHECK-NEXT: [[MUL_EXT:%.*]] = zext i32 [[MUL]] to i64 | |
157 ; CHECK-NEXT: [[ARRAYIDXA:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[MUL_EXT]] | |
158 ; CHECK-NEXT: [[LOADA:%.*]] = load i32, i32* [[ARRAYIDXA]], align 4 | |
159 ; CHECK-NEXT: [[ARRAYIDXB:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i64 [[MUL_EXT]] | |
160 ; CHECK-NEXT: [[LOADB:%.*]] = load i32, i32* [[ARRAYIDXB]], align 4 | |
161 ; CHECK-NEXT: [[MULA:%.*]] = mul i32 [[LOADB]], [[LOADA]] | |
162 ; CHECK-NEXT: [[ADD]] = add nuw nsw i64 [[IND]], 1 | |
163 ; CHECK-NEXT: [[INC1]] = add i32 [[IND1]], 1 | |
164 ; CHECK-NEXT: [[ARRAYIDXA_PLUS_4:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[ADD]] | |
165 ; CHECK-NEXT: store i32 [[MULA]], i32* [[ARRAYIDXA_PLUS_4]], align 4 | |
166 ; CHECK-NEXT: [[ARRAYIDXD:%.*]] = getelementptr inbounds i32, i32* [[D:%.*]], i64 [[MUL_EXT]] | |
167 ; CHECK-NEXT: [[LOADD:%.*]] = load i32, i32* [[ARRAYIDXD]], align 4 | |
168 ; CHECK-NEXT: [[ARRAYIDXE:%.*]] = getelementptr inbounds i32, i32* [[E:%.*]], i64 [[MUL_EXT]] | |
169 ; CHECK-NEXT: [[LOADE:%.*]] = load i32, i32* [[ARRAYIDXE]], align 4 | |
170 ; CHECK-NEXT: [[CONVERGENTD:%.*]] = call i32 @llvm.convergent(i32 [[LOADD]]) | |
171 ; CHECK-NEXT: [[MULC:%.*]] = mul i32 [[CONVERGENTD]], [[LOADE]] | |
172 ; CHECK-NEXT: [[ARRAYIDXC:%.*]] = getelementptr inbounds i32, i32* [[C:%.*]], i64 [[MUL_EXT]] | |
173 ; CHECK-NEXT: store i32 [[MULC]], i32* [[ARRAYIDXC]], align 4 | |
174 ; CHECK-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[ADD]], [[N:%.*]] | |
175 ; CHECK-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]] | |
176 ; CHECK: for.end: | |
177 ; CHECK-NEXT: ret void | |
178 ; | |
; (review) Input IR below: same shape as @f, with the convergent call
; inserted on the %loadD -> %mulC path.
179 entry: | |
180 br label %for.body | |
181 | |
182 for.body: ; preds = %for.body, %entry | |
183 %ind = phi i64 [ 0, %entry ], [ %add, %for.body ] | |
184 %ind1 = phi i32 [ 0, %entry ], [ %inc1, %for.body ] | |
185 | |
186 %mul = mul i32 %ind1, 2 | |
187 %mul_ext = zext i32 %mul to i64 | |
188 | |
189 | |
190 %arrayidxA = getelementptr inbounds i32, i32* %a, i64 %mul_ext | |
191 %loadA = load i32, i32* %arrayidxA, align 4 | |
192 | |
193 %arrayidxB = getelementptr inbounds i32, i32* %b, i64 %mul_ext | |
194 %loadB = load i32, i32* %arrayidxB, align 4 | |
195 | |
196 %mulA = mul i32 %loadB, %loadA | |
197 | |
198 %add = add nuw nsw i64 %ind, 1 | |
199 %inc1 = add i32 %ind1, 1 | |
200 | |
201 %arrayidxA_plus_4 = getelementptr inbounds i32, i32* %a, i64 %add | |
202 store i32 %mulA, i32* %arrayidxA_plus_4, align 4 | |
203 | |
204 %arrayidxD = getelementptr inbounds i32, i32* %d, i64 %mul_ext | |
205 %loadD = load i32, i32* %arrayidxD, align 4 | |
206 | |
207 %arrayidxE = getelementptr inbounds i32, i32* %e, i64 %mul_ext | |
208 %loadE = load i32, i32* %arrayidxE, align 4 | |
209 | |
210 %convergentD = call i32 @llvm.convergent(i32 %loadD) | |
211 %mulC = mul i32 %convergentD, %loadE | |
212 | |
213 %arrayidxC = getelementptr inbounds i32, i32* %c, i64 %mul_ext | |
214 store i32 %mulC, i32* %arrayidxC, align 4 | |
215 | |
216 %exitcond = icmp eq i64 %add, %N | |
217 br i1 %exitcond, label %for.end, label %for.body | |
218 | |
219 for.end: ; preds = %for.body | |
220 ret void | |
221 } | |
222 | |
; (review) Support declarations: #0 marks the intrinsic itself
; convergent (and readnone), #1 marks @f_with_convergent's definition
; convergent; both attribute groups are what make the pass refuse to
; version that loop.
223 declare i32 @llvm.convergent(i32) #0 | |
224 | |
225 attributes #0 = { nounwind readnone convergent } | |
226 attributes #1 = { nounwind convergent } |