CbC_llvm: comparison clang/test/CodeGenObjC/strong-in-c-struct.m @ 252:1f2b6ac9f198 llvm-original
LLVM16-1
author | Shinji KONO <kono@ie.u-ryukyu.ac.jp> |
---|---|
date | Fri, 18 Aug 2023 09:04:13 +0900 |
parents | c4bab56944e8 |
children |
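This revision drops -no-opaque-pointers from the RUN lines and rewrites the CHECK patterns for opaque-pointer IR: typed pointer types such as i8*, i8** and %[[STRUCT_...]]* all become ptr, the bitcast instructions that only changed a pointer's type are gone (so some %[[Vn]] values are no longer emitted), and the pointer-typed intrinsic names change accordingly (llvm.memset.p0i8.i64 becomes llvm.memset.p0.i64, llvm.memcpy.p0i8.p0i8.i64 becomes llvm.memcpy.p0.p0.i64). As an illustrative sketch only, here is one representative before/after pair reproduced from the test_constructor_destructor_StrongOuter checks in the diff below:

    // old (typed pointers):
    // CHECK: %[[V0:.*]] = bitcast %[[STRUCT_STRONGOUTER]]* %[[T]] to i8**
    // CHECK: call void @__default_constructor_8_S_s16_s24(i8** %[[V0]])
    // new (opaque pointers):
    // CHECK: call void @__default_constructor_8_S_s16_s24(ptr %[[T]])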
237:c80f45b162ad | 252:1f2b6ac9f198 |
---|---|
1 // RUN: %clang_cc1 -no-opaque-pointers -triple arm64-apple-ios11 -fobjc-arc -fblocks -fobjc-runtime=ios-11.0 -emit-llvm -o - -DUSESTRUCT %s | FileCheck %s | 1 // RUN: %clang_cc1 -triple arm64-apple-ios11 -fobjc-arc -fblocks -fobjc-runtime=ios-11.0 -emit-llvm -o - -DUSESTRUCT %s | FileCheck %s |
2 | 2 |
3 // RUN: %clang_cc1 -no-opaque-pointers -triple arm64-apple-ios11 -fobjc-arc -fblocks -fobjc-runtime=ios-11.0 -emit-pch -o %t %s | 3 // RUN: %clang_cc1 -triple arm64-apple-ios11 -fobjc-arc -fblocks -fobjc-runtime=ios-11.0 -emit-pch -o %t %s |
4 // RUN: %clang_cc1 -no-opaque-pointers -triple arm64-apple-ios11 -fobjc-arc -fblocks -fobjc-runtime=ios-11.0 -include-pch %t -emit-llvm -o - -DUSESTRUCT %s | FileCheck %s | 4 // RUN: %clang_cc1 -triple arm64-apple-ios11 -fobjc-arc -fblocks -fobjc-runtime=ios-11.0 -include-pch %t -emit-llvm -o - -DUSESTRUCT %s | FileCheck %s |
5 | 5 |
6 #ifndef HEADER | 6 #ifndef HEADER |
7 #define HEADER | 7 #define HEADER |
8 | 8 |
9 typedef void (^BlockTy)(void); | 9 typedef void (^BlockTy)(void); |
101 @end | 101 @end |
102 | 102 |
103 id g0; | 103 id g0; |
104 StrongSmall g1, g2; | 104 StrongSmall g1, g2; |
105 | 105 |
106 // CHECK: %[[STRUCT_STRONGSMALL:.*]] = type { i32, i8* } | 106 // CHECK: %[[STRUCT_STRONGSMALL:.*]] = type { i32, ptr } |
107 // CHECK: %[[STRUCT_STRONGOUTER:.*]] = type { %[[STRUCT_STRONG:.*]], i8*, double } | 107 // CHECK: %[[STRUCT_STRONGOUTER:.*]] = type { %[[STRUCT_STRONG:.*]], ptr, double } |
108 // CHECK: %[[STRUCT_STRONG]] = type { %[[STRUCT_TRIVIAL:.*]], i8* } | 108 // CHECK: %[[STRUCT_STRONG]] = type { %[[STRUCT_TRIVIAL:.*]], ptr } |
109 // CHECK: %[[STRUCT_TRIVIAL]] = type { [4 x i32] } | 109 // CHECK: %[[STRUCT_TRIVIAL]] = type { [4 x i32] } |
110 // CHECK: %[[STRUCT_BLOCK_BYREF_T:.*]] = type { i8*, %[[STRUCT_BLOCK_BYREF_T]]*, i32, i32, i8*, i8*, i8*, %[[STRUCT_STRONGOUTER]] } | 110 // CHECK: %[[STRUCT_BLOCK_BYREF_T:.*]] = type { ptr, ptr, i32, i32, ptr, ptr, ptr, %[[STRUCT_STRONGOUTER]] } |
111 // CHECK: %[[STRUCT_STRONGBLOCK:.*]] = type { void ()* } | 111 // CHECK: %[[STRUCT_STRONGBLOCK:.*]] = type { ptr } |
112 // CHECK: %[[STRUCT_BITFIELD1:.*]] = type { i8, i8, i8*, i32, i8*, [3 x i32], i8*, double, i8, i8 } | 112 // CHECK: %[[STRUCT_BITFIELD1:.*]] = type { i8, i8, ptr, i32, ptr, [3 x i32], ptr, double, i8, i8 } |
113 | 113 |
114 // CHECK: define{{.*}} void @test_constructor_destructor_StrongOuter() | 114 // CHECK: define{{.*}} void @test_constructor_destructor_StrongOuter() |
115 // CHECK: %[[T:.*]] = alloca %[[STRUCT_STRONGOUTER]], align 8 | 115 // CHECK: %[[T:.*]] = alloca %[[STRUCT_STRONGOUTER]], align 8 |
116 // CHECK: %[[V0:.*]] = bitcast %[[STRUCT_STRONGOUTER]]* %[[T]] to i8** | 116 // CHECK: call void @__default_constructor_8_S_s16_s24(ptr %[[T]]) |
117 // CHECK: call void @__default_constructor_8_S_s16_s24(i8** %[[V0]]) | 117 // CHECK: call void @__destructor_8_S_s16_s24(ptr %[[T]]) |
118 // CHECK: %[[V1:.*]] = bitcast %[[STRUCT_STRONGOUTER]]* %[[T]] to i8** | 118 // CHECK: ret void |
119 // CHECK: call void @__destructor_8_S_s16_s24(i8** %[[V1]]) | 119 |
120 // CHECK: ret void | 120 // CHECK: define linkonce_odr hidden void @__default_constructor_8_S_s16_s24(ptr noundef %[[DST:.*]]) |
121 | 121 // CHECK: %[[DST_ADDR:.*]] = alloca ptr, align 8 |
122 // CHECK: define linkonce_odr hidden void @__default_constructor_8_S_s16_s24(i8** noundef %[[DST:.*]]) | 122 // CHECK: store ptr %[[DST]], ptr %[[DST_ADDR]], align 8 |
123 // CHECK: %[[DST_ADDR:.*]] = alloca i8**, align 8 | 123 // CHECK: %[[V0:.*]] = load ptr, ptr %[[DST_ADDR]], align 8 |
124 // CHECK: store i8** %[[DST]], i8*** %[[DST_ADDR]], align 8 | 124 // CHECK: call void @__default_constructor_8_s16(ptr %[[V0]]) |
125 // CHECK: %[[V0:.*]] = load i8**, i8*** %[[DST_ADDR]], align 8 | 125 // CHECK: %[[V2:.*]] = getelementptr inbounds i8, ptr %[[V0]], i64 24 |
126 // CHECK: call void @__default_constructor_8_s16(i8** %[[V0]]) | 126 // CHECK: call void @llvm.memset.p0.i64(ptr align 8 %[[V2]], i8 0, i64 8, i1 false) |
127 // CHECK: %[[V1:.*]] = bitcast i8** %[[V0]] to i8* | 127 // CHECK: ret void |
128 // CHECK: %[[V2:.*]] = getelementptr inbounds i8, i8* %[[V1]], i64 24 | 128 |
129 // CHECK: %[[V3:.*]] = bitcast i8* %[[V2]] to i8** | 129 // CHECK: define linkonce_odr hidden void @__default_constructor_8_s16(ptr noundef %[[DST:.*]]) |
130 // CHECK: %[[V4:.*]] = bitcast i8** %[[V3]] to i8* | 130 // CHECK: %[[DST_ADDR:.*]] = alloca ptr, align 8 |
131 // CHECK: call void @llvm.memset.p0i8.i64(i8* align 8 %[[V4]], i8 0, i64 8, i1 false) | 131 // CHECK: store ptr %[[DST]], ptr %[[DST_ADDR]], align 8 |
132 // CHECK: ret void | 132 // CHECK: %[[V0:.*]] = load ptr, ptr %[[DST_ADDR]], align 8 |
133 | 133 // CHECK: %[[V2:.*]] = getelementptr inbounds i8, ptr %[[V0]], i64 16 |
134 // CHECK: define linkonce_odr hidden void @__default_constructor_8_s16(i8** noundef %[[DST:.*]]) | 134 // CHECK: call void @llvm.memset.p0.i64(ptr align 8 %[[V2]], i8 0, i64 8, i1 false) |
135 // CHECK: %[[DST_ADDR:.*]] = alloca i8**, align 8 | 135 // CHECK: ret void |
136 // CHECK: store i8** %[[DST]], i8*** %[[DST_ADDR]], align 8 | 136 |
137 // CHECK: %[[V0:.*]] = load i8**, i8*** %[[DST_ADDR]], align 8 | 137 // CHECK: define linkonce_odr hidden void @__destructor_8_S_s16_s24(ptr noundef %[[DST:.*]]) |
138 // CHECK: %[[V1:.*]] = bitcast i8** %[[V0]] to i8* | 138 // CHECK: %[[DST_ADDR:.*]] = alloca ptr, align 8 |
139 // CHECK: %[[V2:.*]] = getelementptr inbounds i8, i8* %[[V1]], i64 16 | 139 // CHECK: store ptr %[[DST]], ptr %[[DST_ADDR]], align 8 |
140 // CHECK: %[[V3:.*]] = bitcast i8* %[[V2]] to i8** | 140 // CHECK: %[[V0:.*]] = load ptr, ptr %[[DST_ADDR]], align 8 |
141 // CHECK: %[[V4:.*]] = bitcast i8** %[[V3]] to i8* | 141 // CHECK: call void @__destructor_8_s16(ptr %[[V0]]) |
142 // CHECK: call void @llvm.memset.p0i8.i64(i8* align 8 %[[V4]], i8 0, i64 8, i1 false) | 142 // CHECK: %[[V2:.*]] = getelementptr inbounds i8, ptr %[[V0]], i64 24 |
143 // CHECK: ret void | 143 // CHECK: call void @llvm.objc.storeStrong(ptr %[[V2]], ptr null) |
144 | 144 // CHECK: ret void |
145 // CHECK: define linkonce_odr hidden void @__destructor_8_S_s16_s24(i8** noundef %[[DST:.*]]) | 145 |
146 // CHECK: %[[DST_ADDR:.*]] = alloca i8**, align 8 | 146 // CHECK: define linkonce_odr hidden void @__destructor_8_s16(ptr noundef %[[DST:.*]]) |
147 // CHECK: store i8** %[[DST]], i8*** %[[DST_ADDR]], align 8 | 147 // CHECK: %[[DST_ADDR:.*]] = alloca ptr, align 8 |
148 // CHECK: %[[V0:.*]] = load i8**, i8*** %[[DST_ADDR]], align 8 | 148 // CHECK: store ptr %[[DST]], ptr %[[DST_ADDR]], align 8 |
149 // CHECK: call void @__destructor_8_s16(i8** %[[V0]]) | 149 // CHECK: %[[V0:.*]] = load ptr, ptr %[[DST_ADDR]], align 8 |
150 // CHECK: %[[V1:.*]] = bitcast i8** %[[V0]] to i8* | 150 // CHECK: %[[V2:.*]] = getelementptr inbounds i8, ptr %[[V0]], i64 16 |
151 // CHECK: %[[V2:.*]] = getelementptr inbounds i8, i8* %[[V1]], i64 24 | 151 // CHECK: call void @llvm.objc.storeStrong(ptr %[[V2]], ptr null) |
152 // CHECK: %[[V3:.*]] = bitcast i8* %[[V2]] to i8** | |
153 // CHECK: call void @llvm.objc.storeStrong(i8** %[[V3]], i8* null) | |
154 // CHECK: ret void | |
155 | |
156 // CHECK: define linkonce_odr hidden void @__destructor_8_s16(i8** noundef %[[DST:.*]]) | |
157 // CHECK: %[[DST_ADDR:.*]] = alloca i8**, align 8 | |
158 // CHECK: store i8** %[[DST]], i8*** %[[DST_ADDR]], align 8 | |
159 // CHECK: %[[V0:.*]] = load i8**, i8*** %[[DST_ADDR]], align 8 | |
160 // CHECK: %[[V1:.*]] = bitcast i8** %[[V0]] to i8* | |
161 // CHECK: %[[V2:.*]] = getelementptr inbounds i8, i8* %[[V1]], i64 16 | |
162 // CHECK: %[[V3:.*]] = bitcast i8* %[[V2]] to i8** | |
163 // CHECK: call void @llvm.objc.storeStrong(i8** %[[V3]], i8* null) | |
164 // CHECK: ret void | 152 // CHECK: ret void |
165 | 153 |
166 void test_constructor_destructor_StrongOuter(void) { | 154 void test_constructor_destructor_StrongOuter(void) { |
167 StrongOuter t; | 155 StrongOuter t; |
168 } | 156 } |
169 | 157 |
170 // CHECK: define{{.*}} void @test_copy_constructor_StrongOuter(%[[STRUCT_STRONGOUTER]]* noundef %[[S:.*]]) | 158 // CHECK: define{{.*}} void @test_copy_constructor_StrongOuter(ptr noundef %[[S:.*]]) |
171 // CHECK: %[[S_ADDR:.*]] = alloca %[[STRUCT_STRONGOUTER]]*, align 8 | 159 // CHECK: %[[S_ADDR:.*]] = alloca ptr, align 8 |
172 // CHECK: %[[T:.*]] = alloca %[[STRUCT_STRONGOUTER]], align 8 | 160 // CHECK: %[[T:.*]] = alloca %[[STRUCT_STRONGOUTER]], align 8 |
173 // CHECK: store %[[STRUCT_STRONGOUTER]]* %[[S]], %[[STRUCT_STRONGOUTER]]** %[[S_ADDR]], align 8 | 161 // CHECK: store ptr %[[S]], ptr %[[S_ADDR]], align 8 |
174 // CHECK: %[[V0:.*]] = load %[[STRUCT_STRONGOUTER]]*, %[[STRUCT_STRONGOUTER]]** %[[S_ADDR]], align 8 | 162 // CHECK: %[[V0:.*]] = load ptr, ptr %[[S_ADDR]], align 8 |
175 // CHECK: %[[V1:.*]] = bitcast %[[STRUCT_STRONGOUTER]]* %[[T]] to i8** | 163 // CHECK: call void @__copy_constructor_8_8_S_t0w16_s16_s24_t32w8(ptr %[[T]], ptr %[[V0]]) |
176 // CHECK: %[[V2:.*]] = bitcast %[[STRUCT_STRONGOUTER]]* %[[V0]] to i8** | 164 // CHECK: call void @__destructor_8_S_s16_s24(ptr %[[T]]) |
177 // CHECK: call void @__copy_constructor_8_8_S_t0w16_s16_s24_t32w8(i8** %[[V1]], i8** %[[V2]]) | 165 // CHECK: ret void |
178 // CHECK: %[[V3:.*]] = bitcast %[[STRUCT_STRONGOUTER]]* %[[T]] to i8** | 166 |
179 // CHECK: call void @__destructor_8_S_s16_s24(i8** %[[V3]]) | 167 // CHECK: define linkonce_odr hidden void @__copy_constructor_8_8_S_t0w16_s16_s24_t32w8(ptr noundef %[[DST:.*]], ptr noundef %[[SRC:.*]]) |
180 // CHECK: ret void | 168 // CHECK: %[[DST_ADDR:.*]] = alloca ptr, align 8 |
181 | 169 // CHECK: %[[SRC_ADDR:.*]] = alloca ptr, align 8 |
182 // CHECK: define linkonce_odr hidden void @__copy_constructor_8_8_S_t0w16_s16_s24_t32w8(i8** noundef %[[DST:.*]], i8** noundef %[[SRC:.*]]) | 170 // CHECK: store ptr %[[DST]], ptr %[[DST_ADDR]], align 8 |
183 // CHECK: %[[DST_ADDR:.*]] = alloca i8**, align 8 | 171 // CHECK: store ptr %[[SRC]], ptr %[[SRC_ADDR]], align 8 |
184 // CHECK: %[[SRC_ADDR:.*]] = alloca i8**, align 8 | 172 // CHECK: %[[V0:.*]] = load ptr, ptr %[[DST_ADDR]], align 8 |
185 // CHECK: store i8** %[[DST]], i8*** %[[DST_ADDR]], align 8 | 173 // CHECK: %[[V1:.*]] = load ptr, ptr %[[SRC_ADDR]], align 8 |
186 // CHECK: store i8** %[[SRC]], i8*** %[[SRC_ADDR]], align 8 | 174 // CHECK: call void @__copy_constructor_8_8_t0w16_s16(ptr %[[V0]], ptr %[[V1]]) |
187 // CHECK: %[[V0:.*]] = load i8**, i8*** %[[DST_ADDR]], align 8 | 175 // CHECK: %[[V3:.*]] = getelementptr inbounds i8, ptr %[[V0]], i64 24 |
188 // CHECK: %[[V1:.*]] = load i8**, i8*** %[[SRC_ADDR]], align 8 | 176 // CHECK: %[[V6:.*]] = getelementptr inbounds i8, ptr %[[V1]], i64 24 |
189 // CHECK: call void @__copy_constructor_8_8_t0w16_s16(i8** %[[V0]], i8** %[[V1]]) | 177 // CHECK: %[[V8:.*]] = load ptr, ptr %[[V6]], align 8 |
190 // CHECK: %[[V2:.*]] = bitcast i8** %[[V0]] to i8* | 178 // CHECK: %[[V9:.*]] = call ptr @llvm.objc.retain(ptr %[[V8]]) |
191 // CHECK: %[[V3:.*]] = getelementptr inbounds i8, i8* %[[V2]], i64 24 | 179 // CHECK: store ptr %[[V9]], ptr %[[V3]], align 8 |
192 // CHECK: %[[V4:.*]] = bitcast i8* %[[V3]] to i8** | 180 // CHECK: %[[V11:.*]] = getelementptr inbounds i8, ptr %[[V0]], i64 32 |
193 // CHECK: %[[V5:.*]] = bitcast i8** %[[V1]] to i8* | 181 // CHECK: %[[V14:.*]] = getelementptr inbounds i8, ptr %[[V1]], i64 32 |
194 // CHECK: %[[V6:.*]] = getelementptr inbounds i8, i8* %[[V5]], i64 24 | 182 // CHECK: %[[V18:.*]] = load i64, ptr %[[V14]], align 8 |
195 // CHECK: %[[V7:.*]] = bitcast i8* %[[V6]] to i8** | 183 // CHECK: store i64 %[[V18]], ptr %[[V11]], align 8 |
196 // CHECK: %[[V8:.*]] = load i8*, i8** %[[V7]], align 8 | 184 // CHECK: ret void |
197 // CHECK: %[[V9:.*]] = call i8* @llvm.objc.retain(i8* %[[V8]]) | 185 |
198 // CHECK: store i8* %[[V9]], i8** %[[V4]], align 8 | 186 // CHECK: define linkonce_odr hidden void @__copy_constructor_8_8_t0w16_s16(ptr noundef %[[DST:.*]], ptr noundef %[[SRC:.*]]) |
199 // CHECK: %[[V10:.*]] = bitcast i8** %[[V0]] to i8* | 187 // CHECK: %[[DST_ADDR:.*]] = alloca ptr, align 8 |
200 // CHECK: %[[V11:.*]] = getelementptr inbounds i8, i8* %[[V10]], i64 32 | 188 // CHECK: %[[SRC_ADDR:.*]] = alloca ptr, align 8 |
201 // CHECK: %[[V12:.*]] = bitcast i8* %[[V11]] to i8** | 189 // CHECK: store ptr %[[DST]], ptr %[[DST_ADDR]], align 8 |
202 // CHECK: %[[V13:.*]] = bitcast i8** %[[V1]] to i8* | 190 // CHECK: store ptr %[[SRC]], ptr %[[SRC_ADDR]], align 8 |
203 // CHECK: %[[V14:.*]] = getelementptr inbounds i8, i8* %[[V13]], i64 32 | 191 // CHECK: %[[V0:.*]] = load ptr, ptr %[[DST_ADDR]], align 8 |
204 // CHECK: %[[V15:.*]] = bitcast i8* %[[V14]] to i8** | 192 // CHECK: %[[V1:.*]] = load ptr, ptr %[[SRC_ADDR]], align 8 |
205 // CHECK: %[[V16:.*]] = bitcast i8** %[[V12]] to i64* | 193 // CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 %[[V0]], ptr align 8 %[[V1]], i64 16, i1 false) |
206 // CHECK: %[[V17:.*]] = bitcast i8** %[[V15]] to i64* | 194 // CHECK: %[[V5:.*]] = getelementptr inbounds i8, ptr %[[V0]], i64 16 |
207 // CHECK: %[[V18:.*]] = load i64, i64* %[[V17]], align 8 | 195 // CHECK: %[[V8:.*]] = getelementptr inbounds i8, ptr %[[V1]], i64 16 |
208 // CHECK: store i64 %[[V18]], i64* %[[V16]], align 8 | 196 // CHECK: %[[V10:.*]] = load ptr, ptr %[[V8]], align 8 |
209 // CHECK: ret void | 197 // CHECK: %[[V11:.*]] = call ptr @llvm.objc.retain(ptr %[[V10]]) |
210 | 198 // CHECK: store ptr %[[V11]], ptr %[[V5]], align 8 |
211 // CHECK: define linkonce_odr hidden void @__copy_constructor_8_8_t0w16_s16(i8** noundef %[[DST:.*]], i8** noundef %[[SRC:.*]]) | |
212 // CHECK: %[[DST_ADDR:.*]] = alloca i8**, align 8 | |
213 // CHECK: %[[SRC_ADDR:.*]] = alloca i8**, align 8 | |
214 // CHECK: store i8** %[[DST]], i8*** %[[DST_ADDR]], align 8 | |
215 // CHECK: store i8** %[[SRC]], i8*** %[[SRC_ADDR]], align 8 | |
216 // CHECK: %[[V0:.*]] = load i8**, i8*** %[[DST_ADDR]], align 8 | |
217 // CHECK: %[[V1:.*]] = load i8**, i8*** %[[SRC_ADDR]], align 8 | |
218 // CHECK: %[[V2:.*]] = bitcast i8** %[[V0]] to i8* | |
219 // CHECK: %[[V3:.*]] = bitcast i8** %[[V1]] to i8* | |
220 // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 %[[V2]], i8* align 8 %[[V3]], i64 16, i1 false) | |
221 // CHECK: %[[V4:.*]] = bitcast i8** %[[V0]] to i8* | |
222 // CHECK: %[[V5:.*]] = getelementptr inbounds i8, i8* %[[V4]], i64 16 | |
223 // CHECK: %[[V6:.*]] = bitcast i8* %[[V5]] to i8** | |
224 // CHECK: %[[V7:.*]] = bitcast i8** %[[V1]] to i8* | |
225 // CHECK: %[[V8:.*]] = getelementptr inbounds i8, i8* %[[V7]], i64 16 | |
226 // CHECK: %[[V9:.*]] = bitcast i8* %[[V8]] to i8** | |
227 // CHECK: %[[V10:.*]] = load i8*, i8** %[[V9]], align 8 | |
228 // CHECK: %[[V11:.*]] = call i8* @llvm.objc.retain(i8* %[[V10]]) | |
229 // CHECK: store i8* %[[V11]], i8** %[[V6]], align 8 | |
230 // CHECK: ret void | 199 // CHECK: ret void |
231 | 200 |
232 void test_copy_constructor_StrongOuter(StrongOuter *s) { | 201 void test_copy_constructor_StrongOuter(StrongOuter *s) { |
233 StrongOuter t = *s; | 202 StrongOuter t = *s; |
234 } | 203 } |
235 | 204 |
236 /// CHECK: define linkonce_odr hidden void @__copy_assignment_8_8_S_t0w16_s16_s24_t32w8(i8** noundef %[[DST:.*]], i8** noundef %[[SRC:.*]]) | 205 /// CHECK: define linkonce_odr hidden void @__copy_assignment_8_8_S_t0w16_s16_s24_t32w8(ptr noundef %[[DST:.*]], ptr noundef %[[SRC:.*]]) |
237 // CHECK: %[[DST_ADDR:.*]] = alloca i8**, align 8 | 206 // CHECK: %[[DST_ADDR:.*]] = alloca ptr, align 8 |
238 // CHECK: %[[SRC_ADDR:.*]] = alloca i8**, align 8 | 207 // CHECK: %[[SRC_ADDR:.*]] = alloca ptr, align 8 |
239 // CHECK: store i8** %[[DST]], i8*** %[[DST_ADDR]], align 8 | 208 // CHECK: store ptr %[[DST]], ptr %[[DST_ADDR]], align 8 |
240 // CHECK: store i8** %[[SRC]], i8*** %[[SRC_ADDR]], align 8 | 209 // CHECK: store ptr %[[SRC]], ptr %[[SRC_ADDR]], align 8 |
241 // CHECK: %[[V0:.*]] = load i8**, i8*** %[[DST_ADDR]], align 8 | 210 // CHECK: %[[V0:.*]] = load ptr, ptr %[[DST_ADDR]], align 8 |
242 // CHECK: %[[V1:.*]] = load i8**, i8*** %[[SRC_ADDR]], align 8 | 211 // CHECK: %[[V1:.*]] = load ptr, ptr %[[SRC_ADDR]], align 8 |
243 // CHECK: %[[V2:.*]] = bitcast i8** %[[V0]] to i8* | 212 // CHECK: %[[V3:.*]] = getelementptr inbounds i8, ptr %[[V0]], i64 24 |
244 // CHECK: %[[V3:.*]] = getelementptr inbounds i8, i8* %[[V2]], i64 24 | 213 // CHECK: %[[V6:.*]] = getelementptr inbounds i8, ptr %[[V1]], i64 24 |
245 // CHECK: %[[V4:.*]] = bitcast i8* %[[V3]] to i8** | 214 // CHECK: %[[V8:.*]] = load ptr, ptr %[[V6]], align 8 |
246 // CHECK: %[[V5:.*]] = bitcast i8** %[[V1]] to i8* | 215 // CHECK: call void @llvm.objc.storeStrong(ptr %[[V3]], ptr %[[V8]]) |
247 // CHECK: %[[V6:.*]] = getelementptr inbounds i8, i8* %[[V5]], i64 24 | |
248 // CHECK: %[[V7:.*]] = bitcast i8* %[[V6]] to i8** | |
249 // CHECK: %[[V8:.*]] = load i8*, i8** %[[V7]], align 8 | |
250 // CHECK: call void @llvm.objc.storeStrong(i8** %[[V4]], i8* %[[V8]]) | |
251 | 216 |
252 void test_copy_assignment_StrongOuter(StrongOuter *d, StrongOuter *s) { | 217 void test_copy_assignment_StrongOuter(StrongOuter *d, StrongOuter *s) { |
253 *d = *s; | 218 *d = *s; |
254 } | 219 } |
255 | 220 |
256 // CHECK: define{{.*}} void @test_move_constructor_StrongOuter() | 221 // CHECK: define{{.*}} void @test_move_constructor_StrongOuter() |
257 // CHECK: %[[T1:.*]] = getelementptr inbounds %[[STRUCT_BLOCK_BYREF_T]], %[[STRUCT_BLOCK_BYREF_T]]* %{{.*}}, i32 0, i32 7 | 222 // CHECK: %[[T1:.*]] = getelementptr inbounds %[[STRUCT_BLOCK_BYREF_T]], ptr %{{.*}}, i32 0, i32 7 |
258 // CHECK: %[[V1:.*]] = bitcast %[[STRUCT_STRONGOUTER]]* %[[T1]] to i8** | 223 // CHECK: call void @__default_constructor_8_S_s16_s24(ptr %[[T1]]) |
259 // CHECK: call void @__default_constructor_8_S_s16_s24(i8** %[[V1]]) | 224 // CHECK: %[[T2:.*]] = getelementptr inbounds %[[STRUCT_BLOCK_BYREF_T]], ptr %{{.*}}, i32 0, i32 7 |
260 // CHECK: %[[T2:.*]] = getelementptr inbounds %[[STRUCT_BLOCK_BYREF_T]], %[[STRUCT_BLOCK_BYREF_T]]* %{{.*}}, i32 0, i32 7 | 225 // CHECK: call void @__destructor_8_S_s16_s24(ptr %[[T2]]) |
261 // CHECK: %[[V9:.*]] = bitcast %[[STRUCT_STRONGOUTER]]* %[[T2]] to i8** | 226 |
262 // CHECK: call void @__destructor_8_S_s16_s24(i8** %[[V9]]) | 227 // CHECK: define internal void @__Block_byref_object_copy_(ptr noundef %0, ptr noundef %1) |
263 | |
264 // CHECK: define internal void @__Block_byref_object_copy_(i8* noundef %0, i8* noundef %1) | |
265 // CHECK: call void @__move_constructor_8_8_S_t0w16_s16_s24_t32w8( | 228 // CHECK: call void @__move_constructor_8_8_S_t0w16_s16_s24_t32w8( |
266 | 229 |
267 // CHECK: define linkonce_odr hidden void @__move_constructor_8_8_S_t0w16_s16_s24_t32w8(i8** noundef %[[DST:.*]], i8** noundef %[[SRC:.*]]) | 230 // CHECK: define linkonce_odr hidden void @__move_constructor_8_8_S_t0w16_s16_s24_t32w8(ptr noundef %[[DST:.*]], ptr noundef %[[SRC:.*]]) |
268 // CHECK: %[[DST_ADDR:.*]] = alloca i8**, align 8 | 231 // CHECK: %[[DST_ADDR:.*]] = alloca ptr, align 8 |
269 // CHECK: %[[SRC_ADDR:.*]] = alloca i8**, align 8 | 232 // CHECK: %[[SRC_ADDR:.*]] = alloca ptr, align 8 |
270 // CHECK: store i8** %[[DST]], i8*** %[[DST_ADDR]], align 8 | 233 // CHECK: store ptr %[[DST]], ptr %[[DST_ADDR]], align 8 |
271 // CHECK: store i8** %[[SRC]], i8*** %[[SRC_ADDR]], align 8 | 234 // CHECK: store ptr %[[SRC]], ptr %[[SRC_ADDR]], align 8 |
272 // CHECK: %[[V0:.*]] = load i8**, i8*** %[[DST_ADDR]], align 8 | 235 // CHECK: %[[V0:.*]] = load ptr, ptr %[[DST_ADDR]], align 8 |
273 // CHECK: %[[V1:.*]] = load i8**, i8*** %[[SRC_ADDR]], align 8 | 236 // CHECK: %[[V1:.*]] = load ptr, ptr %[[SRC_ADDR]], align 8 |
274 // CHECK: call void @__move_constructor_8_8_t0w16_s16(i8** %[[V0]], i8** %[[V1]]) | 237 // CHECK: call void @__move_constructor_8_8_t0w16_s16(ptr %[[V0]], ptr %[[V1]]) |
275 // CHECK: %[[V2:.*]] = bitcast i8** %[[V0]] to i8* | 238 // CHECK: %[[V3:.*]] = getelementptr inbounds i8, ptr %[[V0]], i64 24 |
276 // CHECK: %[[V3:.*]] = getelementptr inbounds i8, i8* %[[V2]], i64 24 | 239 // CHECK: %[[V6:.*]] = getelementptr inbounds i8, ptr %[[V1]], i64 24 |
277 // CHECK: %[[V4:.*]] = bitcast i8* %[[V3]] to i8** | 240 // CHECK: %[[V8:.*]] = load ptr, ptr %[[V6]], align 8 |
278 // CHECK: %[[V5:.*]] = bitcast i8** %[[V1]] to i8* | 241 // CHECK: store ptr null, ptr %[[V6]], align 8 |
279 // CHECK: %[[V6:.*]] = getelementptr inbounds i8, i8* %[[V5]], i64 24 | 242 // CHECK: store ptr %[[V8]], ptr %[[V3]], align 8 |
280 // CHECK: %[[V7:.*]] = bitcast i8* %[[V6]] to i8** | 243 |
281 // CHECK: %[[V8:.*]] = load i8*, i8** %[[V7]], align 8 | 244 // CHECK: define internal void @__Block_byref_object_dispose_(ptr noundef %0) |
282 // CHECK: store i8* null, i8** %[[V7]], align 8 | |
283 // CHECK: store i8* %[[V8]], i8** %[[V4]], align 8 | |
284 | |
285 // CHECK: define internal void @__Block_byref_object_dispose_(i8* noundef %0) | |
286 // CHECK: call void @__destructor_8_S_s16_s24( | 245 // CHECK: call void @__destructor_8_S_s16_s24( |
287 | 246 |
288 void test_move_constructor_StrongOuter(void) { | 247 void test_move_constructor_StrongOuter(void) { |
289 __block StrongOuter t; | 248 __block StrongOuter t; |
290 BlockTy b = ^{ (void)t; }; | 249 BlockTy b = ^{ (void)t; }; |
291 } | 250 } |
292 | 251 |
293 // CHECK: define linkonce_odr hidden void @__move_assignment_8_8_S_t0w16_s16_s24_t32w8(i8** noundef %[[DST:.*]], i8** noundef %[[SRC:.*]]) | 252 // CHECK: define linkonce_odr hidden void @__move_assignment_8_8_S_t0w16_s16_s24_t32w8(ptr noundef %[[DST:.*]], ptr noundef %[[SRC:.*]]) |
294 // CHECK: %[[DST_ADDR:.*]] = alloca i8**, align 8 | 253 // CHECK: %[[DST_ADDR:.*]] = alloca ptr, align 8 |
295 // CHECK: %[[SRC_ADDR:.*]] = alloca i8**, align 8 | 254 // CHECK: %[[SRC_ADDR:.*]] = alloca ptr, align 8 |
296 // CHECK: store i8** %[[DST]], i8*** %[[DST_ADDR]], align 8 | 255 // CHECK: store ptr %[[DST]], ptr %[[DST_ADDR]], align 8 |
297 // CHECK: store i8** %[[SRC]], i8*** %[[SRC_ADDR]], align 8 | 256 // CHECK: store ptr %[[SRC]], ptr %[[SRC_ADDR]], align 8 |
298 // CHECK: %[[V0:.*]] = load i8**, i8*** %[[DST_ADDR]], align 8 | 257 // CHECK: %[[V0:.*]] = load ptr, ptr %[[DST_ADDR]], align 8 |
299 // CHECK: %[[V1:.*]] = load i8**, i8*** %[[SRC_ADDR]], align 8 | 258 // CHECK: %[[V1:.*]] = load ptr, ptr %[[SRC_ADDR]], align 8 |
300 // CHECK: call void @__move_assignment_8_8_t0w16_s16(i8** %[[V0]], i8** %[[V1]]) | 259 // CHECK: call void @__move_assignment_8_8_t0w16_s16(ptr %[[V0]], ptr %[[V1]]) |
301 // CHECK: %[[V2:.*]] = bitcast i8** %[[V0]] to i8* | 260 // CHECK: %[[V3:.*]] = getelementptr inbounds i8, ptr %[[V0]], i64 24 |
302 // CHECK: %[[V3:.*]] = getelementptr inbounds i8, i8* %[[V2]], i64 24 | 261 // CHECK: %[[V6:.*]] = getelementptr inbounds i8, ptr %[[V1]], i64 24 |
303 // CHECK: %[[V4:.*]] = bitcast i8* %[[V3]] to i8** | 262 // CHECK: %[[V8:.*]] = load ptr, ptr %[[V6]], align 8 |
304 // CHECK: %[[V5:.*]] = bitcast i8** %[[V1]] to i8* | 263 // CHECK: store ptr null, ptr %[[V6]], align 8 |
305 // CHECK: %[[V6:.*]] = getelementptr inbounds i8, i8* %[[V5]], i64 24 | 264 // CHECK: %[[V9:.*]] = load ptr, ptr %[[V3]], align 8 |
306 // CHECK: %[[V7:.*]] = bitcast i8* %[[V6]] to i8** | 265 // CHECK: store ptr %[[V8]], ptr %[[V3]], align 8 |
307 // CHECK: %[[V8:.*]] = load i8*, i8** %[[V7]], align 8 | 266 // CHECK: call void @llvm.objc.release(ptr %[[V9]]) |
308 // CHECK: store i8* null, i8** %[[V7]], align 8 | |
309 // CHECK: %[[V9:.*]] = load i8*, i8** %[[V4]], align 8 | |
310 // CHECK: store i8* %[[V8]], i8** %[[V4]], align 8 | |
311 // CHECK: call void @llvm.objc.release(i8* %[[V9]]) | |
312 | 267 |
313 void test_move_assignment_StrongOuter(StrongOuter *p) { | 268 void test_move_assignment_StrongOuter(StrongOuter *p) { |
314 *p = getStrongOuter(); | 269 *p = getStrongOuter(); |
315 } | 270 } |
316 | 271 |
317 // CHECK: define linkonce_odr hidden void @__default_constructor_8_s0_S_s24(i8** noundef %[[DST:.*]]) | 272 // CHECK: define linkonce_odr hidden void @__default_constructor_8_s0_S_s24(ptr noundef %[[DST:.*]]) |
318 // CHECK: %[[DST_ADDR:.*]] = alloca i8**, align 8 | 273 // CHECK: %[[DST_ADDR:.*]] = alloca ptr, align 8 |
319 // CHECK: store i8** %[[DST]], i8*** %[[DST_ADDR]], align 8 | 274 // CHECK: store ptr %[[DST]], ptr %[[DST_ADDR]], align 8 |
320 // CHECK: %[[V0:.*]] = load i8**, i8*** %[[DST_ADDR]], align 8 | 275 // CHECK: %[[V0:.*]] = load ptr, ptr %[[DST_ADDR]], align 8 |
321 // CHECK: %[[V1:.*]] = bitcast i8** %[[V0]] to i8* | 276 // CHECK: call void @llvm.memset.p0.i64(ptr align 8 %[[V0]], i8 0, i64 8, i1 false) |
322 // CHECK: call void @llvm.memset.p0i8.i64(i8* align 8 %[[V1]], i8 0, i64 8, i1 false) | 277 // CHECK: %[[V3:.*]] = getelementptr inbounds i8, ptr %[[V0]], i64 8 |
323 // CHECK: %[[V2:.*]] = bitcast i8** %[[V0]] to i8* | 278 // CHECK: call void @__default_constructor_8_s16(ptr %[[V3]]) |
324 // CHECK: %[[V3:.*]] = getelementptr inbounds i8, i8* %[[V2]], i64 8 | 279 |
325 // CHECK: %[[V4:.*]] = bitcast i8* %[[V3]] to i8** | 280 // CHECK: define linkonce_odr hidden void @__destructor_8_s0_S_s24(ptr noundef %[[DST:.*]]) |
326 // CHECK: call void @__default_constructor_8_s16(i8** %[[V4]]) | 281 // CHECK: %[[DST_ADDR:.*]] = alloca ptr, align 8 |
327 | 282 // CHECK: store ptr %[[DST]], ptr %[[DST_ADDR]], align 8 |
328 // CHECK: define linkonce_odr hidden void @__destructor_8_s0_S_s24(i8** noundef %[[DST:.*]]) | 283 // CHECK: %[[V0:.*]] = load ptr, ptr %[[DST_ADDR]], align 8 |
329 // CHECK: %[[DST_ADDR:.*]] = alloca i8**, align 8 | 284 // CHECK: call void @llvm.objc.storeStrong(ptr %[[V0]], ptr null) |
330 // CHECK: store i8** %[[DST]], i8*** %[[DST_ADDR]], align 8 | 285 // CHECK: %[[V2:.*]] = getelementptr inbounds i8, ptr %[[V0]], i64 8 |
331 // CHECK: %[[V0:.*]] = load i8**, i8*** %[[DST_ADDR]], align 8 | 286 // CHECK: call void @__destructor_8_s16(ptr %[[V2]]) |
332 // CHECK: call void @llvm.objc.storeStrong(i8** %[[V0]], i8* null) | |
333 // CHECK: %[[V1:.*]] = bitcast i8** %[[V0]] to i8* | |
334 // CHECK: %[[V2:.*]] = getelementptr inbounds i8, i8* %[[V1]], i64 8 | |
335 // CHECK: %[[V3:.*]] = bitcast i8* %[[V2]] to i8** | |
336 // CHECK: call void @__destructor_8_s16(i8** %[[V3]]) | |
337 | 287 |
338 void test_constructor_destructor_StrongOuter2(void) { | 288 void test_constructor_destructor_StrongOuter2(void) { |
339 StrongOuter2 t; | 289 StrongOuter2 t; |
340 } | 290 } |
341 | 291 |
342 // CHECK: define linkonce_odr hidden void @__copy_constructor_8_8_s0_S_t8w16_s24(i8** noundef %[[DST:.*]], i8** noundef %[[SRC:.*]]) | 292 // CHECK: define linkonce_odr hidden void @__copy_constructor_8_8_s0_S_t8w16_s24(ptr noundef %[[DST:.*]], ptr noundef %[[SRC:.*]]) |
343 // CHECK: %[[DST_ADDR:.*]] = alloca i8**, align 8 | 293 // CHECK: %[[DST_ADDR:.*]] = alloca ptr, align 8 |
344 // CHECK: %[[SRC_ADDR:.*]] = alloca i8**, align 8 | 294 // CHECK: %[[SRC_ADDR:.*]] = alloca ptr, align 8 |
345 // CHECK: store i8** %[[DST]], i8*** %[[DST_ADDR]], align 8 | 295 // CHECK: store ptr %[[DST]], ptr %[[DST_ADDR]], align 8 |
346 // CHECK: store i8** %[[SRC]], i8*** %[[SRC_ADDR]], align 8 | 296 // CHECK: store ptr %[[SRC]], ptr %[[SRC_ADDR]], align 8 |
347 // CHECK: %[[V0:.*]] = load i8**, i8*** %[[DST_ADDR]], align 8 | 297 // CHECK: %[[V0:.*]] = load ptr, ptr %[[DST_ADDR]], align 8 |
348 // CHECK: %[[V1:.*]] = load i8**, i8*** %[[SRC_ADDR]], align 8 | 298 // CHECK: %[[V1:.*]] = load ptr, ptr %[[SRC_ADDR]], align 8 |
349 // CHECK: %[[V2:.*]] = load i8*, i8** %[[V1]], align 8 | 299 // CHECK: %[[V2:.*]] = load ptr, ptr %[[V1]], align 8 |
350 // CHECK: %[[V3:.*]] = call i8* @llvm.objc.retain(i8* %[[V2]]) | 300 // CHECK: %[[V3:.*]] = call ptr @llvm.objc.retain(ptr %[[V2]]) |
351 // CHECK: store i8* %[[V3]], i8** %[[V0]], align 8 | 301 // CHECK: store ptr %[[V3]], ptr %[[V0]], align 8 |
352 // CHECK: %[[V4:.*]] = bitcast i8** %[[V0]] to i8* | 302 // CHECK: %[[V5:.*]] = getelementptr inbounds i8, ptr %[[V0]], i64 8 |
353 // CHECK: %[[V5:.*]] = getelementptr inbounds i8, i8* %[[V4]], i64 8 | 303 // CHECK: %[[V8:.*]] = getelementptr inbounds i8, ptr %[[V1]], i64 8 |
354 // CHECK: %[[V6:.*]] = bitcast i8* %[[V5]] to i8** | 304 // CHECK: call void @__copy_constructor_8_8_t0w16_s16(ptr %[[V5]], ptr %[[V8]]) |
355 // CHECK: %[[V7:.*]] = bitcast i8** %[[V1]] to i8* | |
356 // CHECK: %[[V8:.*]] = getelementptr inbounds i8, i8* %[[V7]], i64 8 | |
357 // CHECK: %[[V9:.*]] = bitcast i8* %[[V8]] to i8** | |
358 // CHECK: call void @__copy_constructor_8_8_t0w16_s16(i8** %[[V6]], i8** %[[V9]]) | |
359 | 305 |
360 void test_copy_constructor_StrongOuter2(StrongOuter2 *s) { | 306 void test_copy_constructor_StrongOuter2(StrongOuter2 *s) { |
361 StrongOuter2 t = *s; | 307 StrongOuter2 t = *s; |
362 } | 308 } |
363 | 309 |
364 // CHECK: define linkonce_odr hidden void @__copy_assignment_8_8_s0_S_t8w16_s24(i8** noundef %[[DST:.*]], i8** noundef %[[SRC:.*]]) | 310 // CHECK: define linkonce_odr hidden void @__copy_assignment_8_8_s0_S_t8w16_s24(ptr noundef %[[DST:.*]], ptr noundef %[[SRC:.*]]) |
365 // CHECK: %[[DST_ADDR:.*]] = alloca i8**, align 8 | 311 // CHECK: %[[DST_ADDR:.*]] = alloca ptr, align 8 |
366 // CHECK: %[[SRC_ADDR:.*]] = alloca i8**, align 8 | 312 // CHECK: %[[SRC_ADDR:.*]] = alloca ptr, align 8 |
367 // CHECK: store i8** %[[DST]], i8*** %[[DST_ADDR]], align 8 | 313 // CHECK: store ptr %[[DST]], ptr %[[DST_ADDR]], align 8 |
368 // CHECK: store i8** %[[SRC]], i8*** %[[SRC_ADDR]], align 8 | 314 // CHECK: store ptr %[[SRC]], ptr %[[SRC_ADDR]], align 8 |
369 // CHECK: %[[V0:.*]] = load i8**, i8*** %[[DST_ADDR]], align 8 | 315 // CHECK: %[[V0:.*]] = load ptr, ptr %[[DST_ADDR]], align 8 |
370 // CHECK: %[[V1:.*]] = load i8**, i8*** %[[SRC_ADDR]], align 8 | 316 // CHECK: %[[V1:.*]] = load ptr, ptr %[[SRC_ADDR]], align 8 |
371 // CHECK: %[[V2:.*]] = load i8*, i8** %[[V1]], align 8 | 317 // CHECK: %[[V2:.*]] = load ptr, ptr %[[V1]], align 8 |
372 // CHECK: call void @llvm.objc.storeStrong(i8** %[[V0]], i8* %[[V2]]) | 318 // CHECK: call void @llvm.objc.storeStrong(ptr %[[V0]], ptr %[[V2]]) |
373 // CHECK: %[[V3:.*]] = bitcast i8** %[[V0]] to i8* | 319 // CHECK: %[[V4:.*]] = getelementptr inbounds i8, ptr %[[V0]], i64 8 |
374 // CHECK: %[[V4:.*]] = getelementptr inbounds i8, i8* %[[V3]], i64 8 | 320 // CHECK: %[[V7:.*]] = getelementptr inbounds i8, ptr %[[V1]], i64 8 |
375 // CHECK: %[[V5:.*]] = bitcast i8* %[[V4]] to i8** | 321 // CHECK: call void @__copy_assignment_8_8_t0w16_s16(ptr %[[V4]], ptr %[[V7]]) |
376 // CHECK: %[[V6:.*]] = bitcast i8** %[[V1]] to i8* | |
377 // CHECK: %[[V7:.*]] = getelementptr inbounds i8, i8* %[[V6]], i64 8 | |
378 // CHECK: %[[V8:.*]] = bitcast i8* %[[V7]] to i8** | |
379 // CHECK: call void @__copy_assignment_8_8_t0w16_s16(i8** %[[V5]], i8** %[[V8]]) | |
380 | 322 |
381 void test_copy_assignment_StrongOuter2(StrongOuter2 *d, StrongOuter2 *s) { | 323 void test_copy_assignment_StrongOuter2(StrongOuter2 *d, StrongOuter2 *s) { |
382 *d = *s; | 324 *d = *s; |
383 } | 325 } |
384 | 326 |
385 // CHECK: define linkonce_odr hidden void @__move_constructor_8_8_s0_S_t8w16_s24(i8** noundef %[[DST:.*]], i8** noundef %[[SRC:.*]]) | 327 // CHECK: define linkonce_odr hidden void @__move_constructor_8_8_s0_S_t8w16_s24(ptr noundef %[[DST:.*]], ptr noundef %[[SRC:.*]]) |
386 // CHECK: %[[DST_ADDR:.*]] = alloca i8**, align 8 | 328 // CHECK: %[[DST_ADDR:.*]] = alloca ptr, align 8 |
387 // CHECK: %[[SRC_ADDR:.*]] = alloca i8**, align 8 | 329 // CHECK: %[[SRC_ADDR:.*]] = alloca ptr, align 8 |
388 // CHECK: store i8** %[[DST]], i8*** %[[DST_ADDR]], align 8 | 330 // CHECK: store ptr %[[DST]], ptr %[[DST_ADDR]], align 8 |
389 // CHECK: store i8** %[[SRC]], i8*** %[[SRC_ADDR]], align 8 | 331 // CHECK: store ptr %[[SRC]], ptr %[[SRC_ADDR]], align 8 |
390 // CHECK: %[[V0:.*]] = load i8**, i8*** %[[DST_ADDR]], align 8 | 332 // CHECK: %[[V0:.*]] = load ptr, ptr %[[DST_ADDR]], align 8 |
391 // CHECK: %[[V1:.*]] = load i8**, i8*** %[[SRC_ADDR]], align 8 | 333 // CHECK: %[[V1:.*]] = load ptr, ptr %[[SRC_ADDR]], align 8 |
392 // CHECK: %[[V2:.*]] = load i8*, i8** %[[V1]], align 8 | 334 // CHECK: %[[V2:.*]] = load ptr, ptr %[[V1]], align 8 |
393 // CHECK: store i8* null, i8** %[[V1]], align 8 | 335 // CHECK: store ptr null, ptr %[[V1]], align 8 |
394 // CHECK: store i8* %[[V2]], i8** %[[V0]], align 8 | 336 // CHECK: store ptr %[[V2]], ptr %[[V0]], align 8 |
395 // CHECK: %[[V3:.*]] = bitcast i8** %[[V0]] to i8* | 337 // CHECK: %[[V4:.*]] = getelementptr inbounds i8, ptr %[[V0]], i64 8 |
396 // CHECK: %[[V4:.*]] = getelementptr inbounds i8, i8* %[[V3]], i64 8 | 338 // CHECK: %[[V7:.*]] = getelementptr inbounds i8, ptr %[[V1]], i64 8 |
397 // CHECK: %[[V5:.*]] = bitcast i8* %[[V4]] to i8** | 339 // CHECK: call void @__move_constructor_8_8_t0w16_s16(ptr %[[V4]], ptr %[[V7]]) |
398 // CHECK: %[[V6:.*]] = bitcast i8** %[[V1]] to i8* | |
399 // CHECK: %[[V7:.*]] = getelementptr inbounds i8, i8* %[[V6]], i64 8 | |
400 // CHECK: %[[V8:.*]] = bitcast i8* %[[V7]] to i8** | |
401 // CHECK: call void @__move_constructor_8_8_t0w16_s16(i8** %[[V5]], i8** %[[V8]]) | |
402 | 340 |
403 void test_move_constructor_StrongOuter2(void) { | 341 void test_move_constructor_StrongOuter2(void) { |
404 __block StrongOuter2 t; | 342 __block StrongOuter2 t; |
405 BlockTy b = ^{ (void)t; }; | 343 BlockTy b = ^{ (void)t; }; |
406 } | 344 } |
407 | 345 |
408 // CHECK: define linkonce_odr hidden void @__move_assignment_8_8_s0_S_t8w16_s24(i8** noundef %[[DST:.*]], i8** noundef %[[SRC:.*]]) | 346 // CHECK: define linkonce_odr hidden void @__move_assignment_8_8_s0_S_t8w16_s24(ptr noundef %[[DST:.*]], ptr noundef %[[SRC:.*]]) |
409 // CHECK: %[[DST_ADDR:.*]] = alloca i8**, align 8 | 347 // CHECK: %[[DST_ADDR:.*]] = alloca ptr, align 8 |
410 // CHECK: %[[SRC_ADDR:.*]] = alloca i8**, align 8 | 348 // CHECK: %[[SRC_ADDR:.*]] = alloca ptr, align 8 |
411 // CHECK: store i8** %[[DST]], i8*** %[[DST_ADDR]], align 8 | 349 // CHECK: store ptr %[[DST]], ptr %[[DST_ADDR]], align 8 |
412 // CHECK: store i8** %[[SRC]], i8*** %[[SRC_ADDR]], align 8 | 350 // CHECK: store ptr %[[SRC]], ptr %[[SRC_ADDR]], align 8 |
413 // CHECK: %[[V0:.*]] = load i8**, i8*** %[[DST_ADDR]], align 8 | 351 // CHECK: %[[V0:.*]] = load ptr, ptr %[[DST_ADDR]], align 8 |
414 // CHECK: %[[V1:.*]] = load i8**, i8*** %[[SRC_ADDR]], align 8 | 352 // CHECK: %[[V1:.*]] = load ptr, ptr %[[SRC_ADDR]], align 8 |
415 // CHECK: %[[V2:.*]] = load i8*, i8** %[[V1]], align 8 | 353 // CHECK: %[[V2:.*]] = load ptr, ptr %[[V1]], align 8 |
416 // CHECK: store i8* null, i8** %[[V1]], align 8 | 354 // CHECK: store ptr null, ptr %[[V1]], align 8 |
417 // CHECK: %[[V3:.*]] = load i8*, i8** %[[V0]], align 8 | 355 // CHECK: %[[V3:.*]] = load ptr, ptr %[[V0]], align 8 |
418 // CHECK: store i8* %[[V2]], i8** %[[V0]], align 8 | 356 // CHECK: store ptr %[[V2]], ptr %[[V0]], align 8 |
419 // CHECK: call void @llvm.objc.release(i8* %[[V3]]) | 357 // CHECK: call void @llvm.objc.release(ptr %[[V3]]) |
420 // CHECK: %[[V4:.*]] = bitcast i8** %[[V0]] to i8* | 358 // CHECK: %[[V5:.*]] = getelementptr inbounds i8, ptr %[[V0]], i64 8 |
421 // CHECK: %[[V5:.*]] = getelementptr inbounds i8, i8* %[[V4]], i64 8 | 359 // CHECK: %[[V8:.*]] = getelementptr inbounds i8, ptr %[[V1]], i64 8 |
422 // CHECK: %[[V6:.*]] = bitcast i8* %[[V5]] to i8** | 360 // CHECK: call void @__move_assignment_8_8_t0w16_s16(ptr %[[V5]], ptr %[[V8]]) |
423 // CHECK: %[[V7:.*]] = bitcast i8** %[[V1]] to i8* | |
424 // CHECK: %[[V8:.*]] = getelementptr inbounds i8, i8* %[[V7]], i64 8 | |
425 // CHECK: %[[V9:.*]] = bitcast i8* %[[V8]] to i8** | |
426 // CHECK: call void @__move_assignment_8_8_t0w16_s16(i8** %[[V6]], i8** %[[V9]]) | |
427 | 361 |
428 void test_move_assignment_StrongOuter2(StrongOuter2 *p) { | 362 void test_move_assignment_StrongOuter2(StrongOuter2 *p) { |
429 *p = getStrongOuter2(); | 363 *p = getStrongOuter2(); |
430 } | 364 } |
431 | 365 |
432 // CHECK: define{{.*}} void @test_parameter_StrongSmall([2 x i64] %[[A_COERCE:.*]]) | 366 // CHECK: define{{.*}} void @test_parameter_StrongSmall([2 x i64] %[[A_COERCE:.*]]) |
433 // CHECK: %[[A:.*]] = alloca %[[STRUCT_STRONGSMALL]], align 8 | 367 // CHECK: %[[A:.*]] = alloca %[[STRUCT_STRONGSMALL]], align 8 |
434 // CHECK: %[[V0:.*]] = bitcast %[[STRUCT_STRONGSMALL]]* %[[A]] to [2 x i64]* | 368 // CHECK: store [2 x i64] %[[A_COERCE]], ptr %[[A]], align 8 |
435 // CHECK: store [2 x i64] %[[A_COERCE]], [2 x i64]* %[[V0]], align 8 | 369 // CHECK: call void @__destructor_8_s8(ptr %[[A]]) |
436 // CHECK: %[[V1:.*]] = bitcast %[[STRUCT_STRONGSMALL]]* %[[A]] to i8** | |
437 // CHECK: call void @__destructor_8_s8(i8** %[[V1]]) | |
438 // CHECK: ret void | 370 // CHECK: ret void |
439 | 371 |
440 void test_parameter_StrongSmall(StrongSmall a) { | 372 void test_parameter_StrongSmall(StrongSmall a) { |
441 } | 373 } |
442 | 374 |
443 // CHECK: define{{.*}} void @test_argument_StrongSmall([2 x i64] %[[A_COERCE:.*]]) | 375 // CHECK: define{{.*}} void @test_argument_StrongSmall([2 x i64] %[[A_COERCE:.*]]) |
444 // CHECK: %[[A:.*]] = alloca %[[STRUCT_STRONGSMALL]], align 8 | 376 // CHECK: %[[A:.*]] = alloca %[[STRUCT_STRONGSMALL]], align 8 |
445 // CHECK: %[[TEMP_LVALUE:.*]] = alloca %[[STRUCT_STRONGSMALL]], align 8 | 377 // CHECK: %[[TEMP_LVALUE:.*]] = alloca %[[STRUCT_STRONGSMALL]], align 8 |
446 // CHECK: %[[V0:.*]] = bitcast %[[STRUCT_STRONGSMALL]]* %[[A]] to [2 x i64]* | 378 // CHECK: store [2 x i64] %[[A_COERCE]], ptr %[[A]], align 8 |
447 // CHECK: store [2 x i64] %[[A_COERCE]], [2 x i64]* %[[V0]], align 8 | 379 // CHECK: call void @__copy_constructor_8_8_t0w4_s8(ptr %[[TEMP_LVALUE]], ptr %[[A]]) |
448 // CHECK: %[[V1:.*]] = bitcast %[[STRUCT_STRONGSMALL]]* %[[TEMP_LVALUE]] to i8** | 380 // CHECK: %[[V4:.*]] = load [2 x i64], ptr %[[TEMP_LVALUE]], align 8 |
449 // CHECK: %[[V2:.*]] = bitcast %[[STRUCT_STRONGSMALL]]* %[[A]] to i8** | |
450 // CHECK: call void @__copy_constructor_8_8_t0w4_s8(i8** %[[V1]], i8** %[[V2]]) | |
451 // CHECK: %[[V3:.*]] = bitcast %[[STRUCT_STRONGSMALL]]* %[[TEMP_LVALUE]] to [2 x i64]* | |
452 // CHECK: %[[V4:.*]] = load [2 x i64], [2 x i64]* %[[V3]], align 8 | |
453 // CHECK: call void @calleeStrongSmall([2 x i64] %[[V4]]) | 381 // CHECK: call void @calleeStrongSmall([2 x i64] %[[V4]]) |
454 // CHECK: %[[V5:.*]] = bitcast %[[STRUCT_STRONGSMALL]]* %[[A]] to i8** | 382 // CHECK: call void @__destructor_8_s8(ptr %[[A]]) |
455 // CHECK: call void @__destructor_8_s8(i8** %[[V5]]) | |
456 // CHECK: ret void | 383 // CHECK: ret void |
457 | 384 |
458 void test_argument_StrongSmall(StrongSmall a) { | 385 void test_argument_StrongSmall(StrongSmall a) { |
459 calleeStrongSmall(a); | 386 calleeStrongSmall(a); |
460 } | 387 } |
461 | 388 |
462 // CHECK: define{{.*}} [2 x i64] @test_return_StrongSmall([2 x i64] %[[A_COERCE:.*]]) | 389 // CHECK: define{{.*}} [2 x i64] @test_return_StrongSmall([2 x i64] %[[A_COERCE:.*]]) |
463 // CHECK: %[[RETVAL:.*]] = alloca %[[STRUCT_STRONGSMALL]], align 8 | 390 // CHECK: %[[RETVAL:.*]] = alloca %[[STRUCT_STRONGSMALL]], align 8 |
464 // CHECK: %[[A:.*]] = alloca %[[STRUCT_STRONGSMALL]], align 8 | 391 // CHECK: %[[A:.*]] = alloca %[[STRUCT_STRONGSMALL]], align 8 |
465 // CHECK: %[[V0:.*]] = bitcast %[[STRUCT_STRONGSMALL]]* %[[A]] to [2 x i64]* | 392 // CHECK: store [2 x i64] %[[A_COERCE]], ptr %[[A]], align 8 |
466 // CHECK: store [2 x i64] %[[A_COERCE]], [2 x i64]* %[[V0]], align 8 | 393 // CHECK: call void @__copy_constructor_8_8_t0w4_s8(ptr %[[RETVAL]], ptr %[[A]]) |
467 // CHECK: %[[V1:.*]] = bitcast %[[STRUCT_STRONGSMALL]]* %[[RETVAL]] to i8** | 394 // CHECK: call void @__destructor_8_s8(ptr %[[A]]) |
468 // CHECK: %[[V2:.*]] = bitcast %[[STRUCT_STRONGSMALL]]* %[[A]] to i8** | 395 // CHECK: %[[V5:.*]] = load [2 x i64], ptr %[[RETVAL]], align 8 |
469 // CHECK: call void @__copy_constructor_8_8_t0w4_s8(i8** %[[V1]], i8** %[[V2]]) | |
470 // CHECK: %[[V3:.*]] = bitcast %[[STRUCT_STRONGSMALL]]* %[[A]] to i8** | |
471 // CHECK: call void @__destructor_8_s8(i8** %[[V3]]) | |
472 // CHECK: %[[V4:.*]] = bitcast %[[STRUCT_STRONGSMALL]]* %[[RETVAL]] to [2 x i64]* | |
473 // CHECK: %[[V5:.*]] = load [2 x i64], [2 x i64]* %[[V4]], align 8 | |
474 // CHECK: ret [2 x i64] %[[V5]] | 396 // CHECK: ret [2 x i64] %[[V5]] |
475 | 397 |
476 StrongSmall test_return_StrongSmall(StrongSmall a) { | 398 StrongSmall test_return_StrongSmall(StrongSmall a) { |
477 return a; | 399 return a; |
478 } | 400 } |
479 | 401 |
480 // CHECK: define{{.*}} void @test_destructor_ignored_result() | 402 // CHECK: define{{.*}} void @test_destructor_ignored_result() |
481 // CHECK: %[[COERCE:.*]] = alloca %[[STRUCT_STRONGSMALL]], align 8 | 403 // CHECK: %[[COERCE:.*]] = alloca %[[STRUCT_STRONGSMALL]], align 8 |
482 // CHECK: %[[CALL:.*]] = call [2 x i64] @getStrongSmall() | 404 // CHECK: %[[CALL:.*]] = call [2 x i64] @getStrongSmall() |
483 // CHECK: %[[V0:.*]] = bitcast %[[STRUCT_STRONGSMALL]]* %[[COERCE]] to [2 x i64]* | 405 // CHECK: store [2 x i64] %[[CALL]], ptr %[[COERCE]], align 8 |
484 // CHECK: store [2 x i64] %[[CALL]], [2 x i64]* %[[V0]], align 8 | 406 // CHECK: call void @__destructor_8_s8(ptr %[[COERCE]]) |
485 // CHECK: %[[V1:.*]] = bitcast %[[STRUCT_STRONGSMALL]]* %[[COERCE]] to i8** | |
486 // CHECK: call void @__destructor_8_s8(i8** %[[V1]]) | |
487 // CHECK: ret void | 407 // CHECK: ret void |
488 | 408 |
489 void test_destructor_ignored_result(void) { | 409 void test_destructor_ignored_result(void) { |
490 getStrongSmall(); | 410 getStrongSmall(); |
491 } | 411 } |
492 | 412 |
493 // CHECK: define{{.*}} void @test_destructor_ignored_result2(%{{.*}}* noundef %[[C:.*]]) | 413 // CHECK: define{{.*}} void @test_destructor_ignored_result2(ptr noundef %[[C:.*]]) |
494 // CHECK: %[[TMP:.*]] = alloca %[[STRUCT_STRONGSMALL]], align 8 | 414 // CHECK: %[[TMP:.*]] = alloca %[[STRUCT_STRONGSMALL]], align 8 |
495 // CHECK: %[[CALL:.*]] = call [2 x i64]{{.*}}@objc_msgSend | 415 // CHECK: %[[CALL:.*]] = call [2 x i64]{{.*}}@objc_msgSend |
496 // CHECK: %[[V5:.*]] = bitcast %[[STRUCT_STRONGSMALL]]* %[[TMP]] to [2 x i64]* | 416 // CHECK: store [2 x i64] %[[CALL]], ptr %[[TMP]], align 8 |
497 // CHECK: store [2 x i64] %[[CALL]], [2 x i64]* %[[V5]], align 8 | 417 // CHECK: call void @__destructor_8_s8(ptr %[[TMP]]) |
498 // CHECK: %[[V6:.*]] = bitcast %[[STRUCT_STRONGSMALL]]* %[[TMP]] to i8** | |
499 // CHECK: call void @__destructor_8_s8(i8** %[[V6]]) | |
500 | 418 |
501 void test_destructor_ignored_result2(C *c) { | 419 void test_destructor_ignored_result2(C *c) { |
502 [c getStrongSmall]; | 420 [c getStrongSmall]; |
503 } | 421 } |
504 | 422 |
505 // CHECK: define{{.*}} void @test_copy_constructor_StrongBlock( | 423 // CHECK: define{{.*}} void @test_copy_constructor_StrongBlock( |
506 // CHECK: call void @__copy_constructor_8_8_sb0( | 424 // CHECK: call void @__copy_constructor_8_8_sb0( |
507 // CHECK: call void @__destructor_8_sb0( | 425 // CHECK: call void @__destructor_8_sb0( |
508 // CHECK: ret void | 426 // CHECK: ret void |
509 | 427 |
510 // CHECK: define linkonce_odr hidden void @__copy_constructor_8_8_sb0(i8** noundef %[[DST:.*]], i8** noundef %[[SRC:.*]]) | 428 // CHECK: define linkonce_odr hidden void @__copy_constructor_8_8_sb0(ptr noundef %[[DST:.*]], ptr noundef %[[SRC:.*]]) |
511 // CHECK: %[[DST_ADDR:.*]] = alloca i8**, align 8 | 429 // CHECK: %[[DST_ADDR:.*]] = alloca ptr, align 8 |
512 // CHECK: %[[SRC_ADDR:.*]] = alloca i8**, align 8 | 430 // CHECK: %[[SRC_ADDR:.*]] = alloca ptr, align 8 |
513 // CHECK: store i8** %[[DST]], i8*** %[[DST_ADDR]], align 8 | 431 // CHECK: store ptr %[[DST]], ptr %[[DST_ADDR]], align 8 |
514 // CHECK: store i8** %[[SRC]], i8*** %[[SRC_ADDR]], align 8 | 432 // CHECK: store ptr %[[SRC]], ptr %[[SRC_ADDR]], align 8 |
515 // CHECK: %[[V0:.*]] = load i8**, i8*** %[[DST_ADDR]], align 8 | 433 // CHECK: %[[V0:.*]] = load ptr, ptr %[[DST_ADDR]], align 8 |
516 // CHECK: %[[V1:.*]] = load i8**, i8*** %[[SRC_ADDR]], align 8 | 434 // CHECK: %[[V1:.*]] = load ptr, ptr %[[SRC_ADDR]], align 8 |
517 // CHECK: %[[V2:.*]] = load i8*, i8** %[[V1]], align 8 | 435 // CHECK: %[[V2:.*]] = load ptr, ptr %[[V1]], align 8 |
518 // CHECK: %[[V3:.*]] = call i8* @llvm.objc.retainBlock(i8* %[[V2]]) | 436 // CHECK: %[[V3:.*]] = call ptr @llvm.objc.retainBlock(ptr %[[V2]]) |
519 // CHECK: store i8* %[[V3]], i8** %[[V0]], align 8 | 437 // CHECK: store ptr %[[V3]], ptr %[[V0]], align 8 |
520 // CHECK: ret void | 438 // CHECK: ret void |
521 | 439 |
522 void test_copy_constructor_StrongBlock(StrongBlock *s) { | 440 void test_copy_constructor_StrongBlock(StrongBlock *s) { |
523 StrongBlock t = *s; | 441 StrongBlock t = *s; |
524 } | 442 } |
525 | 443 |
526 // CHECK: define{{.*}} void @test_copy_assignment_StrongBlock(%[[STRUCT_STRONGBLOCK]]* noundef %[[D:.*]], %[[STRUCT_STRONGBLOCK]]* noundef %[[S:.*]]) | 444 // CHECK: define{{.*}} void @test_copy_assignment_StrongBlock(ptr noundef %[[D:.*]], ptr noundef %[[S:.*]]) |
527 // CHECK: call void @__copy_assignment_8_8_sb0( | 445 // CHECK: call void @__copy_assignment_8_8_sb0( |
528 | 446 |
529 // CHECK: define linkonce_odr hidden void @__copy_assignment_8_8_sb0(i8** noundef %[[DST:.*]], i8** noundef %[[SRC:.*]]) | 447 // CHECK: define linkonce_odr hidden void @__copy_assignment_8_8_sb0(ptr noundef %[[DST:.*]], ptr noundef %[[SRC:.*]]) |
530 // CHECK: %[[DST_ADDR:.*]] = alloca i8**, align 8 | 448 // CHECK: %[[DST_ADDR:.*]] = alloca ptr, align 8 |
531 // CHECK: %[[SRC_ADDR:.*]] = alloca i8**, align 8 | 449 // CHECK: %[[SRC_ADDR:.*]] = alloca ptr, align 8 |
532 // CHECK: store i8** %[[DST]], i8*** %[[DST_ADDR]], align 8 | 450 // CHECK: store ptr %[[DST]], ptr %[[DST_ADDR]], align 8 |
533 // CHECK: store i8** %[[SRC]], i8*** %[[SRC_ADDR]], align 8 | 451 // CHECK: store ptr %[[SRC]], ptr %[[SRC_ADDR]], align 8 |
534 // CHECK: %[[V0:.*]] = load i8**, i8*** %[[DST_ADDR]], align 8 | 452 // CHECK: %[[V0:.*]] = load ptr, ptr %[[DST_ADDR]], align 8 |
535 // CHECK: %[[V1:.*]] = load i8**, i8*** %[[SRC_ADDR]], align 8 | 453 // CHECK: %[[V1:.*]] = load ptr, ptr %[[SRC_ADDR]], align 8 |
536 // CHECK: %[[V2:.*]] = load i8*, i8** %[[V1]], align 8 | 454 // CHECK: %[[V2:.*]] = load ptr, ptr %[[V1]], align 8 |
537 // CHECK: %[[V3:.*]] = call i8* @llvm.objc.retainBlock(i8* %[[V2]]) | 455 // CHECK: %[[V3:.*]] = call ptr @llvm.objc.retainBlock(ptr %[[V2]]) |
538 // CHECK: %[[V4:.*]] = load i8*, i8** %[[V0]], align 8 | 456 // CHECK: %[[V4:.*]] = load ptr, ptr %[[V0]], align 8 |
539 // CHECK: store i8* %[[V3]], i8** %[[V0]], align 8 | 457 // CHECK: store ptr %[[V3]], ptr %[[V0]], align 8 |
540 // CHECK: call void @llvm.objc.release(i8* %[[V4]]) | 458 // CHECK: call void @llvm.objc.release(ptr %[[V4]]) |
541 // CHECK: ret void | 459 // CHECK: ret void |
542 | 460 |
543 void test_copy_assignment_StrongBlock(StrongBlock *d, StrongBlock *s) { | 461 void test_copy_assignment_StrongBlock(StrongBlock *d, StrongBlock *s) { |
544 *d = *s; | 462 *d = *s; |
545 } | 463 } |
546 | 464 |
547 // CHECK-LABEL: define{{.*}} void @test_copy_assignment_StructWithBool( | 465 // CHECK-LABEL: define{{.*}} void @test_copy_assignment_StructWithBool( |
548 // CHECK: call void @__copy_assignment_8_8_AB0s1n2_tv0w8_AE_S_sv8_AB16s4n16_tv128w32_AE( | 466 // CHECK: call void @__copy_assignment_8_8_AB0s1n2_tv0w8_AE_S_sv8_AB16s4n16_tv128w32_AE( |
549 | 467 |
550 // CHECK-LABEL: define linkonce_odr hidden void @__copy_assignment_8_8_AB0s1n2_tv0w8_AE_S_sv8_AB16s4n16_tv128w32_AE( | 468 // CHECK-LABEL: define linkonce_odr hidden void @__copy_assignment_8_8_AB0s1n2_tv0w8_AE_S_sv8_AB16s4n16_tv128w32_AE( |
551 // CHECK: %[[ADDR_CUR:.*]] = phi i8** | 469 // CHECK: %[[ADDR_CUR:.*]] = phi ptr |
552 // CHECK: %[[ADDR_CUR1:.*]] = phi i8** | 470 // CHECK: %[[ADDR_CUR1:.*]] = phi ptr |
553 | 471 |
554 // CHECK: %[[V4:.*]] = bitcast i8** %[[ADDR_CUR]] to i8* | 472 // CHECK: %[[V6:.*]] = load volatile i8, ptr %[[ADDR_CUR1]], align 1 |
555 // CHECK: %[[V5:.*]] = bitcast i8** %[[ADDR_CUR1]] to i8* | |
556 // CHECK: %[[V6:.*]] = load volatile i8, i8* %[[V5]], align 1 | |
557 // CHECK: %[[TOBOOL:.*]] = trunc i8 %[[V6]] to i1 | 473 // CHECK: %[[TOBOOL:.*]] = trunc i8 %[[V6]] to i1 |
558 // CHECK: %[[FROMBOOL:.*]] = zext i1 %[[TOBOOL]] to i8 | 474 // CHECK: %[[FROMBOOL:.*]] = zext i1 %[[TOBOOL]] to i8 |
559 // CHECK: store volatile i8 %[[FROMBOOL]], i8* %[[V4]], align 1 | 475 // CHECK: store volatile i8 %[[FROMBOOL]], ptr %[[ADDR_CUR]], align 1 |
560 | 476 |
561 void test_copy_assignment_StructWithBool(StructWithBool *d, StructWithBool *s) { | 477 void test_copy_assignment_StructWithBool(StructWithBool *d, StructWithBool *s) { |
562 *d = *s; | 478 *d = *s; |
563 } | 479 } |
564 | 480 |
567 // CHECK-NOT: call | 483 // CHECK-NOT: call |
568 // CHECK: call void @__destructor_8_sv8( | 484 // CHECK: call void @__destructor_8_sv8( |
569 // CHECK-NOT: call | 485 // CHECK-NOT: call |
570 | 486 |
571 // CHECK: define linkonce_odr hidden void @__copy_constructor_8_8_t0w4_sv8( | 487 // CHECK: define linkonce_odr hidden void @__copy_constructor_8_8_t0w4_sv8( |
572 // CHECK: %[[V8:.*]] = load volatile i8*, i8** %{{.*}}, align 8 | 488 // CHECK: %[[V8:.*]] = load volatile ptr, ptr %{{.*}}, align 8 |
573 // CHECK: %[[V9:.*]] = call i8* @llvm.objc.retain(i8* %[[V8]]) | 489 // CHECK: %[[V9:.*]] = call ptr @llvm.objc.retain(ptr %[[V8]]) |
574 // CHECK: store volatile i8* %[[V9]], i8** %{{.*}}, align 8 | 490 // CHECK: store volatile ptr %[[V9]], ptr %{{.*}}, align 8 |
575 | 491 |
576 void test_copy_constructor_StrongVolatile0(StrongVolatile *s) { | 492 void test_copy_constructor_StrongVolatile0(StrongVolatile *s) { |
577 StrongVolatile t = *s; | 493 StrongVolatile t = *s; |
578 } | 494 } |
579 | 495 |
589 // CHECK: call void @__copy_constructor_8_8_t0w16_s16( | 505 // CHECK: call void @__copy_constructor_8_8_t0w16_s16( |
590 // CHECK: call void @__destructor_8_s16( | 506 // CHECK: call void @__destructor_8_s16( |
591 // CHECK: call void @__destructor_8_s16( | 507 // CHECK: call void @__destructor_8_s16( |
592 // CHECK: ret void | 508 // CHECK: ret void |
593 | 509 |
594 // CHECK: define linkonce_odr hidden void @__copy_helper_block_8_32n13_8_8_t0w16_s16(i8* noundef %0, i8* noundef %1) | 510 // CHECK: define linkonce_odr hidden void @__copy_helper_block_8_32n13_8_8_t0w16_s16(ptr noundef %0, ptr noundef %1) |
595 // CHECK: call void @__copy_constructor_8_8_t0w16_s16( | 511 // CHECK: call void @__copy_constructor_8_8_t0w16_s16( |
596 // CHECK: ret void | 512 // CHECK: ret void |
597 | 513 |
598 // CHECK: define linkonce_odr hidden void @__destroy_helper_block_8_32n5_8_s16( | 514 // CHECK: define linkonce_odr hidden void @__destroy_helper_block_8_32n5_8_s16( |
599 // CHECK: call void @__destructor_8_s16( | 515 // CHECK: call void @__destructor_8_s16( |
604 BlockTy b = ^(void){ (void)t; }; | 520 BlockTy b = ^(void){ (void)t; }; |
605 } | 521 } |
606 | 522 |
607 // CHECK: define{{.*}} void @test_variable_length_array(i32 noundef %[[N:.*]]) | 523 // CHECK: define{{.*}} void @test_variable_length_array(i32 noundef %[[N:.*]]) |
608 // CHECK: %[[N_ADDR:.*]] = alloca i32, align 4 | 524 // CHECK: %[[N_ADDR:.*]] = alloca i32, align 4 |
609 // CHECK: store i32 %[[N]], i32* %[[N_ADDR]], align 4 | 525 // CHECK: store i32 %[[N]], ptr %[[N_ADDR]], align 4 |
610 // CHECK: %[[V0:.*]] = load i32, i32* %[[N_ADDR]], align 4 | 526 // CHECK: %[[V0:.*]] = load i32, ptr %[[N_ADDR]], align 4 |
611 // CHECK: %[[V1:.*]] = zext i32 %[[V0]] to i64 | 527 // CHECK: %[[V1:.*]] = zext i32 %[[V0]] to i64 |
612 // CHECK: %[[VLA:.*]] = alloca %[[STRUCT_STRONG]], i64 %[[V1]], align 8 | 528 // CHECK: %[[VLA:.*]] = alloca %[[STRUCT_STRONG]], i64 %[[V1]], align 8 |
613 // CHECK: %[[V3:.*]] = bitcast %[[STRUCT_STRONG]]* %[[VLA]] to i8** | |
614 // CHECK: %[[V4:.*]] = mul nuw i64 24, %[[V1]] | 529 // CHECK: %[[V4:.*]] = mul nuw i64 24, %[[V1]] |
615 // CHECK: %[[V5:.*]] = bitcast i8** %[[V3]] to i8* | 530 // CHECK: %[[V6:.*]] = getelementptr inbounds i8, ptr %[[VLA]], i64 %[[V4]] |
616 // CHECK: %[[V6:.*]] = getelementptr inbounds i8, i8* %[[V5]], i64 %[[V4]] | |
617 // CHECK: %[[DSTARRAY_END:.*]] = bitcast i8* %[[V6]] to i8** | |
618 // CHECK: br label | 531 // CHECK: br label |
619 | 532 |
620 // CHECK: %[[DSTADDR_CUR:.*]] = phi i8** [ %[[V3]], {{.*}} ], [ %[[V7:.*]], {{.*}} ] | 533 // CHECK: %[[DSTADDR_CUR:.*]] = phi ptr [ %[[VLA]], {{.*}} ], [ %[[V7:.*]], {{.*}} ] |
621 // CHECK: %[[DONE:.*]] = icmp eq i8** %[[DSTADDR_CUR]], %[[DSTARRAY_END]] | 534 // CHECK: %[[DONE:.*]] = icmp eq ptr %[[DSTADDR_CUR]], %[[V6]] |
622 // CHECK: br i1 %[[DONE]], label | 535 // CHECK: br i1 %[[DONE]], label |
623 | 536 |
624 // CHECK: call void @__default_constructor_8_s16(i8** %[[DSTADDR_CUR]]) | 537 // CHECK: call void @__default_constructor_8_s16(ptr %[[DSTADDR_CUR]]) |
625 // CHECK: %[[V8:.*]] = bitcast i8** %[[DSTADDR_CUR]] to i8* | 538 // CHECK: %[[V9:.*]] = getelementptr inbounds i8, ptr %[[DSTADDR_CUR]], i64 24 |
626 // CHECK: %[[V9:.*]] = getelementptr inbounds i8, i8* %[[V8]], i64 24 | |
627 // CHECK: %[[V7]] = bitcast i8* %[[V9]] to i8** | |
628 // CHECK: br label | 539 // CHECK: br label |
629 | 540 |
630 // CHECK: call void @func(%[[STRUCT_STRONG]]* noundef %[[VLA]]) | 541 // CHECK: call void @func(ptr noundef %[[VLA]]) |
631 // CHECK: %[[V10:.*]] = getelementptr inbounds %[[STRUCT_STRONG]], %[[STRUCT_STRONG]]* %[[VLA]], i64 %[[V1]] | 542 // CHECK: %[[V10:.*]] = getelementptr inbounds %[[STRUCT_STRONG]], ptr %[[VLA]], i64 %[[V1]] |
632 // CHECK: %[[ARRAYDESTROY_ISEMPTY:.*]] = icmp eq %[[STRUCT_STRONG]]* %[[VLA]], %[[V10]] | 543 // CHECK: %[[ARRAYDESTROY_ISEMPTY:.*]] = icmp eq ptr %[[VLA]], %[[V10]] |
633 // CHECK: br i1 %[[ARRAYDESTROY_ISEMPTY]], label | 544 // CHECK: br i1 %[[ARRAYDESTROY_ISEMPTY]], label |
634 | 545 |
635 // CHECK: %[[ARRAYDESTROY_ELEMENTPAST:.*]] = phi %[[STRUCT_STRONG]]* [ %[[V10]], {{.*}} ], [ %[[ARRAYDESTROY_ELEMENT:.*]], {{.*}} ] | 546 // CHECK: %[[ARRAYDESTROY_ELEMENTPAST:.*]] = phi ptr [ %[[V10]], {{.*}} ], [ %[[ARRAYDESTROY_ELEMENT:.*]], {{.*}} ] |
636 // CHECK: %[[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds %[[STRUCT_STRONG]], %[[STRUCT_STRONG]]* %[[ARRAYDESTROY_ELEMENTPAST]], i64 -1 | 547 // CHECK: %[[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds %[[STRUCT_STRONG]], ptr %[[ARRAYDESTROY_ELEMENTPAST]], i64 -1 |
637 // CHECK: %[[V11:.*]] = bitcast %[[STRUCT_STRONG]]* %[[ARRAYDESTROY_ELEMENT]] to i8** | 548 // CHECK: call void @__destructor_8_s16(ptr %[[ARRAYDESTROY_ELEMENT]]) |
638 // CHECK: call void @__destructor_8_s16(i8** %[[V11]]) | 549 // CHECK: %[[ARRAYDESTROY_DONE:.*]] = icmp eq ptr %[[ARRAYDESTROY_ELEMENT]], %[[VLA]] |
639 // CHECK: %[[ARRAYDESTROY_DONE:.*]] = icmp eq %[[STRUCT_STRONG]]* %[[ARRAYDESTROY_ELEMENT]], %[[VLA]] | |
640 // CHECK: br i1 %[[ARRAYDESTROY_DONE]], label | 550 // CHECK: br i1 %[[ARRAYDESTROY_DONE]], label |
641 | 551 |
642 // CHECK: ret void | 552 // CHECK: ret void |
643 | 553 |
644 void test_variable_length_array(int n) { | 554 void test_variable_length_array(int n) { |
645 Strong a[n]; | 555 Strong a[n]; |
646 func(a); | 556 func(a); |
647 } | 557 } |
648 | 558 |
649 // CHECK: define linkonce_odr hidden void @__default_constructor_8_AB8s8n4_s8_AE( | 559 // CHECK: define linkonce_odr hidden void @__default_constructor_8_AB8s8n4_s8_AE( |
650 // CHECK: call void @llvm.memset.p0i8.i64(i8* align 8 %{{.*}}, i8 0, i64 32, i1 false) | 560 // CHECK: call void @llvm.memset.p0.i64(ptr align 8 %{{.*}}, i8 0, i64 32, i1 false) |
651 void test_constructor_destructor_IDArray(void) { | 561 void test_constructor_destructor_IDArray(void) { |
652 IDArray t; | 562 IDArray t; |
653 } | 563 } |
654 | 564 |
655 // CHECK: define linkonce_odr hidden void @__default_constructor_8_AB8s24n4_S_s24_AE( | 565 // CHECK: define linkonce_odr hidden void @__default_constructor_8_AB8s24n4_S_s24_AE( |
657 StructArray t; | 567 StructArray t; |
658 } | 568 } |
659 | 569 |
660 // Test that StructArray's field 'd' is copied before entering the loop. | 570 // Test that StructArray's field 'd' is copied before entering the loop. |
661 | 571 |
662 // CHECK: define linkonce_odr hidden void @__copy_constructor_8_8_t0w8_AB8s24n4_S_t8w16_s24_AE(i8** noundef %[[DST:.*]], i8** noundef %[[SRC:.*]]) | 572 // CHECK: define linkonce_odr hidden void @__copy_constructor_8_8_t0w8_AB8s24n4_S_t8w16_s24_AE(ptr noundef %[[DST:.*]], ptr noundef %[[SRC:.*]]) |
663 // CHECK: entry: | 573 // CHECK: entry: |
664 // CHECK: %[[DST_ADDR:.*]] = alloca i8**, align 8 | 574 // CHECK: %[[DST_ADDR:.*]] = alloca ptr, align 8 |
665 // CHECK: %[[SRC_ADDR:.*]] = alloca i8**, align 8 | 575 // CHECK: %[[SRC_ADDR:.*]] = alloca ptr, align 8 |
666 // CHECK: store i8** %[[DST]], i8*** %[[DST_ADDR]], align 8 | 576 // CHECK: store ptr %[[DST]], ptr %[[DST_ADDR]], align 8 |
667 // CHECK: store i8** %[[SRC]], i8*** %[[SRC_ADDR]], align 8 | 577 // CHECK: store ptr %[[SRC]], ptr %[[SRC_ADDR]], align 8 |
668 // CHECK: %[[V0:.*]] = load i8**, i8*** %[[DST_ADDR]], align 8 | 578 // CHECK: %[[V0:.*]] = load ptr, ptr %[[DST_ADDR]], align 8 |
669 // CHECK: %[[V1:.*]] = load i8**, i8*** %[[SRC_ADDR]], align 8 | 579 // CHECK: %[[V1:.*]] = load ptr, ptr %[[SRC_ADDR]], align 8 |
670 // CHECK: %[[V2:.*]] = bitcast i8** %[[V0]] to i64* | 580 // CHECK: %[[V4:.*]] = load i64, ptr %[[V1]], align 8 |
671 // CHECK: %[[V3:.*]] = bitcast i8** %[[V1]] to i64* | 581 // CHECK: store i64 %[[V4]], ptr %[[V0]], align 8 |
672 // CHECK: %[[V4:.*]] = load i64, i64* %[[V3]], align 8 | 582 |
673 // CHECK: store i64 %[[V4]], i64* %[[V2]], align 8 | 583 // CHECK: phi ptr |
674 | 584 // CHECK: phi ptr |
675 // CHECK: phi i8** | 585 |
676 // CHECK: phi i8** | 586 // CHECK: phi ptr |
677 | 587 // CHECK: phi ptr |
678 // CHECK: phi i8** | 588 |
679 // CHECK: phi i8** | 589 // CHECK-NOT: load i64, ptr % |
680 | |
681 // CHECK-NOT: load i64, i64* % | |
682 // CHECK-NOT: store i64 % | 590 // CHECK-NOT: store i64 % |
683 // CHECK: call void @__copy_constructor_8_8_t0w16_s16( | 591 // CHECK: call void @__copy_constructor_8_8_t0w16_s16( |
684 | 592 |
685 void test_copy_constructor_StructArray(StructArray a) { | 593 void test_copy_constructor_StructArray(StructArray a) { |
686 StructArray t = a; | 594 StructArray t = a; |
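A minimal standalone sketch of the structure the copy-constructor CHECKs above require: the 8-byte trivial prefix ('d') is copied once with a word-sized load and store, and only then does the loop copy-construct each strong-containing array element. Elem and Outer are hypothetical stand-ins, not the Strong and StructArray definitions used earlier in this file:

#include <stddef.h>
#include <string.h>

typedef struct { int a[4]; void *f1; } Elem;        /* stand-in for 'Strong' */
typedef struct { double d; Elem arr[4]; } Outer;    /* stand-in for 'StructArray' */

static void copy_construct_elem(Elem *dst, const Elem *src) {
  /* stands in for __copy_constructor_8_8_t0w16_s16: copy the trivial part,
     then store the strong pointer (with a retain under ARC) */
  memcpy(dst->a, src->a, sizeof dst->a);
  dst->f1 = src->f1;
}

static void copy_construct_outer(Outer *dst, const Outer *src) {
  dst->d = src->d;                      /* the single i64 load/store before the loop */
  for (size_t i = 0; i < 4; ++i)        /* the phi-driven per-element loop */
    copy_construct_elem(&dst->arr[i], &src->arr[i]);
}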
689 // Check that IRGen copies the 9-bit bitfield, emitting an i16 load and store. | 597 // Check that IRGen copies the 9-bit bitfield, emitting an i16 load and store. |
690 | 598 |
691 // CHECK: define{{.*}} void @test_copy_constructor_Bitfield0( | 599 // CHECK: define{{.*}} void @test_copy_constructor_Bitfield0( |
692 | 600 |
693 // CHECK: define linkonce_odr hidden void @__copy_constructor_8_8_s0_t8w2( | 601 // CHECK: define linkonce_odr hidden void @__copy_constructor_8_8_s0_t8w2( |
694 // CHECK: %[[V4:.*]] = bitcast i8** %{{.*}} to i8* | 602 // CHECK: %[[V5:.*]] = getelementptr inbounds i8, ptr %{{.*}}, i64 8 |
695 // CHECK: %[[V5:.*]] = getelementptr inbounds i8, i8* %[[V4]], i64 8 | 603 // CHECK: %[[V8:.*]] = getelementptr inbounds i8, ptr %{{.*}}, i64 8 |
696 // CHECK: %[[V6:.*]] = bitcast i8* %[[V5]] to i8** | 604 // CHECK: %[[V12:.*]] = load i16, ptr %[[V8]], align 8 |
697 // CHECK: %[[V7:.*]] = bitcast i8** %{{.*}} to i8* | 605 // CHECK: store i16 %[[V12]], ptr %[[V5]], align 8 |
698 // CHECK: %[[V8:.*]] = getelementptr inbounds i8, i8* %[[V7]], i64 8 | |
699 // CHECK: %[[V9:.*]] = bitcast i8* %[[V8]] to i8** | |
700 // CHECK: %[[V10:.*]] = bitcast i8** %[[V6]] to i16* | |
701 // CHECK: %[[V11:.*]] = bitcast i8** %[[V9]] to i16* | |
702 // CHECK: %[[V12:.*]] = load i16, i16* %[[V11]], align 8 | |
703 // CHECK: store i16 %[[V12]], i16* %[[V10]], align 8 | |
704 // CHECK: ret void | 606 // CHECK: ret void |
705 | 607 |
706 void test_copy_constructor_Bitfield0(Bitfield0 *a) { | 608 void test_copy_constructor_Bitfield0(Bitfield0 *a) { |
707 Bitfield0 t = *a; | 609 Bitfield0 t = *a; |
708 } | 610 } |
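A 9-bit bitfield run occupies two bytes of storage, and the helper checked above moves that storage unit with a single i16 load and store rather than extracting each field. A rough standalone sketch of the same unit copy; NineBit is a hypothetical shape, not the Bitfield0 definition used earlier in this file, and the offset 8 assumes the arm64 layout this test targets:

#include <stdint.h>
#include <string.h>

struct NineBit {        /* stand-in shape: a strong pointer followed by a 9-bit run */
  void *obj;            /* __strong id in the real struct */
  unsigned bits : 9;    /* needs two bytes of storage */
};

static void copy_nine_bit_unit(struct NineBit *dst, const struct NineBit *src) {
  uint16_t unit;        /* the whole 2-byte storage unit, at offset 8 on arm64 */
  memcpy(&unit, (const unsigned char *)src + 8, sizeof unit);   /* load i16 */
  memcpy((unsigned char *)dst + 8, &unit, sizeof unit);         /* store i16 */
}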
709 | 611 |
710 // CHECK: define linkonce_odr hidden void @__copy_constructor_8_8_t0w2_s8_t16w4_s24_t32w12_s48_t56w9_tv513w2_tv520w8 | 612 // CHECK: define linkonce_odr hidden void @__copy_constructor_8_8_t0w2_s8_t16w4_s24_t32w12_s48_t56w9_tv513w2_tv520w8 |
711 // CHECK: %[[V4:.*]] = load i16, i16* %{{.*}}, align 8 | 613 // CHECK: %[[V4:.*]] = load i16, ptr %{{.*}}, align 8 |
712 // CHECK: store i16 %[[V4]], i16* %{{.*}}, align 8 | 614 // CHECK: store i16 %[[V4]], ptr %{{.*}}, align 8 |
713 // CHECK: %[[V21:.*]] = load i32, i32* %{{.*}}, align 8 | 615 // CHECK: %[[V21:.*]] = load i32, ptr %{{.*}}, align 8 |
714 // CHECK: store i32 %[[V21]], i32* %{{.*}}, align 8 | 616 // CHECK: store i32 %[[V21]], ptr %{{.*}}, align 8 |
715 // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 %{{.*}}, i8* align 8 %{{.*}}, i64 12, i1 false) | 617 // CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 %{{.*}}, ptr align 8 %{{.*}}, i64 12, i1 false) |
716 // CHECK: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 %{{.*}}, i8* align 8 %{{.*}}, i64 9, i1 false) | 618 // CHECK: call void @llvm.memcpy.p0.p0.i64(ptr align 8 %{{.*}}, ptr align 8 %{{.*}}, i64 9, i1 false) |
717 // CHECK: %[[V54:.*]] = bitcast i8** %[[V0:.*]] to %[[STRUCT_BITFIELD1]]* | 619 // CHECK: %[[I5:.*]] = getelementptr inbounds %[[STRUCT_BITFIELD1]], ptr %[[V0:.*]], i32 0, i32 8 |
718 // CHECK: %[[I5:.*]] = getelementptr inbounds %[[STRUCT_BITFIELD1]], %[[STRUCT_BITFIELD1]]* %[[V54]], i32 0, i32 8 | 620 // CHECK: %[[I51:.*]] = getelementptr inbounds %[[STRUCT_BITFIELD1]], ptr %[[V1:.*]], i32 0, i32 8 |
719 // CHECK: %[[V55:.*]] = bitcast i8** %[[V1:.*]] to %[[STRUCT_BITFIELD1]]* | 621 // CHECK: %[[BF_LOAD:.*]] = load volatile i8, ptr %[[I51]], align 8 |
720 // CHECK: %[[I51:.*]] = getelementptr inbounds %[[STRUCT_BITFIELD1]], %[[STRUCT_BITFIELD1]]* %[[V55]], i32 0, i32 8 | |
721 // CHECK: %[[BF_LOAD:.*]] = load volatile i8, i8* %[[I51]], align 8 | |
722 // CHECK: %[[BF_SHL:.*]] = shl i8 %[[BF_LOAD]], 5 | 622 // CHECK: %[[BF_SHL:.*]] = shl i8 %[[BF_LOAD]], 5 |
723 // CHECK: %[[BF_ASHR:.*]] = ashr i8 %[[BF_SHL]], 6 | 623 // CHECK: %[[BF_ASHR:.*]] = ashr i8 %[[BF_SHL]], 6 |
724 // CHECK: %[[BF_CAST:.*]] = sext i8 %[[BF_ASHR]] to i32 | 624 // CHECK: %[[BF_CAST:.*]] = sext i8 %[[BF_ASHR]] to i32 |
725 // CHECK: %[[V56:.*]] = trunc i32 %[[BF_CAST]] to i8 | 625 // CHECK: %[[V56:.*]] = trunc i32 %[[BF_CAST]] to i8 |
726 // CHECK: %[[BF_LOAD2:.*]] = load volatile i8, i8* %[[I5]], align 8 | 626 // CHECK: %[[BF_LOAD2:.*]] = load volatile i8, ptr %[[I5]], align 8 |
727 // CHECK: %[[BF_VALUE:.*]] = and i8 %[[V56]], 3 | 627 // CHECK: %[[BF_VALUE:.*]] = and i8 %[[V56]], 3 |
728 // CHECK: %[[BF_SHL3:.*]] = shl i8 %[[BF_VALUE]], 1 | 628 // CHECK: %[[BF_SHL3:.*]] = shl i8 %[[BF_VALUE]], 1 |
729 // CHECK: %[[BF_CLEAR:.*]] = and i8 %[[BF_LOAD2]], -7 | 629 // CHECK: %[[BF_CLEAR:.*]] = and i8 %[[BF_LOAD2]], -7 |
730 // CHECK: %[[BF_SET:.*]] = or i8 %[[BF_CLEAR]], %[[BF_SHL3]] | 630 // CHECK: %[[BF_SET:.*]] = or i8 %[[BF_CLEAR]], %[[BF_SHL3]] |
731 // CHECK: store volatile i8 %[[BF_SET]], i8* %[[I5]], align 8 | 631 // CHECK: store volatile i8 %[[BF_SET]], ptr %[[I5]], align 8 |
732 // CHECK: %[[V57:.*]] = bitcast i8** %[[V0]] to %[[STRUCT_BITFIELD1]]* | 632 // CHECK: %[[I6:.*]] = getelementptr inbounds %[[STRUCT_BITFIELD1]], ptr %[[V0]], i32 0, i32 9 |
733 // CHECK: %[[I6:.*]] = getelementptr inbounds %[[STRUCT_BITFIELD1]], %[[STRUCT_BITFIELD1]]* %[[V57]], i32 0, i32 9 | 633 // CHECK: %[[I64:.*]] = getelementptr inbounds %[[STRUCT_BITFIELD1]], ptr %[[V1]], i32 0, i32 9 |
734 // CHECK: %[[V58:.*]] = bitcast i8** %[[V1]] to %[[STRUCT_BITFIELD1]]* | 634 // CHECK: %[[V59:.*]] = load volatile i8, ptr %[[I64]], align 1 |
735 // CHECK: %[[I64:.*]] = getelementptr inbounds %[[STRUCT_BITFIELD1]], %[[STRUCT_BITFIELD1]]* %[[V58]], i32 0, i32 9 | 635 // CHECK: store volatile i8 %[[V59]], ptr %[[I6]], align 1 |
736 // CHECK: %[[V59:.*]] = load volatile i8, i8* %[[I64]], align 1 | |
737 // CHECK: store volatile i8 %[[V59]], i8* %[[I6]], align 1 | |
738 | 636 |
739 void test_copy_constructor_Bitfield1(Bitfield1 *a) { | 637 void test_copy_constructor_Bitfield1(Bitfield1 *a) { |
740 Bitfield1 t = *a; | 638 Bitfield1 t = *a; |
741 } | 639 } |
742 | 640 |
743 // CHECK: define{{.*}} void @test_copy_constructor_VolatileArray( | 641 // CHECK: define{{.*}} void @test_copy_constructor_VolatileArray( |
744 // CHECK: call void @__copy_constructor_8_8_s0_AB8s4n16_tv64w32_AE( | 642 // CHECK: call void @__copy_constructor_8_8_s0_AB8s4n16_tv64w32_AE( |
745 | 643 |
746 // CHECK: define linkonce_odr hidden void @__copy_constructor_8_8_s0_AB8s4n16_tv64w32_AE( | 644 // CHECK: define linkonce_odr hidden void @__copy_constructor_8_8_s0_AB8s4n16_tv64w32_AE( |
747 // CHECK: %[[ADDR_CUR:.*]] = phi i8** | 645 // CHECK: %[[ADDR_CUR:.*]] = phi ptr |
748 // CHECK: %[[ADDR_CUR1:.*]] = phi i8** | 646 // CHECK: %[[ADDR_CUR1:.*]] = phi ptr |
749 // CHECK: %[[V12:.*]] = bitcast i8** %[[ADDR_CUR]] to i32* | 647 // CHECK: %[[V14:.*]] = load volatile i32, ptr %[[ADDR_CUR1]], align 4 |
750 // CHECK: %[[V13:.*]] = bitcast i8** %[[ADDR_CUR1]] to i32* | 648 // CHECK: store volatile i32 %[[V14]], ptr %[[ADDR_CUR]], align 4 |
751 // CHECK: %[[V14:.*]] = load volatile i32, i32* %[[V13]], align 4 | |
752 // CHECK: store volatile i32 %[[V14]], i32* %[[V12]], align 4 | |
753 | 649 |
754 void test_copy_constructor_VolatileArray(VolatileArray *a) { | 650 void test_copy_constructor_VolatileArray(VolatileArray *a) { |
755 VolatileArray t = *a; | 651 VolatileArray t = *a; |
756 } | 652 } |
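Because the array elements are volatile, the copy helper above cannot fall back to a memcpy; each element is moved with its own volatile load and store inside the phi-driven loop. A small standalone sketch of that element-wise copy, with VolatileArrayLike as a hypothetical stand-in for the test's VolatileArray (the helper name suggests sixteen 4-byte elements):

typedef struct {
  void *obj;              /* __strong id in the real struct */
  volatile int vals[16];  /* hypothetical element count and type */
} VolatileArrayLike;      /* stand-in for 'VolatileArray' */

static void copy_volatile_elements(VolatileArrayLike *dst, const VolatileArrayLike *src) {
  for (int i = 0; i < 16; ++i) {
    int v = src->vals[i];   /* load volatile i32; may not be merged into a memcpy */
    dst->vals[i] = v;       /* store volatile i32 */
  }
  /* the strong 'obj' field is copied separately, with a retain */
}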
757 | 653 |
758 // CHECK: define{{.*}} void @test_compound_literal0( | 654 // CHECK: define{{.*}} void @test_compound_literal0( |
759 // CHECK: %[[P:.*]] = alloca %[[STRUCT_STRONGSMALL]]*, align 8 | 655 // CHECK: %[[P:.*]] = alloca ptr, align 8 |
760 // CHECK: %[[_COMPOUNDLITERAL:.*]] = alloca %[[STRUCT_STRONGSMALL]], align 8 | 656 // CHECK: %[[_COMPOUNDLITERAL:.*]] = alloca %[[STRUCT_STRONGSMALL]], align 8 |
761 // CHECK: %[[CLEANUP_COND:.*]] = alloca i1, align 1 | 657 // CHECK: %[[CLEANUP_COND:.*]] = alloca i1, align 1 |
762 // CHECK: %[[_COMPOUNDLITERAL1:.*]] = alloca %[[STRUCT_STRONGSMALL]], align 8 | 658 // CHECK: %[[_COMPOUNDLITERAL1:.*]] = alloca %[[STRUCT_STRONGSMALL]], align 8 |
763 // CHECK: %[[CLEANUP_COND4:.*]] = alloca i1, align 1 | 659 // CHECK: %[[CLEANUP_COND4:.*]] = alloca i1, align 1 |
764 | 660 |
765 // CHECK: %[[I:.*]] = getelementptr inbounds %[[STRUCT_STRONGSMALL]], %[[STRUCT_STRONGSMALL]]* %[[_COMPOUNDLITERAL]], i32 0, i32 0 | 661 // CHECK: %[[I:.*]] = getelementptr inbounds %[[STRUCT_STRONGSMALL]], ptr %[[_COMPOUNDLITERAL]], i32 0, i32 0 |
766 // CHECK: store i32 1, i32* %[[I]], align 8 | 662 // CHECK: store i32 1, ptr %[[I]], align 8 |
767 // CHECK: %[[F1:.*]] = getelementptr inbounds %[[STRUCT_STRONGSMALL]], %[[STRUCT_STRONGSMALL]]* %[[_COMPOUNDLITERAL]], i32 0, i32 1 | 663 // CHECK: %[[F1:.*]] = getelementptr inbounds %[[STRUCT_STRONGSMALL]], ptr %[[_COMPOUNDLITERAL]], i32 0, i32 1 |
768 // CHECK: store i8* null, i8** %[[F1]], align 8 | 664 // CHECK: store ptr null, ptr %[[F1]], align 8 |
769 // CHECK: store i1 true, i1* %[[CLEANUP_COND]], align 1 | 665 // CHECK: store i1 true, ptr %[[CLEANUP_COND]], align 1 |
770 | 666 |
771 // CHECK: %[[I2:.*]] = getelementptr inbounds %[[STRUCT_STRONGSMALL]], %[[STRUCT_STRONGSMALL]]* %[[_COMPOUNDLITERAL1]], i32 0, i32 0 | 667 // CHECK: %[[I2:.*]] = getelementptr inbounds %[[STRUCT_STRONGSMALL]], ptr %[[_COMPOUNDLITERAL1]], i32 0, i32 0 |
772 // CHECK: store i32 2, i32* %[[I2]], align 8 | 668 // CHECK: store i32 2, ptr %[[I2]], align 8 |
773 // CHECK: %[[F13:.*]] = getelementptr inbounds %[[STRUCT_STRONGSMALL]], %[[STRUCT_STRONGSMALL]]* %[[_COMPOUNDLITERAL1]], i32 0, i32 1 | 669 // CHECK: %[[F13:.*]] = getelementptr inbounds %[[STRUCT_STRONGSMALL]], ptr %[[_COMPOUNDLITERAL1]], i32 0, i32 1 |
774 // CHECK: store i8* null, i8** %[[F13]], align 8 | 670 // CHECK: store ptr null, ptr %[[F13]], align 8 |
775 // CHECK: store i1 true, i1* %[[CLEANUP_COND4]], align 1 | 671 // CHECK: store i1 true, ptr %[[CLEANUP_COND4]], align 1 |
776 | 672 |
777 // CHECK: %[[COND:.*]] = phi %[[STRUCT_STRONGSMALL]]* [ %[[_COMPOUNDLITERAL]], %{{.*}} ], [ %[[_COMPOUNDLITERAL1]], %{{.*}} ] | 673 // CHECK: %[[COND:.*]] = phi ptr [ %[[_COMPOUNDLITERAL]], %{{.*}} ], [ %[[_COMPOUNDLITERAL1]], %{{.*}} ] |
778 // CHECK: store %[[STRUCT_STRONGSMALL]]* %[[COND]], %[[STRUCT_STRONGSMALL]]** %[[P]], align 8 | 674 // CHECK: store ptr %[[COND]], ptr %[[P]], align 8 |
779 // CHECK: call void @func( | 675 // CHECK: call void @func( |
780 | 676 |
781 // CHECK: %[[V1:.*]] = bitcast %[[STRUCT_STRONGSMALL]]* %[[_COMPOUNDLITERAL1]] to i8** | 677 // CHECK: call void @__destructor_8_s8(ptr %[[_COMPOUNDLITERAL1]]) |
782 // CHECK: call void @__destructor_8_s8(i8** %[[V1]]) | 678 |
783 | 679 // CHECK: call void @__destructor_8_s8(ptr %[[_COMPOUNDLITERAL]]) |
784 // CHECK: %[[V2:.*]] = bitcast %[[STRUCT_STRONGSMALL]]* %[[_COMPOUNDLITERAL]] to i8** | |
785 // CHECK: call void @__destructor_8_s8(i8** %[[V2]]) | |
786 | 680 |
787 void test_compound_literal0(int c) { | 681 void test_compound_literal0(int c) { |
788 StrongSmall *p = c ? &(StrongSmall){ 1, 0 } : &(StrongSmall){ 2, 0 }; | 682 StrongSmall *p = c ? &(StrongSmall){ 1, 0 } : &(StrongSmall){ 2, 0 }; |
789 func(0); | 683 func(0); |
790 } | 684 } |
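The two allocas and the two i1 CLEANUP_COND flags above implement conditional cleanup: each compound literal gets its own slot plus a flag recording whether it was constructed, and only the literal from the branch that actually ran is handed to __destructor_8_s8. A rough standalone sketch of that control flow, with SmallLike and use_ptr as hypothetical stand-ins for StrongSmall and func:

typedef struct { int x; void *f1; } SmallLike;   /* stand-in for StrongSmall */
extern void use_ptr(SmallLike *p);               /* stand-in for func */

static void conditional_compound_literals(int c) {
  SmallLike lit0, lit1;                      /* one slot per compound literal */
  _Bool lit0_live = 0, lit1_live = 0;        /* the i1 CLEANUP_COND flags */
  SmallLike *p;
  if (c) { lit0 = (SmallLike){ 1, 0 }; lit0_live = 1; p = &lit0; }
  else   { lit1 = (SmallLike){ 2, 0 }; lit1_live = 1; p = &lit1; }
  (void)p;
  use_ptr(0);                                /* mirrors func(0) in the test */
  if (lit1_live) { /* __destructor_8_s8(&lit1): release lit1.f1 */ }
  if (lit0_live) { /* __destructor_8_s8(&lit0): release lit0.f1 */ }
}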
792 // Check that there is only one destructor call, which destroys 't'. | 686 // Check that there is only one destructor call, which destroys 't'. |
793 | 687 |
794 // CHECK: define{{.*}} void @test_compound_literal1( | 688 // CHECK: define{{.*}} void @test_compound_literal1( |
795 // CHECK: %[[T:.*]] = alloca %[[STRUCT_STRONGSMALL]], align 8 | 689 // CHECK: %[[T:.*]] = alloca %[[STRUCT_STRONGSMALL]], align 8 |
796 | 690 |
797 // CHECK: %[[I:.*]] = getelementptr inbounds %[[STRUCT_STRONGSMALL]], %[[STRUCT_STRONGSMALL]]* %[[T]], i32 0, i32 0 | 691 // CHECK: %[[I:.*]] = getelementptr inbounds %[[STRUCT_STRONGSMALL]], ptr %[[T]], i32 0, i32 0 |
798 // CHECK: store i32 1, i32* %[[I]], align 8 | 692 // CHECK: store i32 1, ptr %[[I]], align 8 |
799 // CHECK: %[[F1:.*]] = getelementptr inbounds %[[STRUCT_STRONGSMALL]], %[[STRUCT_STRONGSMALL]]* %[[T]], i32 0, i32 1 | 693 // CHECK: %[[F1:.*]] = getelementptr inbounds %[[STRUCT_STRONGSMALL]], ptr %[[T]], i32 0, i32 1 |
800 // CHECK: store i8* null, i8** %[[F1]], align 8 | 694 // CHECK: store ptr null, ptr %[[F1]], align 8 |
801 | 695 |
802 // CHECK: %[[I1:.*]] = getelementptr inbounds %[[STRUCT_STRONGSMALL]], %[[STRUCT_STRONGSMALL]]* %[[T]], i32 0, i32 0 | 696 // CHECK: %[[I1:.*]] = getelementptr inbounds %[[STRUCT_STRONGSMALL]], ptr %[[T]], i32 0, i32 0 |
803 // CHECK: store i32 2, i32* %[[I1]], align 8 | 697 // CHECK: store i32 2, ptr %[[I1]], align 8 |
804 // CHECK: %[[F12:.*]] = getelementptr inbounds %[[STRUCT_STRONGSMALL]], %[[STRUCT_STRONGSMALL]]* %[[T]], i32 0, i32 1 | 698 // CHECK: %[[F12:.*]] = getelementptr inbounds %[[STRUCT_STRONGSMALL]], ptr %[[T]], i32 0, i32 1 |
805 // CHECK: store i8* null, i8** %[[F12]], align 8 | 699 // CHECK: store ptr null, ptr %[[F12]], align 8 |
806 | 700 |
807 // CHECK: call void @func( | 701 // CHECK: call void @func( |
808 // CHECK-NOT: call void | 702 // CHECK-NOT: call void |
809 // CHECK: %[[V1:.*]] = bitcast %[[STRUCT_STRONGSMALL]]* %[[T]] to i8** | 703 // CHECK: call void @__destructor_8_s8(ptr %[[T]]) |
810 // CHECK: call void @__destructor_8_s8(i8** %[[V1]]) | |
811 // CHECK-NOT: call void | 704 // CHECK-NOT: call void |
812 | 705 |
813 void test_compound_literal1(int c) { | 706 void test_compound_literal1(int c) { |
814 StrongSmall t = c ? (StrongSmall){ 1, 0 } : (StrongSmall){ 2, 0 }; | 707 StrongSmall t = c ? (StrongSmall){ 1, 0 } : (StrongSmall){ 2, 0 }; |
815 func(0); | 708 func(0); |
816 } | 709 } |
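Both arms of the conditional above initialize 't' directly, so no compound-literal temporaries survive the full expression and the single destructor call on 't' is the only cleanup in the function. A small sketch of that shape, with SmallInPlace as a hypothetical stand-in for StrongSmall:

typedef struct { int x; void *f1; } SmallInPlace;   /* stand-in for StrongSmall */

static void conditional_init_in_place(int c) {
  SmallInPlace t;                        /* the only object that needs cleanup */
  if (c) t = (SmallInPlace){ 1, 0 };     /* written directly into t's storage */
  else   t = (SmallInPlace){ 2, 0 };
  /* ... func(0) ... */
  (void)t;
  /* end of scope: the single __destructor_8_s8(&t) call releases t.f1 */
}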
817 | 710 |
818 // CHECK: define{{.*}} void @test_compound_literal2( | 711 // CHECK: define{{.*}} void @test_compound_literal2( |
819 // CHECK: %[[P_ADDR:.*]] = alloca %[[STRUCT_STRONGSMALL]]*, align 8 | 712 // CHECK: %[[P_ADDR:.*]] = alloca ptr, align 8 |
820 // CHECK: %[[_COMPOUNDLITERAL:.*]] = alloca %[[STRUCT_STRONGSMALL]], align 8 | 713 // CHECK: %[[_COMPOUNDLITERAL:.*]] = alloca %[[STRUCT_STRONGSMALL]], align 8 |
821 // CHECK: %[[CLEANUP_COND:.*]] = alloca i1, align 1 | 714 // CHECK: %[[CLEANUP_COND:.*]] = alloca i1, align 1 |
822 // CHECK: %[[_COMPOUNDLITERAL1:.*]] = alloca %[[STRUCT_STRONGSMALL]], align 8 | 715 // CHECK: %[[_COMPOUNDLITERAL1:.*]] = alloca %[[STRUCT_STRONGSMALL]], align 8 |
823 // CHECK: %[[CLEANUP_COND4:.*]] = alloca i1, align 1 | 716 // CHECK: %[[CLEANUP_COND4:.*]] = alloca i1, align 1 |
824 // CHECK: %[[V0:.*]] = load %[[STRUCT_STRONGSMALL]]*, %[[STRUCT_STRONGSMALL]]** %[[P_ADDR]], align 8 | 717 // CHECK: %[[V0:.*]] = load ptr, ptr %[[P_ADDR]], align 8 |
825 | 718 |
826 // CHECK: %[[I:.*]] = getelementptr inbounds %[[STRUCT_STRONGSMALL]], %[[STRUCT_STRONGSMALL]]* %[[_COMPOUNDLITERAL]], i32 0, i32 0 | 719 // CHECK: %[[I:.*]] = getelementptr inbounds %[[STRUCT_STRONGSMALL]], ptr %[[_COMPOUNDLITERAL]], i32 0, i32 0 |
827 // CHECK: store i32 1, i32* %[[I]], align 8 | 720 // CHECK: store i32 1, ptr %[[I]], align 8 |
828 // CHECK: %[[F1:.*]] = getelementptr inbounds %[[STRUCT_STRONGSMALL]], %[[STRUCT_STRONGSMALL]]* %[[_COMPOUNDLITERAL]], i32 0, i32 1 | 721 // CHECK: %[[F1:.*]] = getelementptr inbounds %[[STRUCT_STRONGSMALL]], ptr %[[_COMPOUNDLITERAL]], i32 0, i32 1 |
829 // CHECK: store i8* null, i8** %[[F1]], align 8 | 722 // CHECK: store ptr null, ptr %[[F1]], align 8 |
830 // CHECK: store i1 true, i1* %[[CLEANUP_COND]], align 1 | 723 // CHECK: store i1 true, ptr %[[CLEANUP_COND]], align 1 |
831 // CHECK: %[[V2:.*]] = bitcast %[[STRUCT_STRONGSMALL]]* %[[V0]] to i8** | 724 // CHECK: call void @__copy_assignment_8_8_t0w4_s8(ptr %[[V0]], ptr %[[_COMPOUNDLITERAL]]) |
832 // CHECK: %[[V3:.*]] = bitcast %[[STRUCT_STRONGSMALL]]* %[[_COMPOUNDLITERAL]] to i8** | 725 |
833 // CHECK: call void @__copy_assignment_8_8_t0w4_s8(i8** %[[V2]], i8** %[[V3]]) | 726 // CHECK: %[[I2:.*]] = getelementptr inbounds %[[STRUCT_STRONGSMALL]], ptr %[[_COMPOUNDLITERAL1]], i32 0, i32 0 |
834 | 727 // CHECK: store i32 2, ptr %[[I2]], align 8 |
835 // CHECK: %[[I2:.*]] = getelementptr inbounds %[[STRUCT_STRONGSMALL]], %[[STRUCT_STRONGSMALL]]* %[[_COMPOUNDLITERAL1]], i32 0, i32 0 | 728 // CHECK: %[[F13:.*]] = getelementptr inbounds %[[STRUCT_STRONGSMALL]], ptr %[[_COMPOUNDLITERAL1]], i32 0, i32 1 |
836 // CHECK: store i32 2, i32* %[[I2]], align 8 | 729 // CHECK: store ptr null, ptr %[[F13]], align 8 |
837 // CHECK: %[[F13:.*]] = getelementptr inbounds %[[STRUCT_STRONGSMALL]], %[[STRUCT_STRONGSMALL]]* %[[_COMPOUNDLITERAL1]], i32 0, i32 1 | 730 // CHECK: store i1 true, ptr %[[CLEANUP_COND4]], align 1 |
838 // CHECK: store i8* null, i8** %[[F13]], align 8 | 731 // CHECK: call void @__copy_assignment_8_8_t0w4_s8(ptr %[[V0]], ptr %[[_COMPOUNDLITERAL1]]) |
839 // CHECK: store i1 true, i1* %[[CLEANUP_COND4]], align 1 | |
840 // CHECK: %[[V4:.*]] = bitcast %[[STRUCT_STRONGSMALL]]* %[[V0]] to i8** | |
841 // CHECK: %[[V5:.*]] = bitcast %[[STRUCT_STRONGSMALL]]* %[[_COMPOUNDLITERAL1]] to i8** | |
842 // CHECK: call void @__copy_assignment_8_8_t0w4_s8(i8** %[[V4]], i8** %[[V5]]) | |
843 | 732 |
844 // CHECK: call void @func( | 733 // CHECK: call void @func( |
845 | 734 |
846 // CHECK: %[[V6:.*]] = bitcast %[[STRUCT_STRONGSMALL]]* %[[_COMPOUNDLITERAL1]] to i8** | 735 // CHECK: call void @__destructor_8_s8(ptr %[[_COMPOUNDLITERAL1]]) |
847 // CHECK: call void @__destructor_8_s8(i8** %[[V6]]) | 736 |
848 | 737 // CHECK: call void @__destructor_8_s8(ptr %[[_COMPOUNDLITERAL]]) |
849 // CHECK: %[[V7:.*]] = bitcast %[[STRUCT_STRONGSMALL]]* %[[_COMPOUNDLITERAL]] to i8** | |
850 // CHECK: call void @__destructor_8_s8(i8** %[[V7]]) | |
851 | 738 |
852 void test_compound_literal2(int c, StrongSmall *p) { | 739 void test_compound_literal2(int c, StrongSmall *p) { |
853 *p = c ? (StrongSmall){ 1, 0 } : (StrongSmall){ 2, 0 }; | 740 *p = c ? (StrongSmall){ 1, 0 } : (StrongSmall){ 2, 0 }; |
854 func(0); | 741 func(0); |
855 } | 742 } |
856 | 743 |
857 // CHECK: define{{.*}} void @test_member_access( | 744 // CHECK: define{{.*}} void @test_member_access( |
858 // CHECK: %[[TMP:.*]] = alloca %[[STRUCT_STRONGSMALL]], | 745 // CHECK: %[[TMP:.*]] = alloca %[[STRUCT_STRONGSMALL]], |
859 // CHECK: %[[V3:.*]] = bitcast %[[STRUCT_STRONGSMALL]]* %[[TMP]] to i8** | 746 // CHECK: call void @__destructor_8_s8(ptr %[[TMP]]) |
860 // CHECK: call void @__destructor_8_s8(i8** %[[V3]]) | |
861 // CHECK: call void @func( | 747 // CHECK: call void @func( |
862 | 748 |
863 void test_member_access(void) { | 749 void test_member_access(void) { |
864 g0 = getStrongSmall().f1; | 750 g0 = getStrongSmall().f1; |
865 func(0); | 751 func(0); |
866 } | 752 } |
867 | 753 |
868 // CHECK: define{{.*}} void @test_member_access2(%{{.*}}* noundef %[[C:.*]]) | 754 // CHECK: define{{.*}} void @test_member_access2(ptr noundef %[[C:.*]]) |
869 // CHECK: %[[COERCE:.*]] = alloca %[[STRUCT_STRONGSMALL]], align 8 | 755 // CHECK: %[[COERCE:.*]] = alloca %[[STRUCT_STRONGSMALL]], align 8 |
870 // CHECK: %[[V8:.*]] = bitcast %[[STRUCT_STRONGSMALL]]* %[[COERCE]] to i8** | 756 // CHECK: call void @__destructor_8_s8(ptr %[[COERCE]]) |
871 // CHECK: call void @__destructor_8_s8(i8** %[[V8]]) | |
872 // CHECK: call void @func( | 757 // CHECK: call void @func( |
873 | 758 |
874 void test_member_access2(C *c) { | 759 void test_member_access2(C *c) { |
875 g0 = [c getStrongSmall].f1; | 760 g0 = [c getStrongSmall].f1; |
876 func(0); | 761 func(0); |
877 } | 762 } |
878 | 763 |
879 // CHECK: define{{.*}} void @test_member_access3( | 764 // CHECK: define{{.*}} void @test_member_access3( |
880 // CHECK: %[[COERCE:.*]] = alloca %[[STRUCT_STRONGSMALL]], align 8 | 765 // CHECK: %[[COERCE:.*]] = alloca %[[STRUCT_STRONGSMALL]], align 8 |
881 // CHECK: %[[V8:.*]] = bitcast %[[STRUCT_STRONGSMALL]]* %[[COERCE]] to i8** | 766 // CHECK: call void @__destructor_8_s8(ptr %[[COERCE]]) |
882 // CHECK: call void @__destructor_8_s8(i8** %[[V8]]) | |
883 // CHECK: call void @func( | 767 // CHECK: call void @func( |
884 | 768 |
885 void test_member_access3(void) { | 769 void test_member_access3(void) { |
886 g0 = [C getStrongSmallClass].f1; | 770 g0 = [C getStrongSmallClass].f1; |
887 func(0); | 771 func(0); |
888 } | 772 } |
889 | 773 |
890 // CHECK: define{{.*}} void @test_member_access4() | 774 // CHECK: define{{.*}} void @test_member_access4() |
891 // CHECK: %[[COERCE:.*]] = alloca %[[STRUCT_STRONGSMALL]], align 8 | 775 // CHECK: %[[COERCE:.*]] = alloca %[[STRUCT_STRONGSMALL]], align 8 |
892 // CHECK: %[[V5:.*]] = bitcast %[[STRUCT_STRONGSMALL]]* %[[COERCE]] to i8** | 776 // CHECK: call void @__destructor_8_s8(ptr %[[COERCE]]) |
893 // CHECK: call void @__destructor_8_s8(i8** %[[V5]]) | |
894 // CHECK: call void @func( | 777 // CHECK: call void @func( |
895 | 778 |
896 void test_member_access4(void) { | 779 void test_member_access4(void) { |
897 g0 = ^{ StrongSmall s; return s; }().f1; | 780 g0 = ^{ StrongSmall s; return s; }().f1; |
898 func(0); | 781 func(0); |
899 } | 782 } |
900 | 783 |
901 // CHECK: define{{.*}} void @test_volatile_variable_reference( | 784 // CHECK: define{{.*}} void @test_volatile_variable_reference( |
902 // CHECK: %[[AGG_TMP_ENSURED:.*]] = alloca %[[STRUCT_STRONGSMALL]], | 785 // CHECK: %[[AGG_TMP_ENSURED:.*]] = alloca %[[STRUCT_STRONGSMALL]], |
903 // CHECK: %[[V1:.*]] = bitcast %[[STRUCT_STRONGSMALL]]* %[[AGG_TMP_ENSURED]] to i8** | 786 // CHECK: call void @__copy_constructor_8_8_tv0w32_sv8(ptr %[[AGG_TMP_ENSURED]], ptr %{{.*}}) |
904 // CHECK: %[[V2:.*]] = bitcast %[[STRUCT_STRONGSMALL]]* %{{.*}} to i8** | 787 // CHECK: call void @__destructor_8_s8(ptr %[[AGG_TMP_ENSURED]]) |
905 // CHECK: call void @__copy_constructor_8_8_tv0w32_sv8(i8** %[[V1]], i8** %[[V2]]) | |
906 // CHECK: %[[V3:.*]] = bitcast %[[STRUCT_STRONGSMALL]]* %[[AGG_TMP_ENSURED]] to i8** | |
907 // CHECK: call void @__destructor_8_s8(i8** %[[V3]]) | |
908 // CHECK: call void @func( | 788 // CHECK: call void @func( |
909 | 789 |
910 void test_volatile_variable_reference(volatile StrongSmall *a) { | 790 void test_volatile_variable_reference(volatile StrongSmall *a) { |
911 (void)*a; | 791 (void)*a; |
912 func(0); | 792 func(0); |
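Discarding '*a' still has to perform the volatile reads, so IRGen materializes the AGG_TMP_ENSURED temporary with the volatile copy constructor and then destroys it. A minimal standalone sketch of the forced read, with SmallVol as a hypothetical stand-in for StrongSmall:

typedef struct { int x; void *f1; } SmallVol;   /* stand-in for StrongSmall */

static void force_volatile_struct_read(volatile SmallVol *a) {
  SmallVol tmp;                /* AGG_TMP_ENSURED in the IR above */
  tmp.x  = a->x;               /* volatile loads that may not be dropped */
  tmp.f1 = a->f1;              /* under ARC this copy also retains the field */
  (void)tmp;
  /* the temporary is then destroyed: __destructor_8_s8(&tmp) releases tmp.f1 */
}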
923 void test_zero_bitfield(void) { | 803 void test_zero_bitfield(void) { |
924 struct ZeroBitfield volatile a, b; | 804 struct ZeroBitfield volatile a, b; |
925 a = b; | 805 a = b; |
926 } | 806 } |
927 | 807 |
928 // CHECK-LABEL: define{{.*}} i8* @test_conditional0( | 808 // CHECK-LABEL: define{{.*}} ptr @test_conditional0( |
929 // CHECK: %[[TMP:.*]] = alloca %[[STRUCT_STRONGSMALL]], align 8 | 809 // CHECK: %[[TMP:.*]] = alloca %[[STRUCT_STRONGSMALL]], align 8 |
930 | 810 |
931 // CHECK: %[[V1:.*]] = bitcast %[[STRUCT_STRONGSMALL]]* %[[TMP]] to i8** | 811 // CHECK: call void @__copy_constructor_8_8_t0w4_s8(ptr %[[TMP]], ptr @g2) |
932 // CHECK: call void @__copy_constructor_8_8_t0w4_s8(i8** %[[V1]], i8** bitcast (%[[STRUCT_STRONGSMALL]]* @g2 to i8**)) | 812 |
933 | 813 // CHECK: call void @__copy_constructor_8_8_t0w4_s8(ptr %[[TMP]], ptr @g1) |
934 // CHECK: %[[V2:.*]] = bitcast %[[STRUCT_STRONGSMALL]]* %[[TMP]] to i8** | 814 |
935 // CHECK: call void @__copy_constructor_8_8_t0w4_s8(i8** %[[V2]], i8** bitcast (%[[STRUCT_STRONGSMALL]]* @g1 to i8**)) | 815 // CHECK: call void @__destructor_8_s8(ptr %[[TMP]]) |
936 | |
937 // CHECK: %[[V5:.*]] = bitcast %[[STRUCT_STRONGSMALL]]* %[[TMP]] to i8** | |
938 // CHECK: call void @__destructor_8_s8(i8** %[[V5]]) | |
939 // CHECK: @llvm.objc.autoreleaseReturnValue | 816 // CHECK: @llvm.objc.autoreleaseReturnValue |
940 | 817 |
941 id test_conditional0(int c) { | 818 id test_conditional0(int c) { |
942 return (c ? g2 : g1).f1; | 819 return (c ? g2 : g1).f1; |
943 } | 820 } |
944 | 821 |
945 // CHECK-LABEL: define{{.*}} i8* @test_conditional1( | 822 // CHECK-LABEL: define{{.*}} ptr @test_conditional1( |
946 // CHECK-NOT: call void @__destructor | 823 // CHECK-NOT: call void @__destructor |
947 | 824 |
948 id test_conditional1(int c) { | 825 id test_conditional1(int c) { |
949 calleeStrongSmall(c ? g2 : g1); | 826 calleeStrongSmall(c ? g2 : g1); |
950 } | 827 } |
951 | 828 |
952 // CHECK-LABEL: define{{.*}} i8* @test_assignment0( | 829 // CHECK-LABEL: define{{.*}} ptr @test_assignment0( |
953 // CHECK: %[[TMP:.*]] = alloca %[[STRUCT_STRONGSMALL]], align 8 | 830 // CHECK: %[[TMP:.*]] = alloca %[[STRUCT_STRONGSMALL]], align 8 |
954 // CHECK: call void @__copy_assignment_8_8_t0w4_s8(i8** bitcast (%[[STRUCT_STRONGSMALL]]* @g2 to i8**), i8** bitcast (%[[STRUCT_STRONGSMALL]]* @g1 to i8**)) | 831 // CHECK: call void @__copy_assignment_8_8_t0w4_s8(ptr @g2, ptr @g1) |
955 // CHECK: %[[V0:.*]] = bitcast %[[STRUCT_STRONGSMALL]]* %[[TMP]] to i8** | 832 // CHECK: call void @__copy_constructor_8_8_t0w4_s8(ptr %[[TMP]], ptr @g2) |
956 // CHECK: call void @__copy_constructor_8_8_t0w4_s8(i8** %[[V0]], i8** bitcast (%[[STRUCT_STRONGSMALL]]* @g2 to i8**)) | 833 // CHECK: call void @__destructor_8_s8(ptr %[[TMP]]) |
957 // CHECK: %[[V3:.*]] = bitcast %[[STRUCT_STRONGSMALL]]* %[[TMP]] to i8** | |
958 // CHECK: call void @__destructor_8_s8(i8** %[[V3]]) | |
959 | 834 |
960 id test_assignment0(void) { | 835 id test_assignment0(void) { |
961 return (g2 = g1).f1; | 836 return (g2 = g1).f1; |
962 } | 837 } |
963 | 838 |
964 // CHECK-LABEL: define{{.*}} i8* @test_assignment1( | 839 // CHECK-LABEL: define{{.*}} ptr @test_assignment1( |
965 // CHECK-NOT: call void @__destructor | 840 // CHECK-NOT: call void @__destructor |
966 | 841 |
967 id test_assignment1(void) { | 842 id test_assignment1(void) { |
968 calleeStrongSmall(g2 = g1); | 843 calleeStrongSmall(g2 = g1); |
969 } | 844 } |
970 | 845 |
971 // CHECK-LABEL: define{{.*}} void @test_null_reveiver( | 846 // CHECK-LABEL: define{{.*}} void @test_null_reveiver( |
972 // CHECK: %[[AGG_TMP:.*]] = alloca %[[STRUCT_STRONGSMALL]], align 8 | 847 // CHECK: %[[AGG_TMP:.*]] = alloca %[[STRUCT_STRONGSMALL]], align 8 |
973 // CHECK: br i1 | 848 // CHECK: br i1 |
974 | 849 |
975 // CHECK: %[[V7:.*]] = bitcast %[[STRUCT_STRONGSMALL]]* %[[AGG_TMP]] to [2 x i64]* | 850 // CHECK: %[[V8:.*]] = load [2 x i64], ptr %[[AGG_TMP]], align 8 |
976 // CHECK: %[[V8:.*]] = load [2 x i64], [2 x i64]* %[[V7]], align 8 | 851 // CHECK: call void @objc_msgSend({{.*}}, [2 x i64] %[[V8]]) |
977 // CHECK: call void bitcast (i8* (i8*, i8*, ...)* @objc_msgSend to void ({{.*}}, [2 x i64] %[[V8]]) | |
978 // CHECK: br | 852 // CHECK: br |
979 | 853 |
980 // CHECK: %[[V9:.*]] = bitcast %[[STRUCT_STRONGSMALL]]* %[[AGG_TMP]] to i8** | 854 // CHECK: call void @__destructor_8_s8(ptr %[[AGG_TMP]]) #4 |
981 // CHECK: call void @__destructor_8_s8(i8** %[[V9]]) #4 | |
982 // CHECK: br | 855 // CHECK: br |
983 | 856 |
984 void test_null_reveiver(C *c) { | 857 void test_null_reveiver(C *c) { |
985 [c m:getStrongSmall()]; | 858 [c m:getStrongSmall()]; |
986 } | 859 } |
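When the receiver is nil the message send is skipped, so the callee never takes ownership of the struct argument; the __destructor_8_s8 call between the branches above is the caller releasing the strong field on that path. A rough standalone sketch of the two paths, with every name here (SmallArg, make_small, send_consuming, release_strong_field) a hypothetical stand-in:

typedef struct { int x; void *f1; } SmallArg;              /* stand-in for StrongSmall */
extern SmallArg make_small(void);                          /* stand-in for getStrongSmall */
extern void send_consuming(void *receiver, SmallArg arg);  /* stand-in for the message send */
extern void release_strong_field(SmallArg *s);             /* stand-in for __destructor_8_s8 */

static void call_with_possible_nil(void *receiver) {
  SmallArg tmp = make_small();        /* AGG_TMP */
  if (receiver)
    send_consuming(receiver, tmp);    /* callee takes ownership of tmp.f1 */
  else
    release_strong_field(&tmp);       /* nil receiver: the caller must release tmp.f1 */
}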