comparison llvm/test/CodeGen/AMDGPU/sgpr-copy.ll @ 252:1f2b6ac9f198 llvm-original
LLVM16-1
author   | Shinji KONO <kono@ie.u-ryukyu.ac.jp>
date     | Fri, 18 Aug 2023 09:04:13 +0900
parents  | c4bab56944e8
children |
--- llvm/test/CodeGen/AMDGPU/sgpr-copy.ll  (237:c80f45b162ad)
+++ llvm/test/CodeGen/AMDGPU/sgpr-copy.ll  (252:1f2b6ac9f198)
@@ -3,14 +3,13 @@
 
 ; CHECK-LABEL: {{^}}phi1:
 ; CHECK: s_buffer_load_dword [[DST:s[0-9]]], {{s\[[0-9]+:[0-9]+\]}}, 0x0
 ; CHECK: ; %bb.1: ; %ELSE
 ; CHECK: s_xor_b32 s{{[0-9]}}, [[DST]]
-define amdgpu_ps void @phi1(<4 x i32> addrspace(4)* inreg %arg, <4 x i32> addrspace(4)* inreg %arg1, <8 x i32> addrspace(4)* inreg %arg2, i32 inreg %arg3, <2 x i32> %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <3 x i32> %arg7, <2 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, float %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19) #0 {
+define amdgpu_ps void @phi1(ptr addrspace(4) inreg %arg, ptr addrspace(4) inreg %arg1, ptr addrspace(4) inreg %arg2, i32 inreg %arg3, <2 x i32> %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <3 x i32> %arg7, <2 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, float %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19) #0 {
 main_body:
-%tmp = getelementptr <4 x i32>, <4 x i32> addrspace(4)* %arg, i32 0
-%tmp20 = load <4 x i32>, <4 x i32> addrspace(4)* %tmp, !tbaa !0
+%tmp20 = load <4 x i32>, ptr addrspace(4) %arg, !tbaa !0
 %tmp21 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %tmp20, i32 0, i32 0)
 %tmp22 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %tmp20, i32 16, i32 0)
 %tmp23 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %tmp20, i32 32, i32 0)
 %tmp24 = fptosi float %tmp22 to i32
 %tmp25 = icmp ne i32 %tmp24, 0
@@ -27,14 +26,13 @@
 ret void
 }
 
 ; Make sure this program doesn't crash
 ; CHECK-LABEL: {{^}}phi2:
-define amdgpu_ps void @phi2(<4 x i32> addrspace(4)* inreg %arg, <4 x i32> addrspace(4)* inreg %arg1, <8 x i32> addrspace(4)* inreg %arg2, i32 inreg %arg3, <2 x i32> %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <3 x i32> %arg7, <2 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, float %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19) #1 {
+define amdgpu_ps void @phi2(ptr addrspace(4) inreg %arg, ptr addrspace(4) inreg %arg1, ptr addrspace(4) inreg %arg2, i32 inreg %arg3, <2 x i32> %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <3 x i32> %arg7, <2 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, float %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19) #1 {
 main_body:
-%tmp = getelementptr <4 x i32>, <4 x i32> addrspace(4)* %arg, i32 0
-%tmp20 = load <4 x i32>, <4 x i32> addrspace(4)* %tmp, !tbaa !0
+%tmp20 = load <4 x i32>, ptr addrspace(4) %arg, !tbaa !0
 %tmp21 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %tmp20, i32 16, i32 0)
 %tmp22 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %tmp20, i32 32, i32 0)
 %tmp23 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %tmp20, i32 36, i32 0)
 %tmp24 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %tmp20, i32 40, i32 0)
 %tmp25 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %tmp20, i32 48, i32 0)
@@ -46,14 +44,12 @@
 %tmp31 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %tmp20, i32 76, i32 0)
 %tmp32 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %tmp20, i32 80, i32 0)
 %tmp33 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %tmp20, i32 84, i32 0)
 %tmp34 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %tmp20, i32 88, i32 0)
 %tmp35 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %tmp20, i32 92, i32 0)
-%tmp36 = getelementptr <8 x i32>, <8 x i32> addrspace(4)* %arg2, i32 0
-%tmp37 = load <8 x i32>, <8 x i32> addrspace(4)* %tmp36, !tbaa !0
-%tmp38 = getelementptr <4 x i32>, <4 x i32> addrspace(4)* %arg1, i32 0
-%tmp39 = load <4 x i32>, <4 x i32> addrspace(4)* %tmp38, !tbaa !0
+%tmp37 = load <8 x i32>, ptr addrspace(4) %arg2, !tbaa !0
+%tmp39 = load <4 x i32>, ptr addrspace(4) %arg1, !tbaa !0
 %i.i = extractelement <2 x i32> %arg5, i32 0
 %j.i = extractelement <2 x i32> %arg5, i32 1
 %i.f.i = bitcast i32 %i.i to float
 %j.f.i = bitcast i32 %j.i to float
 %p1.i = call float @llvm.amdgcn.interp.p1(float %i.f.i, i32 0, i32 0, i32 %arg3) #1
@@ -167,14 +163,13 @@
 ret void
 }
 
 ; We just want to make sure the program doesn't crash
 ; CHECK-LABEL: {{^}}loop:
-define amdgpu_ps void @loop(<4 x i32> addrspace(4)* inreg %arg, <4 x i32> addrspace(4)* inreg %arg1, <8 x i32> addrspace(4)* inreg %arg2, i32 inreg %arg3, <2 x i32> %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <3 x i32> %arg7, <2 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, float %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19) #0 {
+define amdgpu_ps void @loop(ptr addrspace(4) inreg %arg, ptr addrspace(4) inreg %arg1, ptr addrspace(4) inreg %arg2, i32 inreg %arg3, <2 x i32> %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <3 x i32> %arg7, <2 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, float %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19) #0 {
 main_body:
-%tmp = getelementptr <4 x i32>, <4 x i32> addrspace(4)* %arg, i32 0
-%tmp20 = load <4 x i32>, <4 x i32> addrspace(4)* %tmp, !tbaa !0
+%tmp20 = load <4 x i32>, ptr addrspace(4) %arg, !tbaa !0
 %tmp21 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %tmp20, i32 0, i32 0)
 %tmp22 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %tmp20, i32 4, i32 0)
 %tmp23 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %tmp20, i32 8, i32 0)
 %tmp24 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %tmp20, i32 12, i32 0)
 %tmp25 = fptosi float %tmp24 to i32
@@ -220,19 +215,16 @@
 ; CHECK-DAG: v_mov_b32_e32 v[[SAMPLE_HI:[0-9]+]], 13
 
 ; CHECK: image_sample v{{\[[0-9]+:[0-9]+\]}}, v[[[SAMPLE_LO]]:[[SAMPLE_HI]]]
 ; CHECK: exp
 ; CHECK: s_endpgm
-define amdgpu_ps void @sample_v3([17 x <4 x i32>] addrspace(4)* inreg %arg, [32 x <4 x i32>] addrspace(4)* inreg %arg1, [16 x <8 x i32>] addrspace(4)* inreg %arg2, float inreg %arg3, i32 inreg %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <2 x i32> %arg7, <3 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, <2 x i32> %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19, float %arg20) #0 {
+define amdgpu_ps void @sample_v3(ptr addrspace(4) inreg %arg, ptr addrspace(4) inreg %arg1, ptr addrspace(4) inreg %arg2, float inreg %arg3, i32 inreg %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <2 x i32> %arg7, <3 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, <2 x i32> %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19, float %arg20) #0 {
 entry:
-%tmp = getelementptr [17 x <4 x i32>], [17 x <4 x i32>] addrspace(4)* %arg, i64 0, i32 0
-%tmp21 = load <4 x i32>, <4 x i32> addrspace(4)* %tmp, !tbaa !0
+%tmp21 = load <4 x i32>, ptr addrspace(4) %arg, !tbaa !0
 %tmp22 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %tmp21, i32 16, i32 0)
-%tmp23 = getelementptr [16 x <8 x i32>], [16 x <8 x i32>] addrspace(4)* %arg2, i64 0, i32 0
-%tmp24 = load <8 x i32>, <8 x i32> addrspace(4)* %tmp23, !tbaa !0
-%tmp25 = getelementptr [32 x <4 x i32>], [32 x <4 x i32>] addrspace(4)* %arg1, i64 0, i32 0
-%tmp26 = load <4 x i32>, <4 x i32> addrspace(4)* %tmp25, !tbaa !0
+%tmp24 = load <8 x i32>, ptr addrspace(4) %arg2, !tbaa !0
+%tmp26 = load <4 x i32>, ptr addrspace(4) %arg1, !tbaa !0
 %tmp27 = fcmp oeq float %tmp22, 0.000000e+00
 %tmp26.bc = bitcast <4 x i32> %tmp26 to <4 x i32>
 br i1 %tmp27, label %if, label %else
 
 if: ; preds = %entry
@@ -259,13 +251,13 @@
 
 ; CHECK-LABEL: {{^}}copy1:
 ; CHECK: buffer_load_dword
 ; CHECK: v_add
 ; CHECK: s_endpgm
-define amdgpu_kernel void @copy1(float addrspace(1)* %out, float addrspace(1)* %in0) {
+define amdgpu_kernel void @copy1(ptr addrspace(1) %out, ptr addrspace(1) %in0) {
 entry:
-%tmp = load float, float addrspace(1)* %in0
+%tmp = load float, ptr addrspace(1) %in0
 %tmp1 = fcmp oeq float %tmp, 0.000000e+00
 br i1 %tmp1, label %if0, label %endif
 
 if0: ; preds = %entry
 %tmp2 = bitcast float %tmp to i32
@@ -277,18 +269,18 @@
 br label %endif
 
 endif: ; preds = %if1, %if0, %entry
 %tmp5 = phi i32 [ 0, %entry ], [ %tmp2, %if0 ], [ %tmp4, %if1 ]
 %tmp6 = bitcast i32 %tmp5 to float
-store float %tmp6, float addrspace(1)* %out
+store float %tmp6, ptr addrspace(1) %out
 ret void
 }
 
 ; This test is just checking that we don't crash / assertion fail.
 ; CHECK-LABEL: {{^}}copy2:
 ; CHECK: s_endpgm
-define amdgpu_ps void @copy2([17 x <4 x i32>] addrspace(4)* inreg %arg, [32 x <4 x i32>] addrspace(4)* inreg %arg1, [16 x <8 x i32>] addrspace(4)* inreg %arg2, float inreg %arg3, i32 inreg %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <2 x i32> %arg7, <3 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, <2 x i32> %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19, float %arg20) #0 {
+define amdgpu_ps void @copy2(ptr addrspace(4) inreg %arg, ptr addrspace(4) inreg %arg1, ptr addrspace(4) inreg %arg2, float inreg %arg3, i32 inreg %arg4, <2 x i32> %arg5, <2 x i32> %arg6, <2 x i32> %arg7, <3 x i32> %arg8, <2 x i32> %arg9, <2 x i32> %arg10, <2 x i32> %arg11, float %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, float %arg19, float %arg20) #0 {
 entry:
 br label %LOOP68
 
 LOOP68: ; preds = %ENDIF69, %entry
 %temp4.7 = phi float [ 0.000000e+00, %entry ], [ %v, %ENDIF69 ]
@@ -322,19 +314,16 @@
 
 ; [[END]]:
 ; CHECK: v_add_{{[iu]}}32_e32 v[[ADD:[0-9]+]], vcc, 1, v{{[0-9]+}}
 ; CHECK: image_sample v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+}}:[[ADD]]]
 ; CHECK: s_branch
-define amdgpu_ps void @sample_rsrc([6 x <4 x i32>] addrspace(4)* inreg %arg, [17 x <4 x i32>] addrspace(4)* inreg %arg1, [16 x <4 x i32>] addrspace(4)* inreg %arg2, [32 x <8 x i32>] addrspace(4)* inreg %arg3, float inreg %arg4, i32 inreg %arg5, <2 x i32> %arg6, <2 x i32> %arg7, <2 x i32> %arg8, <3 x i32> %arg9, <2 x i32> %arg10, <2 x i32> %arg11, <2 x i32> %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, i32 %arg19, float %arg20, float %arg21) #0 {
+define amdgpu_ps void @sample_rsrc(ptr addrspace(4) inreg %arg, ptr addrspace(4) inreg %arg1, ptr addrspace(4) inreg %arg2, ptr addrspace(4) inreg %arg3, float inreg %arg4, i32 inreg %arg5, <2 x i32> %arg6, <2 x i32> %arg7, <2 x i32> %arg8, <3 x i32> %arg9, <2 x i32> %arg10, <2 x i32> %arg11, <2 x i32> %arg12, float %arg13, float %arg14, float %arg15, float %arg16, float %arg17, float %arg18, i32 %arg19, float %arg20, float %arg21) #0 {
 bb:
-%tmp = getelementptr [17 x <4 x i32>], [17 x <4 x i32>] addrspace(4)* %arg1, i32 0, i32 0
-%tmp22 = load <4 x i32>, <4 x i32> addrspace(4)* %tmp, !tbaa !3
+%tmp22 = load <4 x i32>, ptr addrspace(4) %arg1, !tbaa !3
 %tmp23 = call float @llvm.amdgcn.s.buffer.load.f32(<4 x i32> %tmp22, i32 16, i32 0)
-%tmp25 = getelementptr [32 x <8 x i32>], [32 x <8 x i32>] addrspace(4)* %arg3, i32 0, i32 0
-%tmp26 = load <8 x i32>, <8 x i32> addrspace(4)* %tmp25, !tbaa !3
-%tmp27 = getelementptr [16 x <4 x i32>], [16 x <4 x i32>] addrspace(4)* %arg2, i32 0, i32 0
-%tmp28 = load <4 x i32>, <4 x i32> addrspace(4)* %tmp27, !tbaa !3
+%tmp26 = load <8 x i32>, ptr addrspace(4) %arg3, !tbaa !3
+%tmp28 = load <4 x i32>, ptr addrspace(4) %arg2, !tbaa !3
 %i.i = extractelement <2 x i32> %arg7, i32 0
 %j.i = extractelement <2 x i32> %arg7, i32 1
 %i.f.i = bitcast i32 %i.i to float
 %j.f.i = bitcast i32 %j.i to float
 %p1.i = call float @llvm.amdgcn.interp.p1(float %i.f.i, i32 0, i32 0, i32 %arg5) #0
@@ -372,30 +361,30 @@
 }
 
 ; Check the resource descriptor is stored in an sgpr.
 ; CHECK-LABEL: {{^}}mimg_srsrc_sgpr:
 ; CHECK: image_sample v{{[0-9]+}}, v[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}] dmask:0x1
-define amdgpu_ps void @mimg_srsrc_sgpr([34 x <8 x i32>] addrspace(4)* inreg %arg) #0 {
+define amdgpu_ps void @mimg_srsrc_sgpr(ptr addrspace(4) inreg %arg) #0 {
 bb:
 %tid = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0) #0
-%tmp7 = getelementptr [34 x <8 x i32>], [34 x <8 x i32>] addrspace(4)* %arg, i32 0, i32 %tid
-%tmp8 = load <8 x i32>, <8 x i32> addrspace(4)* %tmp7, align 32, !tbaa !0
+%tmp7 = getelementptr [34 x <8 x i32>], ptr addrspace(4) %arg, i32 0, i32 %tid
+%tmp8 = load <8 x i32>, ptr addrspace(4) %tmp7, align 32, !tbaa !0
 %tmp = call <4 x float> @llvm.amdgcn.image.sample.2d.v4f32.f32(i32 15, float 7.500000e-01, float 2.500000e-01, <8 x i32> %tmp8, <4 x i32> undef, i1 0, i32 0, i32 0)
 %tmp10 = extractelement <4 x float> %tmp, i32 0
 %tmp12 = call <2 x half> @llvm.amdgcn.cvt.pkrtz(float undef, float %tmp10)
 call void @llvm.amdgcn.exp.compr.v2f16(i32 0, i32 15, <2 x half> %tmp12, <2 x half> undef, i1 true, i1 true) #0
 ret void
 }
 
 ; Check the sampler is stored in an sgpr.
 ; CHECK-LABEL: {{^}}mimg_ssamp_sgpr:
 ; CHECK: image_sample v{{[0-9]+}}, v[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}] dmask:0x1
-define amdgpu_ps void @mimg_ssamp_sgpr([17 x <4 x i32>] addrspace(4)* inreg %arg) #0 {
+define amdgpu_ps void @mimg_ssamp_sgpr(ptr addrspace(4) inreg %arg) #0 {
 bb:
 %tid = call i32 @llvm.amdgcn.mbcnt.lo(i32 -1, i32 0) #0
-%tmp7 = getelementptr [17 x <4 x i32>], [17 x <4 x i32>] addrspace(4)* %arg, i32 0, i32 %tid
-%tmp8 = load <4 x i32>, <4 x i32> addrspace(4)* %tmp7, align 16, !tbaa !0
+%tmp7 = getelementptr [17 x <4 x i32>], ptr addrspace(4) %arg, i32 0, i32 %tid
+%tmp8 = load <4 x i32>, ptr addrspace(4) %tmp7, align 16, !tbaa !0
 %tmp = call <4 x float> @llvm.amdgcn.image.sample.2d.v4f32.f32(i32 15, float 7.500000e-01, float 2.500000e-01, <8 x i32> undef, <4 x i32> %tmp8, i1 0, i32 0, i32 0)
 %tmp10 = extractelement <4 x float> %tmp, i32 0
 %tmp12 = call <2 x half> @llvm.amdgcn.cvt.pkrtz(float %tmp10, float undef)
 call void @llvm.amdgcn.exp.compr.v2f16(i32 0, i32 15, <2 x half> %tmp12, <2 x half> undef, i1 true, i1 true) #0
 ret void