; RUN: llc -march=amdgcn -mtriple=amdgcn-amd-amdhsa -mcpu=kaveri -mattr=-promote-alloca -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=HSA -check-prefix=CI %s
; RUN: llc -march=amdgcn -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -mattr=-promote-alloca -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=HSA -check-prefix=GFX9 %s

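; Casting a group (LDS) pointer to flat needs the 32-bit shared aperture for
; the high half of the pointer: CI loads it from the queue pointer (which is
; why enable_sgpr_queue_ptr is 1 below), GFX9 reads the src_shared_base
; register instead.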
; HSA-LABEL: {{^}}use_group_to_flat_addrspacecast:
; HSA: enable_sgpr_private_segment_buffer = 1
; HSA: enable_sgpr_dispatch_ptr = 0
; CI: enable_sgpr_queue_ptr = 1
; GFX9: enable_sgpr_queue_ptr = 0

; CI-DAG: s_load_dword [[PTR:s[0-9]+]], s[6:7], 0x0{{$}}
; CI-DAG: s_load_dword [[APERTURE:s[0-9]+]], s[4:5], 0x10{{$}}
; CI-DAG: s_cmp_lg_u32 [[PTR]], -1
; CI-DAG: s_cselect_b32 s[[HI:[0-9]+]], [[APERTURE]], 0
; CI-DAG: s_cselect_b32 s[[LO:[0-9]+]], [[PTR]], 0

; GFX9-DAG: s_mov_b64 s[{{[0-9]+}}:[[HIBASE:[0-9]+]]], src_shared_base

; HSA-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 7
; GFX9-DAG: s_load_dword [[PTR:s[0-9]+]], s[4:5], 0x0{{$}}

; GFX9: s_cmp_lg_u32 [[PTR]], -1
; GFX9-DAG: s_cselect_b32 s[[LO:[0-9]+]], s[[HIBASE]], 0
; GFX9-DAG: s_cselect_b32 s[[HI:[0-9]+]], [[PTR]], 0

; HSA: flat_store_dword v[[[LO]]:[[HI]]], [[K]]

; At most 2 digits. Make sure src_shared_base is not counted as a high
; number SGPR.

; HSA: NumSgprs: {{[0-9]+}}
define amdgpu_kernel void @use_group_to_flat_addrspacecast(ptr addrspace(3) %ptr) #0 {
  %stof = addrspacecast ptr addrspace(3) %ptr to ptr
  store volatile i32 7, ptr %stof
  ret void
}

; Test handling inside a non-kernel
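; The incoming pointer argument is in a VGPR here, so the null check uses
; VALU v_cmp/v_cndmask instead of the scalar s_cmp/s_cselect used in kernels.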
; HSA-LABEL: {{^}}use_group_to_flat_addrspacecast_func:
; CI-DAG: s_load_dword [[APERTURE:s[0-9]+]], s[6:7], 0x10{{$}}
; CI-DAG: v_mov_b32_e32 [[VAPERTURE:v[0-9]+]], [[APERTURE]]
; CI-DAG: v_cmp_ne_u32_e32 vcc, -1, v0
; CI-DAG: v_cndmask_b32_e32 v[[HI:[0-9]+]], 0, [[VAPERTURE]], vcc
; CI-DAG: v_cndmask_b32_e32 v[[LO:[0-9]+]], 0, v0

; GFX9-DAG: s_mov_b64 s[{{[0-9]+}}:[[HIBASE:[0-9]+]]], src_shared_base

; HSA-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 7

; GFX9-DAG: v_mov_b32_e32 v[[VREG_HIBASE:[0-9]+]], s[[HIBASE]]
; GFX9-DAG: v_cmp_ne_u32_e32 vcc, -1, v0
; GFX9-DAG: v_cndmask_b32_e32 v[[LO:[0-9]+]], 0, v0, vcc
; GFX9-DAG: v_cndmask_b32_e32 v[[HI:[0-9]+]], 0, v[[VREG_HIBASE]], vcc

; HSA: flat_store_dword v[[[LO]]:[[HI]]], [[K]]
define void @use_group_to_flat_addrspacecast_func(ptr addrspace(3) %ptr) #0 {
  %stof = addrspacecast ptr addrspace(3) %ptr to ptr
  store volatile i32 7, ptr %stof
  ret void
}

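; The private (scratch) case is analogous, but uses the private aperture:
; loaded from the queue pointer at the next dword offset (0x11) on CI, read
; from src_private_base on GFX9.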
; HSA-LABEL: {{^}}use_private_to_flat_addrspacecast:
; HSA: enable_sgpr_private_segment_buffer = 1
; HSA: enable_sgpr_dispatch_ptr = 0
; CI: enable_sgpr_queue_ptr = 1
; GFX9: enable_sgpr_queue_ptr = 0

; CI-DAG: s_load_dword [[PTR:s[0-9]+]], s[6:7], 0x0{{$}}
; CI-DAG: s_load_dword [[APERTURE:s[0-9]+]], s[4:5], 0x11{{$}}

; CI-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 7
; CI-DAG: s_cmp_lg_u32 [[PTR]], -1
; CI-DAG: s_cselect_b32 s[[HI:[0-9]+]], [[APERTURE]], 0
; CI-DAG: s_cselect_b32 s[[LO:[0-9]+]], [[PTR]], 0

; GFX9-DAG: s_load_dword [[PTR:s[0-9]+]], s[4:5], 0x0{{$}}
; GFX9-DAG: s_mov_b64 s[{{[0-9]+}}:[[HIBASE:[0-9]+]]], src_private_base

; GFX9-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 7
; GFX9: s_cmp_lg_u32 [[PTR]], -1
; GFX9: s_cselect_b32 s[[LO:[0-9]+]], s[[HIBASE]], 0
; GFX9: s_cselect_b32 s[[HI:[0-9]+]], [[PTR]], 0

; HSA: flat_store_dword v[[[LO]]:[[HI]]], [[K]]

; HSA: NumSgprs: {{[0-9]+}}
define amdgpu_kernel void @use_private_to_flat_addrspacecast(ptr addrspace(5) %ptr) #0 {
  %stof = addrspacecast ptr addrspace(5) %ptr to ptr
  store volatile i32 7, ptr %stof
  ret void
}

; no-op
; HSA-LABEL: {{^}}use_global_to_flat_addrspacecast:
; HSA: enable_sgpr_queue_ptr = 0

; HSA: s_load_dwordx2 s[[[PTRLO:[0-9]+]]:[[PTRHI:[0-9]+]]]
; HSA-DAG: v_mov_b32_e32 v[[VPTRLO:[0-9]+]], s[[PTRLO]]
; HSA-DAG: v_mov_b32_e32 v[[VPTRHI:[0-9]+]], s[[PTRHI]]
; HSA-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 7
; HSA: flat_store_dword v[[[VPTRLO]]:[[VPTRHI]]], [[K]]
define amdgpu_kernel void @use_global_to_flat_addrspacecast(ptr addrspace(1) %ptr) #0 {
  %stof = addrspacecast ptr addrspace(1) %ptr to ptr
  store volatile i32 7, ptr %stof
  ret void
}

; no-op
; HSA-LABEL: {{^}}use_constant_to_flat_addrspacecast:
; HSA: s_load_dwordx2 s[[[PTRLO:[0-9]+]]:[[PTRHI:[0-9]+]]]
; HSA-DAG: v_mov_b32_e32 v[[VPTRLO:[0-9]+]], s[[PTRLO]]
; HSA-DAG: v_mov_b32_e32 v[[VPTRHI:[0-9]+]], s[[PTRHI]]
; HSA: flat_load_dword v{{[0-9]+}}, v[[[VPTRLO]]:[[VPTRHI]]]
define amdgpu_kernel void @use_constant_to_flat_addrspacecast(ptr addrspace(4) %ptr) #0 {
  %stof = addrspacecast ptr addrspace(4) %ptr to ptr
  %ld = load volatile i32, ptr %stof
  ret void
}

; HSA-LABEL: {{^}}use_constant_to_global_addrspacecast:
; HSA: s_load_dwordx2 s[[[PTRLO:[0-9]+]]:[[PTRHI:[0-9]+]]]
; CI-DAG: v_mov_b32_e32 v[[VPTRLO:[0-9]+]], s[[PTRLO]]
; CI-DAG: v_mov_b32_e32 v[[VPTRHI:[0-9]+]], s[[PTRHI]]
; CI: {{flat|global}}_load_dword v{{[0-9]+}}, v[[[VPTRLO]]:[[VPTRHI]]]

; GFX9: v_mov_b32_e32 [[ZERO:v[0-9]+]], 0{{$}}
; GFX9: global_load_dword v{{[0-9]+}}, [[ZERO]], s[[[PTRLO]]:[[PTRHI]]]
define amdgpu_kernel void @use_constant_to_global_addrspacecast(ptr addrspace(4) %ptr) #0 {
  %stof = addrspacecast ptr addrspace(4) %ptr to ptr addrspace(1)
  %ld = load volatile i32, ptr addrspace(1) %stof
  ret void
}

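; Casting flat to group keeps only the low 32 bits of the pointer; a flat
; null pointer must become the group null value, -1.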
; HSA-LABEL: {{^}}use_flat_to_group_addrspacecast:
; HSA: enable_sgpr_private_segment_buffer = 1
; HSA: enable_sgpr_dispatch_ptr = 0
; HSA: enable_sgpr_queue_ptr = 0

; HSA: s_load_dwordx2 s[[[PTR_LO:[0-9]+]]:[[PTR_HI:[0-9]+]]]
; CI-DAG: v_cmp_ne_u64_e64 s[[[CMP_LO:[0-9]+]]:[[CMP_HI:[0-9]+]]], s[[[PTR_LO]]:[[PTR_HI]]], 0{{$}}
; CI-DAG: s_and_b64 s{{\[[0-9]+:[0-9]+\]}}, s[[[CMP_LO]]:[[CMP_HI]]], exec
; CI-DAG: s_cselect_b32 [[CASTPTR:s[0-9]+]], s[[PTR_LO]], -1
; CI-DAG: v_mov_b32_e32 [[VCASTPTR:v[0-9]+]], [[CASTPTR]]
; HSA-DAG: v_mov_b32_e32 v[[K:[0-9]+]], 0{{$}}
; GFX9-DAG: s_cmp_lg_u64 s[[[CMP_LO:[0-9]+]]:[[CMP_HI:[0-9]+]]], 0
; GFX9-DAG: s_cselect_b32 s[[PTR_LO]], s[[PTR_LO]], -1
; GFX9-DAG: v_mov_b32_e32 [[CASTPTR:v[0-9]+]], s[[PTR_LO]]
; CI-DAG: ds_write_b32 [[VCASTPTR]], v[[K]]
; GFX9-DAG: ds_write_b32 [[CASTPTR]], v[[K]]
define amdgpu_kernel void @use_flat_to_group_addrspacecast(ptr %ptr) #0 {
  %ftos = addrspacecast ptr %ptr to ptr addrspace(3)
  store volatile i32 0, ptr addrspace(3) %ftos
  ret void
}

; HSA-LABEL: {{^}}use_flat_to_private_addrspacecast:
; HSA: enable_sgpr_private_segment_buffer = 1
; HSA: enable_sgpr_dispatch_ptr = 0
; HSA: enable_sgpr_queue_ptr = 0

; HSA: s_load_dwordx2 s[[[PTR_LO:[0-9]+]]:[[PTR_HI:[0-9]+]]]
; CI-DAG v_cmp_ne_u64_e64 vcc, s[[[PTR_LO]]:[[PTR_HI]]], 0{{$}}
; CI-DAG v_mov_b32_e32 v[[VPTR_LO:[0-9]+]], s[[PTR_LO]]
; CI-DAG v_cndmask_b32_e32 [[CASTPTR:v[0-9]+]], -1, v[[VPTR_LO]]
; CI-DAG: v_cmp_ne_u64_e64 s[[[CMP_LO:[0-9]+]]:[[CMP_HI:[0-9]+]]], s[[[PTR_LO]]:[[PTR_HI]]], 0{{$}}
; CI-DAG: s_and_b64 s{{\[[0-9]+:[0-9]+\]}}, s[[[CMP_LO]]:[[CMP_HI]]], exec
; CI-DAG: s_cselect_b32 [[CASTPTR:s[0-9]+]], s[[PTR_LO]], -1
; CI-DAG: v_mov_b32_e32 [[VCASTPTR:v[0-9]+]], [[CASTPTR]]
; HSA-DAG: v_mov_b32_e32 v[[K:[0-9]+]], 0{{$}}
; GFX9-DAG: s_cmp_lg_u64 s[[[CMP_LO:[0-9]+]]:[[CMP_HI:[0-9]+]]], 0
; GFX9-DAG: s_cselect_b32 s[[PTR_LO]], s[[PTR_LO]], -1
; GFX9-DAG: v_mov_b32_e32 [[CASTPTR:v[0-9]+]], s[[PTR_LO]]
; CI: buffer_store_dword v[[K]], [[VCASTPTR]], s{{\[[0-9]+:[0-9]+\]}}, 0 offen{{$}}
; GFX9: buffer_store_dword v[[K]], [[CASTPTR]], s{{\[[0-9]+:[0-9]+\]}}, 0 offen{{$}}
define amdgpu_kernel void @use_flat_to_private_addrspacecast(ptr %ptr) #0 {
  %ftos = addrspacecast ptr %ptr to ptr addrspace(5)
  store volatile i32 0, ptr addrspace(5) %ftos
  ret void
}

; HSA-LABEL: {{^}}use_flat_to_global_addrspacecast:
; HSA: enable_sgpr_queue_ptr = 0

; HSA: s_load_dwordx2 s[[[PTRLO:[0-9]+]]:[[PTRHI:[0-9]+]]], s[4:5], 0x0
; CI-DAG: v_mov_b32_e32 v[[VPTRLO:[0-9]+]], s[[PTRLO]]
; CI-DAG: v_mov_b32_e32 v[[VPTRHI:[0-9]+]], s[[PTRHI]]
; CI-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0
; CI: flat_store_dword v[[[VPTRLO]]:[[VPTRHI]]], [[K]]

; GFX9: v_mov_b32_e32 [[ZERO:v[0-9]+]], 0
; GFX9: global_store_dword [[ZERO]], [[ZERO]], s[[[PTRLO]]:[[PTRHI]]{{\]$}}
define amdgpu_kernel void @use_flat_to_global_addrspacecast(ptr %ptr) #0 {
  %ftos = addrspacecast ptr %ptr to ptr addrspace(1)
  store volatile i32 0, ptr addrspace(1) %ftos
  ret void
}

; HSA-LABEL: {{^}}use_flat_to_constant_addrspacecast:
; HSA: enable_sgpr_queue_ptr = 0

; HSA: s_load_dwordx2 s[[[PTRLO:[0-9]+]]:[[PTRHI:[0-9]+]]], s[4:5], 0x0
; HSA: s_load_dword s{{[0-9]+}}, s[[[PTRLO]]:[[PTRHI]]], 0x0
define amdgpu_kernel void @use_flat_to_constant_addrspacecast(ptr %ptr) #0 {
  %ftos = addrspacecast ptr %ptr to ptr addrspace(4)
  load volatile i32, ptr addrspace(4) %ftos
  ret void
}

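; Null in the 32-bit group address space does not correspond to flat null:
; casting group null to flat produces the base of the shared aperture
; (low half 0, high half from the aperture).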
; HSA-LABEL: {{^}}cast_0_group_to_flat_addrspacecast:
; CI: s_load_dword [[APERTURE:s[0-9]+]], s[4:5], 0x10
; CI-DAG: v_mov_b32_e32 v[[HI:[0-9]+]], [[APERTURE]]

; GFX9-DAG: s_mov_b64 s[{{[0-9]+}}:[[HI:[0-9]+]]], src_shared_base

; HSA-DAG: v_mov_b32_e32 v[[LO:[0-9]+]], 0{{$}}
; HSA-DAG: v_mov_b32_e32 v[[K:[0-9]+]], 7{{$}}
; HSA: {{flat|global}}_store_dword v[[[LO]]:[[HI]]], v[[K]]
define amdgpu_kernel void @cast_0_group_to_flat_addrspacecast() #0 {
  %cast = addrspacecast ptr addrspace(3) null to ptr
  store volatile i32 7, ptr %cast
  ret void
}

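; In the other direction, flat null casts to the group null value, -1.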
; HSA-LABEL: {{^}}cast_0_flat_to_group_addrspacecast:
; HSA-DAG: v_mov_b32_e32 [[PTR:v[0-9]+]], -1{{$}}
; HSA-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 7{{$}}
; HSA: ds_write_b32 [[PTR]], [[K]]
define amdgpu_kernel void @cast_0_flat_to_group_addrspacecast() #0 {
  %cast = addrspacecast ptr null to ptr addrspace(3)
  store volatile i32 7, ptr addrspace(3) %cast
  ret void
}

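; Group -1 (group null) casts to flat null: both halves of the flat pointer
; are 0.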
; HSA-LABEL: {{^}}cast_neg1_group_to_flat_addrspacecast:
; HSA: v_mov_b32_e32 v[[LO:[0-9]+]], 0{{$}}
; HSA-DAG: v_mov_b32_e32 v[[K:[0-9]+]], 7{{$}}
; HSA-DAG: v_mov_b32_e32 v[[HI:[0-9]+]], 0{{$}}
; HSA: {{flat|global}}_store_dword v[[[LO]]:[[HI]]], v[[K]]
define amdgpu_kernel void @cast_neg1_group_to_flat_addrspacecast() #0 {
  %cast = addrspacecast ptr addrspace(3) inttoptr (i32 -1 to ptr addrspace(3)) to ptr
  store volatile i32 7, ptr %cast
  ret void
}

; HSA-LABEL: {{^}}cast_neg1_flat_to_group_addrspacecast:
; HSA-DAG: v_mov_b32_e32 [[PTR:v[0-9]+]], -1{{$}}
; HSA-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 7{{$}}
; HSA: ds_write_b32 [[PTR]], [[K]]
define amdgpu_kernel void @cast_neg1_flat_to_group_addrspacecast() #0 {
  %cast = addrspacecast ptr inttoptr (i64 -1 to ptr) to ptr addrspace(3)
  store volatile i32 7, ptr addrspace(3) %cast
  ret void
}

; FIXME: Shouldn't need to enable queue ptr
; HSA-LABEL: {{^}}cast_0_private_to_flat_addrspacecast:
; CI: s_load_dword [[APERTURE:s[0-9]+]], s[4:5], 0x11
; CI-DAG: v_mov_b32_e32 v[[HI:[0-9]+]], [[APERTURE]]

; GFX9-DAG: s_mov_b64 s[{{[0-9]+}}:[[HI:[0-9]+]]], src_private_base

; HSA-DAG: v_mov_b32_e32 v[[LO:[0-9]+]], 0{{$}}
; HSA-DAG: v_mov_b32_e32 v[[K:[0-9]+]], 7{{$}}
; HSA: {{flat|global}}_store_dword v[[[LO]]:[[HI]]], v[[K]]
define amdgpu_kernel void @cast_0_private_to_flat_addrspacecast() #0 {
  %cast = addrspacecast ptr addrspace(5) null to ptr
  store volatile i32 7, ptr %cast
  ret void
}

; HSA-LABEL: {{^}}cast_0_flat_to_private_addrspacecast:
; HSA-DAG: v_mov_b32_e32 [[PTR:v[0-9]+]], -1{{$}}
; HSA-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 7{{$}}
; HSA: buffer_store_dword [[K]], [[PTR]], s{{\[[0-9]+:[0-9]+\]}}, 0
define amdgpu_kernel void @cast_0_flat_to_private_addrspacecast() #0 {
  %cast = addrspacecast ptr null to ptr addrspace(5)
  store volatile i32 7, ptr addrspace(5) %cast
  ret void
}


; HSA-LABEL: {{^}}cast_neg1_private_to_flat_addrspacecast:
; CI: enable_sgpr_queue_ptr = 1
; GFX9: enable_sgpr_queue_ptr = 0

; HSA: v_mov_b32_e32 v[[LO:[0-9]+]], 0{{$}}
; HSA-DAG: v_mov_b32_e32 v[[K:[0-9]+]], 7{{$}}
; HSA-DAG: v_mov_b32_e32 v[[HI:[0-9]+]], 0{{$}}
; HSA: {{flat|global}}_store_dword v[[[LO]]:[[HI]]], v[[K]]
define amdgpu_kernel void @cast_neg1_private_to_flat_addrspacecast() #0 {
  %cast = addrspacecast ptr addrspace(5) inttoptr (i32 -1 to ptr addrspace(5)) to ptr
  store volatile i32 7, ptr %cast
  ret void
}

; HSA-LABEL: {{^}}cast_neg1_flat_to_private_addrspacecast:
; HSA-DAG: v_mov_b32_e32 [[PTR:v[0-9]+]], -1{{$}}
; HSA-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 7{{$}}
; HSA: buffer_store_dword [[K]], [[PTR]], s{{\[[0-9]+:[0-9]+\]}}, 0
define amdgpu_kernel void @cast_neg1_flat_to_private_addrspacecast() #0 {
  %cast = addrspacecast ptr inttoptr (i64 -1 to ptr) to ptr addrspace(5)
  store volatile i32 7, ptr addrspace(5) %cast
  ret void
}


; Disable optimizations in case there are optimizations added that
; specialize away generic pointer accesses.

; HSA-LABEL: {{^}}branch_use_flat_i32:
; HSA: {{flat|global}}_store_dword {{v\[[0-9]+:[0-9]+\]}}, {{v[0-9]+}}
; HSA: s_endpgm
define amdgpu_kernel void @branch_use_flat_i32(ptr addrspace(1) noalias %out, ptr addrspace(1) %gptr, ptr addrspace(3) %lptr, i32 %x, i32 %c) #0 {
entry:
  %cmp = icmp ne i32 %c, 0
  br i1 %cmp, label %local, label %global

local:
  %flat_local = addrspacecast ptr addrspace(3) %lptr to ptr
  br label %end

global:
  %flat_global = addrspacecast ptr addrspace(1) %gptr to ptr
  br label %end

end:
  %fptr = phi ptr [ %flat_local, %local ], [ %flat_global, %global ]
  store volatile i32 %x, ptr %fptr, align 4
  ; %val = load i32, ptr %fptr, align 4
  ; store i32 %val, ptr addrspace(1) %out, align 4
  ret void
}

; Check for prologue initializing special SGPRs pointing to scratch.
; HSA-LABEL: {{^}}store_flat_scratch:
; CI-DAG: s_mov_b32 flat_scratch_lo, s9
; CI-DAG: s_add_i32 [[ADD:s[0-9]+]], s8, s11
; CI-DAG: s_lshr_b32 flat_scratch_hi, [[ADD]], 8

; GFX9: s_add_u32 flat_scratch_lo, s6, s9
; GFX9: s_addc_u32 flat_scratch_hi, s7, 0

; HSA: {{flat|global}}_store_dword
; HSA: s_barrier
; HSA: {{flat|global}}_load_dword
define amdgpu_kernel void @store_flat_scratch(ptr addrspace(1) noalias %out, i32) #0 {
  %alloca = alloca i32, i32 9, align 4, addrspace(5)
  %x = call i32 @llvm.amdgcn.workitem.id.x() #2
  %pptr = getelementptr i32, ptr addrspace(5) %alloca, i32 %x
  %fptr = addrspacecast ptr addrspace(5) %pptr to ptr
  store volatile i32 %x, ptr %fptr
  ; Dummy call
  call void @llvm.amdgcn.s.barrier() #1
  %reload = load volatile i32, ptr %fptr, align 4
  store volatile i32 %reload, ptr addrspace(1) %out, align 4
  ret void
}

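; Casting a 64-bit constant pointer to the 32-bit constant address space
; truncates it: the low half is kept (and offset by the GEP), and the high
; half is re-materialized as 0 when the pointer is widened for the load.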
; HSA-LABEL: {{^}}use_constant_to_constant32_addrspacecast:
; GFX9: s_load_dwordx2 [[PTRPTR:s\[[0-9]+:[0-9]+\]]], s[4:5], 0x0{{$}}
; GFX9: s_load_dword [[OFFSET:s[0-9]+]], s[4:5], 0x8{{$}}
; GFX9: s_load_dwordx2 s[[[PTR_LO:[0-9]+]]:[[PTR_HI:[0-9]+]]], [[PTRPTR]], 0x0{{$}}
; GFX9: s_mov_b32 s[[PTR_HI]], 0{{$}}
; GFX9: s_add_i32 s[[PTR_LO]], s[[PTR_LO]], [[OFFSET]]
; GFX9: s_load_dword s{{[0-9]+}}, s[[[PTR_LO]]:[[PTR_HI]]], 0x0{{$}}
define amdgpu_kernel void @use_constant_to_constant32_addrspacecast(ptr addrspace(4) %ptr.ptr, i32 %offset) #0 {
  %ptr = load volatile ptr addrspace(4), ptr addrspace(4) %ptr.ptr
  %addrspacecast = addrspacecast ptr addrspace(4) %ptr to ptr addrspace(6)
  %gep = getelementptr i8, ptr addrspace(6) %addrspacecast, i32 %offset
  %load = load volatile i32, ptr addrspace(6) %gep, align 4
  ret void
}

; HSA-LABEL: {{^}}use_global_to_constant32_addrspacecast:
; GFX9: s_load_dwordx2 [[PTRPTR:s\[[0-9]+:[0-9]+\]]], s[4:5], 0x0{{$}}
; GFX9: s_load_dword [[OFFSET:s[0-9]+]], s[4:5], 0x8{{$}}
; GFX9: s_load_dwordx2 s[[[PTR_LO:[0-9]+]]:[[PTR_HI:[0-9]+]]], [[PTRPTR]], 0x0{{$}}
; GFX9: s_mov_b32 s[[PTR_HI]], 0{{$}}
; GFX9: s_add_i32 s[[PTR_LO]], s[[PTR_LO]], [[OFFSET]]
; GFX9: s_load_dword s{{[0-9]+}}, s[[[PTR_LO]]:[[PTR_HI]]], 0x0{{$}}
define amdgpu_kernel void @use_global_to_constant32_addrspacecast(ptr addrspace(4) %ptr.ptr, i32 %offset) #0 {
  %ptr = load volatile ptr addrspace(1), ptr addrspace(4) %ptr.ptr
  %addrspacecast = addrspacecast ptr addrspace(1) %ptr to ptr addrspace(6)
  %gep = getelementptr i8, ptr addrspace(6) %addrspacecast, i32 %offset
  %load = load volatile i32, ptr addrspace(6) %gep, align 4
  ret void
}

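; Extending a 32-bit constant pointer to flat fills in the high half of the
; address: 0 by default, or the value of the "amdgpu-32bit-address-high-bits"
; attribute (0xffff8000 via attribute group #3 in the _1 variant below).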
; GCN-LABEL: {{^}}use_constant32bit_to_flat_addrspacecast_0:
; GCN: s_load_dword [[PTR:s[0-9]+]],
; GCN: v_mov_b32_e32 v[[HI:[0-9]+]], 0
; GCN: v_mov_b32_e32 v[[LO:[0-9]+]], [[PTR]]
; GCN: flat_load_dword v{{[0-9]+}}, v[[[LO]]:[[HI]]]
define amdgpu_kernel void @use_constant32bit_to_flat_addrspacecast_0(ptr addrspace(6) %ptr) #0 {
  %stof = addrspacecast ptr addrspace(6) %ptr to ptr
  %load = load volatile i32, ptr %stof
  ret void
}

; GCN-LABEL: {{^}}use_constant32bit_to_flat_addrspacecast_1:
; GCN: s_load_dword [[PTR:s[0-9]+]],
; GCN: v_mov_b32_e32 v[[HI:[0-9]+]], 0xffff8000
; GCN: v_mov_b32_e32 v[[LO:[0-9]+]], [[PTR]]
; GCN: flat_load_dword v{{[0-9]+}}, v[[[LO]]:[[HI]]]
define amdgpu_kernel void @use_constant32bit_to_flat_addrspacecast_1(ptr addrspace(6) %ptr) #3 {
  %stof = addrspacecast ptr addrspace(6) %ptr to ptr
  %load = load volatile i32, ptr %stof
  ret void
}

declare void @llvm.amdgcn.s.barrier() #1
declare i32 @llvm.amdgcn.workitem.id.x() #2

attributes #0 = { nounwind }
attributes #1 = { nounwind convergent }
attributes #2 = { nounwind readnone }
attributes #3 = { nounwind "amdgpu-32bit-address-high-bits"="0xffff8000" }

!llvm.module.flags = !{!0}
!0 = !{i32 1, !"amdgpu_code_object_version", i32 200}