; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx908 -verify-machineinstrs < %s | FileCheck -enable-var-scope --check-prefix=GCN %s

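; The inbounds GEP offsets the pointer by 128 bytes (32 x i32) and the shl by 2
; scales that to 512, which should fold into the atomic's immediate offset; the
; pre-shift address must still be materialized (the 0x80 add) for the extra
; volatile store of %cast.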
; GCN-LABEL: {{^}}shl_base_atomicrmw_global_ptr:
; GCN: v_add_co_u32_e32 v[[EXTRA_LO:[0-9]+]], vcc, 0x80, v4
; GCN: v_addc_co_u32_e32 v[[EXTRA_HI:[0-9]+]], vcc, 0, v5, vcc
; GCN: v_lshlrev_b64 v{{\[}}[[LO:[0-9]+]]:[[HI:[0-9]+]]{{\]}}, 2, v[4:5]
; GCN: v_mov_b32_e32 [[THREE:v[0-9]+]], 3
; GCN: global_atomic_and v{{\[}}[[LO]]:[[HI]]{{\]}}, [[THREE]], off offset:512
; GCN: global_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[}}[[EXTRA_LO]]:[[EXTRA_HI]]{{\]}}
define void @shl_base_atomicrmw_global_ptr(i32 addrspace(1)* %out, i64 addrspace(1)* %extra.use, [512 x i32] addrspace(1)* %ptr) #0 {
  %arrayidx0 = getelementptr inbounds [512 x i32], [512 x i32] addrspace(1)* %ptr, i64 0, i64 32
  %cast = ptrtoint i32 addrspace(1)* %arrayidx0 to i64
  %shl = shl i64 %cast, 2
  %castback = inttoptr i64 %shl to i32 addrspace(1)*
  %val = atomicrmw and i32 addrspace(1)* %castback, i32 3 seq_cst
  store volatile i64 %cast, i64 addrspace(1)* %extra.use, align 4
  ret void
}

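; Same addressing pattern as above, but the atomic is the gfx908 global fadd
; intrinsic; the 512-byte offset should still fold, and 0x42c80000 is the bit
; pattern of 100.0f.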
; GCN-LABEL: {{^}}shl_base_global_ptr_global_atomic_fadd:
; GCN: v_add_co_u32_e32 v[[EXTRA_LO:[0-9]+]], vcc, 0x80, v4
; GCN: v_addc_co_u32_e32 v[[EXTRA_HI:[0-9]+]], vcc, 0, v5, vcc
; GCN: v_lshlrev_b64 v{{\[}}[[LO:[0-9]+]]:[[HI:[0-9]+]]{{\]}}, 2, v[4:5]
; GCN: v_mov_b32_e32 [[K:v[0-9]+]], 0x42c80000
; GCN: global_atomic_add_f32 v{{\[}}[[LO]]:[[HI]]{{\]}}, [[K]], off offset:512
; GCN: global_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[}}[[EXTRA_LO]]:[[EXTRA_HI]]{{\]}}
define void @shl_base_global_ptr_global_atomic_fadd(i32 addrspace(1)* %out, i64 addrspace(1)* %extra.use, [512 x i32] addrspace(1)* %ptr) #0 {
  %arrayidx0 = getelementptr inbounds [512 x i32], [512 x i32] addrspace(1)* %ptr, i64 0, i64 32
  %cast = ptrtoint i32 addrspace(1)* %arrayidx0 to i64
  %shl = shl i64 %cast, 2
  %castback = inttoptr i64 %shl to float addrspace(1)*
  call float @llvm.amdgcn.global.atomic.fadd.f32.p1f32.f32(float addrspace(1)* %castback, float 100.0)
  store volatile i64 %cast, i64 addrspace(1)* %extra.use, align 4
  ret void
}

declare float @llvm.amdgcn.global.atomic.fadd.f32.p1f32.f32(float addrspace(1)* nocapture, float) #1

attributes #0 = { nounwind }
attributes #1 = { argmemonly nounwind willreturn }