; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=amdgcn-- -verify-machineinstrs | FileCheck %s -check-prefix=GCN

declare i32 @llvm.amdgcn.workitem.id.x() nounwind readnone

; Make sure the add and load are reduced to 32-bits even with the
; bitcast to vector.
define amdgpu_kernel void @bitcast_int_to_vector_extract_0(i32 addrspace(1)* %out, i64 addrspace(1)* %in, i64 %b) {
; GCN-LABEL: bitcast_int_to_vector_extract_0:
; GCN:       ; %bb.0:
; GCN-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
; GCN-NEXT:    s_load_dword s12, s[0:1], 0xd
; GCN-NEXT:    s_mov_b32 s3, 0xf000
; GCN-NEXT:    s_mov_b32 s10, 0
; GCN-NEXT:    v_lshlrev_b32_e32 v0, 3, v0
; GCN-NEXT:    v_mov_b32_e32 v1, 0
; GCN-NEXT:    s_mov_b32 s11, s3
; GCN-NEXT:    s_waitcnt lgkmcnt(0)
; GCN-NEXT:    s_mov_b64 s[8:9], s[6:7]
; GCN-NEXT:    buffer_load_dword v0, v[0:1], s[8:11], 0 addr64
; GCN-NEXT:    s_mov_b32 s2, -1
; GCN-NEXT:    s_mov_b32 s0, s4
; GCN-NEXT:    s_mov_b32 s1, s5
; GCN-NEXT:    s_waitcnt vmcnt(0)
; GCN-NEXT:    v_add_i32_e32 v0, vcc, s12, v0
; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], 0
; GCN-NEXT:    s_endpgm
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %gep = getelementptr i64, i64 addrspace(1)* %in, i32 %tid
  %a = load i64, i64 addrspace(1)* %gep
  %add = add i64 %a, %b
  %val.bc = bitcast i64 %add to <2 x i32>
  %extract = extractelement <2 x i32> %val.bc, i32 0
  store i32 %extract, i32 addrspace(1)* %out
  ret void
}

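; The fadd cannot be narrowed, so the full 64-bit load and add remain; only
; the low 32 bits of the result are stored.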
define amdgpu_kernel void @bitcast_fp_to_vector_extract_0(i32 addrspace(1)* %out, double addrspace(1)* %in, double %b) {
; GCN-LABEL: bitcast_fp_to_vector_extract_0:
; GCN:       ; %bb.0:
; GCN-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
; GCN-NEXT:    s_load_dwordx2 s[12:13], s[0:1], 0xd
; GCN-NEXT:    s_mov_b32 s3, 0xf000
; GCN-NEXT:    s_mov_b32 s10, 0
; GCN-NEXT:    v_lshlrev_b32_e32 v0, 3, v0
; GCN-NEXT:    v_mov_b32_e32 v1, 0
; GCN-NEXT:    s_mov_b32 s11, s3
; GCN-NEXT:    s_waitcnt lgkmcnt(0)
; GCN-NEXT:    s_mov_b64 s[8:9], s[6:7]
; GCN-NEXT:    buffer_load_dwordx2 v[0:1], v[0:1], s[8:11], 0 addr64
; GCN-NEXT:    s_mov_b32 s2, -1
; GCN-NEXT:    s_mov_b32 s0, s4
; GCN-NEXT:    s_mov_b32 s1, s5
; GCN-NEXT:    s_waitcnt vmcnt(0)
; GCN-NEXT:    v_add_f64 v[0:1], v[0:1], s[12:13]
; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], 0
; GCN-NEXT:    s_endpgm
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %gep = getelementptr double, double addrspace(1)* %in, i32 %tid
  %a = load double, double addrspace(1)* %gep
  %add = fadd double %a, %b
  %val.bc = bitcast double %add to <2 x i32>
  %extract = extractelement <2 x i32> %val.bc, i32 0
  store i32 %extract, i32 addrspace(1)* %out
  ret void
}

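; As in the first test, the integer add and load are reduced to 32 bits; the
; bitcast destination type (<2 x float>) does not matter.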
define amdgpu_kernel void @bitcast_int_to_fpvector_extract_0(float addrspace(1)* %out, i64 addrspace(1)* %in, i64 %b) {
; GCN-LABEL: bitcast_int_to_fpvector_extract_0:
; GCN:       ; %bb.0:
; GCN-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
; GCN-NEXT:    s_load_dword s12, s[0:1], 0xd
; GCN-NEXT:    s_mov_b32 s3, 0xf000
; GCN-NEXT:    s_mov_b32 s10, 0
; GCN-NEXT:    v_lshlrev_b32_e32 v0, 3, v0
; GCN-NEXT:    v_mov_b32_e32 v1, 0
; GCN-NEXT:    s_mov_b32 s11, s3
; GCN-NEXT:    s_waitcnt lgkmcnt(0)
; GCN-NEXT:    s_mov_b64 s[8:9], s[6:7]
; GCN-NEXT:    buffer_load_dword v0, v[0:1], s[8:11], 0 addr64
; GCN-NEXT:    s_mov_b32 s2, -1
; GCN-NEXT:    s_mov_b32 s0, s4
; GCN-NEXT:    s_mov_b32 s1, s5
; GCN-NEXT:    s_waitcnt vmcnt(0)
; GCN-NEXT:    v_add_i32_e32 v0, vcc, s12, v0
; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], 0
; GCN-NEXT:    s_endpgm
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %gep = getelementptr i64, i64 addrspace(1)* %in, i32 %tid
  %a = load i64, i64 addrspace(1)* %gep
  %add = add i64 %a, %b
  %val.bc = bitcast i64 %add to <2 x float>
  %extract = extractelement <2 x float> %val.bc, i32 0
  store float %extract, float addrspace(1)* %out
  ret void
}

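; A volatile load must not be narrowed: the full <4 x i32> is loaded and
; element 0 is stored.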
define amdgpu_kernel void @no_extract_volatile_load_extract0(i32 addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
; GCN-LABEL: no_extract_volatile_load_extract0:
; GCN:       ; %bb.0: ; %entry
; GCN-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
; GCN-NEXT:    s_mov_b32 s7, 0xf000
; GCN-NEXT:    s_mov_b32 s6, -1
; GCN-NEXT:    s_mov_b32 s10, s6
; GCN-NEXT:    s_mov_b32 s11, s7
; GCN-NEXT:    s_waitcnt lgkmcnt(0)
; GCN-NEXT:    s_mov_b32 s8, s2
; GCN-NEXT:    s_mov_b32 s9, s3
; GCN-NEXT:    buffer_load_dwordx4 v[0:3], off, s[8:11], 0
; GCN-NEXT:    s_mov_b32 s4, s0
; GCN-NEXT:    s_mov_b32 s5, s1
; GCN-NEXT:    s_waitcnt vmcnt(0)
; GCN-NEXT:    buffer_store_dword v0, off, s[4:7], 0
; GCN-NEXT:    s_endpgm
entry:
  %vec = load volatile <4 x i32>, <4 x i32> addrspace(1)* %in
  %elt0 = extractelement <4 x i32> %vec, i32 0
  store i32 %elt0, i32 addrspace(1)* %out
  ret void
}

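; Same, but extracting element 2 of the volatile load.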
define amdgpu_kernel void @no_extract_volatile_load_extract2(i32 addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
; GCN-LABEL: no_extract_volatile_load_extract2:
; GCN:       ; %bb.0: ; %entry
; GCN-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
; GCN-NEXT:    s_mov_b32 s7, 0xf000
; GCN-NEXT:    s_mov_b32 s6, -1
; GCN-NEXT:    s_mov_b32 s10, s6
; GCN-NEXT:    s_mov_b32 s11, s7
; GCN-NEXT:    s_waitcnt lgkmcnt(0)
; GCN-NEXT:    s_mov_b32 s8, s2
; GCN-NEXT:    s_mov_b32 s9, s3
; GCN-NEXT:    buffer_load_dwordx4 v[0:3], off, s[8:11], 0
; GCN-NEXT:    s_mov_b32 s4, s0
; GCN-NEXT:    s_mov_b32 s5, s1
; GCN-NEXT:    s_waitcnt vmcnt(0)
; GCN-NEXT:    buffer_store_dword v2, off, s[4:7], 0
; GCN-NEXT:    s_endpgm
entry:
  %vec = load volatile <4 x i32>, <4 x i32> addrspace(1)* %in
  %elt2 = extractelement <4 x i32> %vec, i32 2
  store i32 %elt2, i32 addrspace(1)* %out
  ret void
}

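; With a dynamic index, the volatile vector load also stays whole; the element
; is selected with a chain of compares and v_cndmask.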
define amdgpu_kernel void @no_extract_volatile_load_dynextract(i32 addrspace(1)* %out, <4 x i32> addrspace(1)* %in, i32 %idx) {
; GCN-LABEL: no_extract_volatile_load_dynextract:
; GCN:       ; %bb.0: ; %entry
; GCN-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x9
; GCN-NEXT:    s_mov_b32 s3, 0xf000
; GCN-NEXT:    s_mov_b32 s2, -1
; GCN-NEXT:    s_load_dword s12, s[0:1], 0xd
; GCN-NEXT:    s_mov_b32 s10, s2
; GCN-NEXT:    s_mov_b32 s11, s3
; GCN-NEXT:    s_waitcnt lgkmcnt(0)
; GCN-NEXT:    s_mov_b32 s8, s6
; GCN-NEXT:    s_mov_b32 s9, s7
; GCN-NEXT:    buffer_load_dwordx4 v[0:3], off, s[8:11], 0
; GCN-NEXT:    s_mov_b32 s0, s4
; GCN-NEXT:    s_mov_b32 s1, s5
; GCN-NEXT:    v_cmp_eq_u32_e64 vcc, s12, 1
; GCN-NEXT:    s_waitcnt vmcnt(0)
; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v1, vcc
; GCN-NEXT:    v_cmp_eq_u32_e64 vcc, s12, 2
; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v2, vcc
; GCN-NEXT:    v_cmp_eq_u32_e64 vcc, s12, 3
; GCN-NEXT:    v_cndmask_b32_e32 v0, v0, v3, vcc
; GCN-NEXT:    buffer_store_dword v0, off, s[0:3], 0
; GCN-NEXT:    s_endpgm
entry:
  %vec = load volatile <4 x i32>, <4 x i32> addrspace(1)* %in
  %eltN = extractelement <4 x i32> %vec, i32 %idx
  store i32 %eltN, i32 addrspace(1)* %out
  ret void
}