comparison test/CodeGen/AMDGPU/bitreverse.ll @ 147:c2174574ed3a
LLVM 10
author:   Shinji KONO <kono@ie.u-ryukyu.ac.jp>
date:     Wed, 14 Aug 2019 16:55:33 +0900
parents:  3a76565eade5
children: (none)
134:3a76565eade5 (old) | 147:c2174574ed3a (new)
1 ; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s | 1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py |
2 ; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s | 2 ; RUN: llc < %s -mtriple=amdgcn-- -mcpu=tahiti -verify-machineinstrs | FileCheck %s --check-prefixes=FUNC,SI |
3 ; RUN: llc -march=amdgcn -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=VI -check-prefix=FUNC %s | 3 ; RUN: llc < %s -mtriple=amdgcn-- -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs | FileCheck %s --check-prefixes=FUNC,FLAT,TONGA |
4 ; RUN: llc < %s -mtriple=amdgcn-- -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs | FileCheck %s --check-prefixes=FUNC,FLAT,VI | |
4 | 5 |
5 declare i32 @llvm.amdgcn.workitem.id.x() #1 | 6 declare i32 @llvm.amdgcn.workitem.id.x() #1 |
6 | 7 |
7 declare i16 @llvm.bitreverse.i16(i16) #1 | 8 declare i16 @llvm.bitreverse.i16(i16) #1 |
8 declare i32 @llvm.bitreverse.i32(i32) #1 | 9 declare i32 @llvm.bitreverse.i32(i32) #1 |
12 declare <4 x i32> @llvm.bitreverse.v4i32(<4 x i32>) #1 | 13 declare <4 x i32> @llvm.bitreverse.v4i32(<4 x i32>) #1 |
13 | 14 |
14 declare <2 x i64> @llvm.bitreverse.v2i64(<2 x i64>) #1 | 15 declare <2 x i64> @llvm.bitreverse.v2i64(<2 x i64>) #1 |
15 declare <4 x i64> @llvm.bitreverse.v4i64(<4 x i64>) #1 | 16 declare <4 x i64> @llvm.bitreverse.v4i64(<4 x i64>) #1 |
16 | 17 |
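For orientation: llvm.bitreverse.iN returns its operand with the bit order reversed (bit 0 trades places with bit N-1, and so on inward). On these GCN targets the i16 and i32 cases below lower to a single s_brev_b32 or v_bfrev_b32_e32. A minimal C reference model of the 32-bit operation, assuming nothing beyond standard C (the helper name brev32 is ours, not part of the test):

    #include <stdint.h>

    /* Reference model only; the hardware does this in one instruction.
       Swap adjacent bits, then 2-bit pairs, then nibbles, then bytes. */
    static uint32_t brev32(uint32_t x) {
      x = ((x & 0x55555555u) << 1) | ((x >> 1) & 0x55555555u);
      x = ((x & 0x33333333u) << 2) | ((x >> 2) & 0x33333333u);
      x = ((x & 0x0f0f0f0fu) << 4) | ((x >> 4) & 0x0f0f0f0fu);
      x = (x << 24) | ((x & 0xff00u) << 8) | ((x >> 8) & 0xff00u) | (x >> 24);
      return x;
    }

The same 0x55555555 / 0x33333333 / 0x0f0f0f0f masks reappear in the scalarized i64 checks further down, where no single-instruction reverse exists.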
17 ; FUNC-LABEL: {{^}}s_brev_i16: | |
18 ; SI: s_brev_b32 | |
19 define amdgpu_kernel void @s_brev_i16(i16 addrspace(1)* noalias %out, i16 %val) #0 { | 18 define amdgpu_kernel void @s_brev_i16(i16 addrspace(1)* noalias %out, i16 %val) #0 { |
19 ; SI-LABEL: s_brev_i16: | |
20 ; SI: ; %bb.0: | |
21 ; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9 | |
22 ; SI-NEXT: s_load_dword s0, s[0:1], 0xb | |
23 ; SI-NEXT: s_mov_b32 s7, 0xf000 | |
24 ; SI-NEXT: s_mov_b32 s6, -1 | |
25 ; SI-NEXT: s_waitcnt lgkmcnt(0) | |
26 ; SI-NEXT: s_brev_b32 s0, s0 | |
27 ; SI-NEXT: s_lshr_b32 s0, s0, 16 | |
28 ; SI-NEXT: v_mov_b32_e32 v0, s0 | |
29 ; SI-NEXT: buffer_store_short v0, off, s[4:7], 0 | |
30 ; SI-NEXT: s_endpgm | |
31 ; | |
32 ; FLAT-LABEL: s_brev_i16: | |
33 ; FLAT: ; %bb.0: | |
34 ; FLAT-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x24 | |
35 ; FLAT-NEXT: s_load_dword s0, s[0:1], 0x2c | |
36 ; FLAT-NEXT: s_mov_b32 s7, 0xf000 | |
37 ; FLAT-NEXT: s_mov_b32 s6, -1 | |
38 ; FLAT-NEXT: s_waitcnt lgkmcnt(0) | |
39 ; FLAT-NEXT: s_brev_b32 s0, s0 | |
40 ; FLAT-NEXT: s_lshr_b32 s0, s0, 16 | |
41 ; FLAT-NEXT: v_mov_b32_e32 v0, s0 | |
42 ; FLAT-NEXT: buffer_store_short v0, off, s[4:7], 0 | |
43 ; FLAT-NEXT: s_endpgm | |
20 %brev = call i16 @llvm.bitreverse.i16(i16 %val) #1 | 44 %brev = call i16 @llvm.bitreverse.i16(i16 %val) #1 |
21 store i16 %brev, i16 addrspace(1)* %out | 45 store i16 %brev, i16 addrspace(1)* %out |
22 ret void | 46 ret void |
23 } | 47 } |
24 | 48 |
25 ; FUNC-LABEL: {{^}}v_brev_i16: | |
26 ; SI: v_bfrev_b32_e32 | |
27 define amdgpu_kernel void @v_brev_i16(i16 addrspace(1)* noalias %out, i16 addrspace(1)* noalias %valptr) #0 { | 49 define amdgpu_kernel void @v_brev_i16(i16 addrspace(1)* noalias %out, i16 addrspace(1)* noalias %valptr) #0 { |
50 ; SI-LABEL: v_brev_i16: | |
51 ; SI: ; %bb.0: | |
52 ; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9 | |
53 ; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xb | |
54 ; SI-NEXT: s_mov_b32 s7, 0xf000 | |
55 ; SI-NEXT: s_mov_b32 s6, -1 | |
56 ; SI-NEXT: s_mov_b32 s2, s6 | |
57 ; SI-NEXT: s_mov_b32 s3, s7 | |
58 ; SI-NEXT: s_waitcnt lgkmcnt(0) | |
59 ; SI-NEXT: buffer_load_ushort v0, off, s[0:3], 0 | |
60 ; SI-NEXT: s_waitcnt vmcnt(0) | |
61 ; SI-NEXT: v_bfrev_b32_e32 v0, v0 | |
62 ; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v0 | |
63 ; SI-NEXT: buffer_store_short v0, off, s[4:7], 0 | |
64 ; SI-NEXT: s_endpgm | |
65 ; | |
66 ; FLAT-LABEL: v_brev_i16: | |
67 ; FLAT: ; %bb.0: | |
68 ; FLAT-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x24 | |
69 ; FLAT-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x2c | |
70 ; FLAT-NEXT: s_mov_b32 s7, 0xf000 | |
71 ; FLAT-NEXT: s_mov_b32 s6, -1 | |
72 ; FLAT-NEXT: s_mov_b32 s2, s6 | |
73 ; FLAT-NEXT: s_mov_b32 s3, s7 | |
74 ; FLAT-NEXT: s_waitcnt lgkmcnt(0) | |
75 ; FLAT-NEXT: buffer_load_ushort v0, off, s[0:3], 0 | |
76 ; FLAT-NEXT: s_waitcnt vmcnt(0) | |
77 ; FLAT-NEXT: v_bfrev_b32_e32 v0, v0 | |
78 ; FLAT-NEXT: v_lshrrev_b32_e32 v0, 16, v0 | |
79 ; FLAT-NEXT: buffer_store_short v0, off, s[4:7], 0 | |
80 ; FLAT-NEXT: s_endpgm | |
28 %val = load i16, i16 addrspace(1)* %valptr | 81 %val = load i16, i16 addrspace(1)* %valptr |
29 %brev = call i16 @llvm.bitreverse.i16(i16 %val) #1 | 82 %brev = call i16 @llvm.bitreverse.i16(i16 %val) #1 |
30 store i16 %brev, i16 addrspace(1)* %out | 83 store i16 %brev, i16 addrspace(1)* %out |
31 ret void | 84 ret void |
32 } | 85 } |
33 | 86 |
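Note how the i16 case is handled in the checks above: the value is reversed as a full 32-bit quantity (s_brev_b32 / v_bfrev_b32_e32) and the result is then shifted right by 16 to move the reversed halfword back into the low bits. In terms of the brev32 sketch earlier (again, the helper name is ours):

    static uint16_t brev16(uint16_t x) {
      /* Mirrors the brev/lshr-by-16 pairs in s_brev_i16 and v_brev_i16. */
      return (uint16_t)(brev32((uint32_t)x) >> 16);
    }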
34 ; FUNC-LABEL: {{^}}s_brev_i32: | |
35 ; SI: s_load_dword [[VAL:s[0-9]+]], | |
36 ; SI: s_brev_b32 [[SRESULT:s[0-9]+]], [[VAL]] | |
37 ; SI: v_mov_b32_e32 [[VRESULT:v[0-9]+]], [[SRESULT]] | |
38 ; SI: buffer_store_dword [[VRESULT]], | |
39 ; SI: s_endpgm | |
40 define amdgpu_kernel void @s_brev_i32(i32 addrspace(1)* noalias %out, i32 %val) #0 { | 87 define amdgpu_kernel void @s_brev_i32(i32 addrspace(1)* noalias %out, i32 %val) #0 { |
88 ; SI-LABEL: s_brev_i32: | |
89 ; SI: ; %bb.0: | |
90 ; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9 | |
91 ; SI-NEXT: s_load_dword s0, s[0:1], 0xb | |
92 ; SI-NEXT: s_mov_b32 s7, 0xf000 | |
93 ; SI-NEXT: s_mov_b32 s6, -1 | |
94 ; SI-NEXT: s_waitcnt lgkmcnt(0) | |
95 ; SI-NEXT: s_brev_b32 s0, s0 | |
96 ; SI-NEXT: v_mov_b32_e32 v0, s0 | |
97 ; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0 | |
98 ; SI-NEXT: s_endpgm | |
99 ; | |
100 ; FLAT-LABEL: s_brev_i32: | |
101 ; FLAT: ; %bb.0: | |
102 ; FLAT-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x24 | |
103 ; FLAT-NEXT: s_load_dword s0, s[0:1], 0x2c | |
104 ; FLAT-NEXT: s_mov_b32 s7, 0xf000 | |
105 ; FLAT-NEXT: s_mov_b32 s6, -1 | |
106 ; FLAT-NEXT: s_waitcnt lgkmcnt(0) | |
107 ; FLAT-NEXT: s_brev_b32 s0, s0 | |
108 ; FLAT-NEXT: v_mov_b32_e32 v0, s0 | |
109 ; FLAT-NEXT: buffer_store_dword v0, off, s[4:7], 0 | |
110 ; FLAT-NEXT: s_endpgm | |
41 %brev = call i32 @llvm.bitreverse.i32(i32 %val) #1 | 111 %brev = call i32 @llvm.bitreverse.i32(i32 %val) #1 |
42 store i32 %brev, i32 addrspace(1)* %out | 112 store i32 %brev, i32 addrspace(1)* %out |
43 ret void | 113 ret void |
44 } | 114 } |
45 | 115 |
46 ; FUNC-LABEL: {{^}}v_brev_i32: | |
47 ; SI: {{buffer|flat}}_load_dword [[VAL:v[0-9]+]], | |
48 ; SI: v_bfrev_b32_e32 [[RESULT:v[0-9]+]], [[VAL]] | |
49 ; SI: buffer_store_dword [[RESULT]], | |
50 ; SI: s_endpgm | |
51 define amdgpu_kernel void @v_brev_i32(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %valptr) #0 { | 116 define amdgpu_kernel void @v_brev_i32(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %valptr) #0 { |
117 ; SI-LABEL: v_brev_i32: | |
118 ; SI: ; %bb.0: | |
119 ; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9 | |
120 ; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xb | |
121 ; SI-NEXT: s_mov_b32 s7, 0xf000 | |
122 ; SI-NEXT: s_mov_b32 s2, 0 | |
123 ; SI-NEXT: s_mov_b32 s3, s7 | |
124 ; SI-NEXT: v_lshlrev_b32_e32 v0, 2, v0 | |
125 ; SI-NEXT: v_mov_b32_e32 v1, 0 | |
126 ; SI-NEXT: s_waitcnt lgkmcnt(0) | |
127 ; SI-NEXT: buffer_load_dword v0, v[0:1], s[0:3], 0 addr64 | |
128 ; SI-NEXT: s_mov_b32 s6, -1 | |
129 ; SI-NEXT: s_waitcnt vmcnt(0) | |
130 ; SI-NEXT: v_bfrev_b32_e32 v0, v0 | |
131 ; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0 | |
132 ; SI-NEXT: s_endpgm | |
133 ; | |
134 ; FLAT-LABEL: v_brev_i32: | |
135 ; FLAT: ; %bb.0: | |
136 ; FLAT-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x24 | |
137 ; FLAT-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x2c | |
138 ; FLAT-NEXT: v_lshlrev_b32_e32 v0, 2, v0 | |
139 ; FLAT-NEXT: s_mov_b32 s7, 0xf000 | |
140 ; FLAT-NEXT: s_mov_b32 s6, -1 | |
141 ; FLAT-NEXT: s_waitcnt lgkmcnt(0) | |
142 ; FLAT-NEXT: v_mov_b32_e32 v1, s1 | |
143 ; FLAT-NEXT: v_add_u32_e32 v0, vcc, s0, v0 | |
144 ; FLAT-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc | |
145 ; FLAT-NEXT: flat_load_dword v0, v[0:1] | |
146 ; FLAT-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) | |
147 ; FLAT-NEXT: v_bfrev_b32_e32 v0, v0 | |
148 ; FLAT-NEXT: buffer_store_dword v0, off, s[4:7], 0 | |
149 ; FLAT-NEXT: s_endpgm | |
52 %tid = call i32 @llvm.amdgcn.workitem.id.x() | 150 %tid = call i32 @llvm.amdgcn.workitem.id.x() |
53 %gep = getelementptr i32, i32 addrspace(1)* %valptr, i32 %tid | 151 %gep = getelementptr i32, i32 addrspace(1)* %valptr, i32 %tid |
54 %val = load i32, i32 addrspace(1)* %gep | 152 %val = load i32, i32 addrspace(1)* %gep |
55 %brev = call i32 @llvm.bitreverse.i32(i32 %val) #1 | 153 %brev = call i32 @llvm.bitreverse.i32(i32 %val) #1 |
56 store i32 %brev, i32 addrspace(1)* %out | 154 store i32 %brev, i32 addrspace(1)* %out |
57 ret void | 155 ret void |
58 } | 156 } |
59 | 157 |
60 ; FUNC-LABEL: {{^}}s_brev_v2i32: | |
61 ; SI: s_brev_b32 | |
62 ; SI: s_brev_b32 | |
63 define amdgpu_kernel void @s_brev_v2i32(<2 x i32> addrspace(1)* noalias %out, <2 x i32> %val) #0 { | 158 define amdgpu_kernel void @s_brev_v2i32(<2 x i32> addrspace(1)* noalias %out, <2 x i32> %val) #0 { |
159 ; SI-LABEL: s_brev_v2i32: | |
160 ; SI: ; %bb.0: | |
161 ; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9 | |
162 ; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xb | |
163 ; SI-NEXT: s_mov_b32 s7, 0xf000 | |
164 ; SI-NEXT: s_mov_b32 s6, -1 | |
165 ; SI-NEXT: s_waitcnt lgkmcnt(0) | |
166 ; SI-NEXT: s_brev_b32 s1, s1 | |
167 ; SI-NEXT: s_brev_b32 s0, s0 | |
168 ; SI-NEXT: v_mov_b32_e32 v0, s0 | |
169 ; SI-NEXT: v_mov_b32_e32 v1, s1 | |
170 ; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0 | |
171 ; SI-NEXT: s_endpgm | |
172 ; | |
173 ; FLAT-LABEL: s_brev_v2i32: | |
174 ; FLAT: ; %bb.0: | |
175 ; FLAT-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x24 | |
176 ; FLAT-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x2c | |
177 ; FLAT-NEXT: s_mov_b32 s7, 0xf000 | |
178 ; FLAT-NEXT: s_mov_b32 s6, -1 | |
179 ; FLAT-NEXT: s_waitcnt lgkmcnt(0) | |
180 ; FLAT-NEXT: s_brev_b32 s1, s1 | |
181 ; FLAT-NEXT: s_brev_b32 s0, s0 | |
182 ; FLAT-NEXT: v_mov_b32_e32 v0, s0 | |
183 ; FLAT-NEXT: v_mov_b32_e32 v1, s1 | |
184 ; FLAT-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0 | |
185 ; FLAT-NEXT: s_endpgm | |
64 %brev = call <2 x i32> @llvm.bitreverse.v2i32(<2 x i32> %val) #1 | 186 %brev = call <2 x i32> @llvm.bitreverse.v2i32(<2 x i32> %val) #1 |
65 store <2 x i32> %brev, <2 x i32> addrspace(1)* %out | 187 store <2 x i32> %brev, <2 x i32> addrspace(1)* %out |
66 ret void | 188 ret void |
67 } | 189 } |
68 | 190 |
69 ; FUNC-LABEL: {{^}}v_brev_v2i32: | |
70 ; SI: v_bfrev_b32_e32 | |
71 ; SI: v_bfrev_b32_e32 | |
72 define amdgpu_kernel void @v_brev_v2i32(<2 x i32> addrspace(1)* noalias %out, <2 x i32> addrspace(1)* noalias %valptr) #0 { | 191 define amdgpu_kernel void @v_brev_v2i32(<2 x i32> addrspace(1)* noalias %out, <2 x i32> addrspace(1)* noalias %valptr) #0 { |
192 ; SI-LABEL: v_brev_v2i32: | |
193 ; SI: ; %bb.0: | |
194 ; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9 | |
195 ; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xb | |
196 ; SI-NEXT: s_mov_b32 s7, 0xf000 | |
197 ; SI-NEXT: s_mov_b32 s2, 0 | |
198 ; SI-NEXT: s_mov_b32 s3, s7 | |
199 ; SI-NEXT: v_lshlrev_b32_e32 v0, 3, v0 | |
200 ; SI-NEXT: v_mov_b32_e32 v1, 0 | |
201 ; SI-NEXT: s_waitcnt lgkmcnt(0) | |
202 ; SI-NEXT: buffer_load_dwordx2 v[0:1], v[0:1], s[0:3], 0 addr64 | |
203 ; SI-NEXT: s_mov_b32 s6, -1 | |
204 ; SI-NEXT: s_waitcnt vmcnt(0) | |
205 ; SI-NEXT: v_bfrev_b32_e32 v1, v1 | |
206 ; SI-NEXT: v_bfrev_b32_e32 v0, v0 | |
207 ; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0 | |
208 ; SI-NEXT: s_endpgm | |
209 ; | |
210 ; FLAT-LABEL: v_brev_v2i32: | |
211 ; FLAT: ; %bb.0: | |
212 ; FLAT-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x24 | |
213 ; FLAT-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x2c | |
214 ; FLAT-NEXT: v_lshlrev_b32_e32 v0, 3, v0 | |
215 ; FLAT-NEXT: s_mov_b32 s7, 0xf000 | |
216 ; FLAT-NEXT: s_mov_b32 s6, -1 | |
217 ; FLAT-NEXT: s_waitcnt lgkmcnt(0) | |
218 ; FLAT-NEXT: v_mov_b32_e32 v1, s1 | |
219 ; FLAT-NEXT: v_add_u32_e32 v0, vcc, s0, v0 | |
220 ; FLAT-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc | |
221 ; FLAT-NEXT: flat_load_dwordx2 v[0:1], v[0:1] | |
222 ; FLAT-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) | |
223 ; FLAT-NEXT: v_bfrev_b32_e32 v1, v1 | |
224 ; FLAT-NEXT: v_bfrev_b32_e32 v0, v0 | |
225 ; FLAT-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0 | |
226 ; FLAT-NEXT: s_endpgm | |
73 %tid = call i32 @llvm.amdgcn.workitem.id.x() | 227 %tid = call i32 @llvm.amdgcn.workitem.id.x() |
74 %gep = getelementptr <2 x i32>, <2 x i32> addrspace(1)* %valptr, i32 %tid | 228 %gep = getelementptr <2 x i32>, <2 x i32> addrspace(1)* %valptr, i32 %tid |
75 %val = load <2 x i32>, <2 x i32> addrspace(1)* %gep | 229 %val = load <2 x i32>, <2 x i32> addrspace(1)* %gep |
76 %brev = call <2 x i32> @llvm.bitreverse.v2i32(<2 x i32> %val) #1 | 230 %brev = call <2 x i32> @llvm.bitreverse.v2i32(<2 x i32> %val) #1 |
77 store <2 x i32> %brev, <2 x i32> addrspace(1)* %out | 231 store <2 x i32> %brev, <2 x i32> addrspace(1)* %out |
78 ret void | 232 ret void |
79 } | 233 } |
80 | 234 |
81 ; FUNC-LABEL: {{^}}s_brev_i64: | |
82 define amdgpu_kernel void @s_brev_i64(i64 addrspace(1)* noalias %out, i64 %val) #0 { | 235 define amdgpu_kernel void @s_brev_i64(i64 addrspace(1)* noalias %out, i64 %val) #0 { |
236 ; SI-LABEL: s_brev_i64: | |
237 ; SI: ; %bb.0: | |
238 ; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0xb | |
239 ; SI-NEXT: s_mov_b32 s3, 0 | |
240 ; SI-NEXT: s_mov_b32 s10, 0xff0000 | |
241 ; SI-NEXT: s_mov_b32 s11, 0xff00 | |
242 ; SI-NEXT: s_mov_b32 s7, s3 | |
243 ; SI-NEXT: s_waitcnt lgkmcnt(0) | |
244 ; SI-NEXT: v_mov_b32_e32 v0, s4 | |
245 ; SI-NEXT: v_alignbit_b32 v1, s5, v0, 24 | |
246 ; SI-NEXT: v_alignbit_b32 v0, s5, v0, 8 | |
247 ; SI-NEXT: s_lshr_b32 s6, s5, 8 | |
248 ; SI-NEXT: v_and_b32_e32 v1, s10, v1 | |
249 ; SI-NEXT: v_and_b32_e32 v0, 0xff000000, v0 | |
250 ; SI-NEXT: s_lshr_b32 s2, s5, 24 | |
251 ; SI-NEXT: s_and_b32 s6, s6, s11 | |
252 ; SI-NEXT: s_or_b64 s[6:7], s[6:7], s[2:3] | |
253 ; SI-NEXT: v_or_b32_e32 v0, v0, v1 | |
254 ; SI-NEXT: s_lshl_b64 s[8:9], s[4:5], 24 | |
255 ; SI-NEXT: v_or_b32_e32 v0, s6, v0 | |
256 ; SI-NEXT: v_mov_b32_e32 v1, s7 | |
257 ; SI-NEXT: s_lshl_b64 s[6:7], s[4:5], 8 | |
258 ; SI-NEXT: s_lshl_b32 s2, s4, 8 | |
259 ; SI-NEXT: s_and_b32 s7, s7, 0xff | |
260 ; SI-NEXT: s_mov_b32 s6, s3 | |
261 ; SI-NEXT: s_and_b32 s9, s9, s11 | |
262 ; SI-NEXT: s_mov_b32 s8, s3 | |
263 ; SI-NEXT: s_or_b64 s[6:7], s[8:9], s[6:7] | |
264 ; SI-NEXT: s_lshl_b32 s9, s4, 24 | |
265 ; SI-NEXT: s_and_b32 s5, s2, s10 | |
266 ; SI-NEXT: s_mov_b32 s4, s3 | |
267 ; SI-NEXT: s_or_b64 s[2:3], s[8:9], s[4:5] | |
268 ; SI-NEXT: s_or_b64 s[2:3], s[2:3], s[6:7] | |
269 ; SI-NEXT: v_or_b32_e32 v2, s2, v0 | |
270 ; SI-NEXT: v_or_b32_e32 v3, s3, v1 | |
271 ; SI-NEXT: s_mov_b32 s2, 0xf0f0f0f | |
272 ; SI-NEXT: v_and_b32_e32 v1, s2, v3 | |
273 ; SI-NEXT: v_and_b32_e32 v0, s2, v2 | |
274 ; SI-NEXT: s_mov_b32 s2, 0xf0f0f0f0 | |
275 ; SI-NEXT: v_and_b32_e32 v3, s2, v3 | |
276 ; SI-NEXT: v_and_b32_e32 v2, s2, v2 | |
277 ; SI-NEXT: v_lshl_b64 v[0:1], v[0:1], 4 | |
278 ; SI-NEXT: v_lshr_b64 v[2:3], v[2:3], 4 | |
279 ; SI-NEXT: s_mov_b32 s2, 0x33333333 | |
280 ; SI-NEXT: v_or_b32_e32 v2, v2, v0 | |
281 ; SI-NEXT: v_or_b32_e32 v3, v3, v1 | |
282 ; SI-NEXT: v_and_b32_e32 v1, s2, v3 | |
283 ; SI-NEXT: v_and_b32_e32 v0, s2, v2 | |
284 ; SI-NEXT: s_mov_b32 s2, 0xcccccccc | |
285 ; SI-NEXT: v_and_b32_e32 v3, s2, v3 | |
286 ; SI-NEXT: v_and_b32_e32 v2, s2, v2 | |
287 ; SI-NEXT: v_lshl_b64 v[0:1], v[0:1], 2 | |
288 ; SI-NEXT: v_lshr_b64 v[2:3], v[2:3], 2 | |
289 ; SI-NEXT: s_mov_b32 s2, 0x55555555 | |
290 ; SI-NEXT: v_or_b32_e32 v2, v2, v0 | |
291 ; SI-NEXT: v_or_b32_e32 v3, v3, v1 | |
292 ; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9 | |
293 ; SI-NEXT: v_and_b32_e32 v1, s2, v3 | |
294 ; SI-NEXT: v_and_b32_e32 v0, s2, v2 | |
295 ; SI-NEXT: s_mov_b32 s2, 0xaaaaaaaa | |
296 ; SI-NEXT: v_and_b32_e32 v3, s2, v3 | |
297 ; SI-NEXT: v_and_b32_e32 v2, s2, v2 | |
298 ; SI-NEXT: v_lshl_b64 v[0:1], v[0:1], 1 | |
299 ; SI-NEXT: v_lshr_b64 v[2:3], v[2:3], 1 | |
300 ; SI-NEXT: s_mov_b32 s3, 0xf000 | |
301 ; SI-NEXT: s_mov_b32 s2, -1 | |
302 ; SI-NEXT: v_or_b32_e32 v0, v2, v0 | |
303 ; SI-NEXT: v_or_b32_e32 v1, v3, v1 | |
304 ; SI-NEXT: s_waitcnt lgkmcnt(0) | |
305 ; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0 | |
306 ; SI-NEXT: s_endpgm | |
307 ; | |
308 ; FLAT-LABEL: s_brev_i64: | |
309 ; FLAT: ; %bb.0: | |
310 ; FLAT-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x2c | |
311 ; FLAT-NEXT: s_mov_b32 s3, 0 | |
312 ; FLAT-NEXT: s_mov_b32 s10, 0xff0000 | |
313 ; FLAT-NEXT: s_mov_b32 s7, s3 | |
314 ; FLAT-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24 | |
315 ; FLAT-NEXT: s_waitcnt lgkmcnt(0) | |
316 ; FLAT-NEXT: v_mov_b32_e32 v0, s4 | |
317 ; FLAT-NEXT: v_alignbit_b32 v1, s5, v0, 24 | |
318 ; FLAT-NEXT: v_alignbit_b32 v0, s5, v0, 8 | |
319 ; FLAT-NEXT: s_bfe_u32 s6, s5, 0x80010 | |
320 ; FLAT-NEXT: v_and_b32_e32 v1, s10, v1 | |
321 ; FLAT-NEXT: v_and_b32_e32 v0, 0xff000000, v0 | |
322 ; FLAT-NEXT: s_lshr_b32 s2, s5, 24 | |
323 ; FLAT-NEXT: s_lshl_b32 s6, s6, 8 | |
324 ; FLAT-NEXT: s_or_b64 s[6:7], s[6:7], s[2:3] | |
325 ; FLAT-NEXT: v_or_b32_e32 v0, v0, v1 | |
326 ; FLAT-NEXT: s_lshl_b64 s[8:9], s[4:5], 24 | |
327 ; FLAT-NEXT: v_or_b32_e32 v0, s6, v0 | |
328 ; FLAT-NEXT: v_mov_b32_e32 v1, s7 | |
329 ; FLAT-NEXT: s_lshl_b64 s[6:7], s[4:5], 8 | |
330 ; FLAT-NEXT: s_lshl_b32 s2, s4, 8 | |
331 ; FLAT-NEXT: s_and_b32 s7, s7, 0xff | |
332 ; FLAT-NEXT: s_mov_b32 s6, s3 | |
333 ; FLAT-NEXT: s_and_b32 s9, s9, 0xff00 | |
334 ; FLAT-NEXT: s_mov_b32 s8, s3 | |
335 ; FLAT-NEXT: s_or_b64 s[6:7], s[8:9], s[6:7] | |
336 ; FLAT-NEXT: s_lshl_b32 s9, s4, 24 | |
337 ; FLAT-NEXT: s_and_b32 s5, s2, s10 | |
338 ; FLAT-NEXT: s_mov_b32 s4, s3 | |
339 ; FLAT-NEXT: s_or_b64 s[2:3], s[8:9], s[4:5] | |
340 ; FLAT-NEXT: s_or_b64 s[2:3], s[2:3], s[6:7] | |
341 ; FLAT-NEXT: v_or_b32_e32 v2, s2, v0 | |
342 ; FLAT-NEXT: v_or_b32_e32 v3, s3, v1 | |
343 ; FLAT-NEXT: s_mov_b32 s2, 0xf0f0f0f | |
344 ; FLAT-NEXT: v_and_b32_e32 v1, s2, v3 | |
345 ; FLAT-NEXT: v_and_b32_e32 v0, s2, v2 | |
346 ; FLAT-NEXT: s_mov_b32 s2, 0xf0f0f0f0 | |
347 ; FLAT-NEXT: v_and_b32_e32 v3, s2, v3 | |
348 ; FLAT-NEXT: v_and_b32_e32 v2, s2, v2 | |
349 ; FLAT-NEXT: v_lshlrev_b64 v[0:1], 4, v[0:1] | |
350 ; FLAT-NEXT: v_lshrrev_b64 v[2:3], 4, v[2:3] | |
351 ; FLAT-NEXT: s_mov_b32 s2, 0x33333333 | |
352 ; FLAT-NEXT: v_or_b32_e32 v2, v2, v0 | |
353 ; FLAT-NEXT: v_or_b32_e32 v3, v3, v1 | |
354 ; FLAT-NEXT: v_and_b32_e32 v1, s2, v3 | |
355 ; FLAT-NEXT: v_and_b32_e32 v0, s2, v2 | |
356 ; FLAT-NEXT: s_mov_b32 s2, 0xcccccccc | |
357 ; FLAT-NEXT: v_and_b32_e32 v3, s2, v3 | |
358 ; FLAT-NEXT: v_and_b32_e32 v2, s2, v2 | |
359 ; FLAT-NEXT: v_lshlrev_b64 v[0:1], 2, v[0:1] | |
360 ; FLAT-NEXT: v_lshrrev_b64 v[2:3], 2, v[2:3] | |
361 ; FLAT-NEXT: s_mov_b32 s2, 0x55555555 | |
362 ; FLAT-NEXT: v_or_b32_e32 v2, v2, v0 | |
363 ; FLAT-NEXT: v_or_b32_e32 v3, v3, v1 | |
364 ; FLAT-NEXT: v_and_b32_e32 v1, s2, v3 | |
365 ; FLAT-NEXT: v_and_b32_e32 v0, s2, v2 | |
366 ; FLAT-NEXT: s_mov_b32 s2, 0xaaaaaaaa | |
367 ; FLAT-NEXT: v_and_b32_e32 v3, s2, v3 | |
368 ; FLAT-NEXT: v_and_b32_e32 v2, s2, v2 | |
369 ; FLAT-NEXT: v_lshlrev_b64 v[0:1], 1, v[0:1] | |
370 ; FLAT-NEXT: v_lshrrev_b64 v[2:3], 1, v[2:3] | |
371 ; FLAT-NEXT: s_mov_b32 s3, 0xf000 | |
372 ; FLAT-NEXT: s_mov_b32 s2, -1 | |
373 ; FLAT-NEXT: v_or_b32_e32 v0, v2, v0 | |
374 ; FLAT-NEXT: v_or_b32_e32 v1, v3, v1 | |
375 ; FLAT-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0 | |
376 ; FLAT-NEXT: s_endpgm | |
83 %brev = call i64 @llvm.bitreverse.i64(i64 %val) #1 | 377 %brev = call i64 @llvm.bitreverse.i64(i64 %val) #1 |
84 store i64 %brev, i64 addrspace(1)* %out | 378 store i64 %brev, i64 addrspace(1)* %out |
85 ret void | 379 ret void |
86 } | 380 } |
87 | 381 |
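The long s_brev_i64 sequence above reads as a two-step expansion: there is no 64-bit reverse instruction, so the value is first byte-swapped (the v_alignbit_b32 / shift / mask / or block) and the bits are then reversed inside each byte by three mask-and-shift rounds over the full 64 bits, at 4-, 2-, and 1-bit strides. The 0xf0f0f0f/0xf0f0f0f0, 0x33333333/0xcccccccc, and 0x55555555/0xaaaaaaaa constants in the checks are the two 32-bit halves of each round's masks. A C sketch of the same strategy (helper name ours; __builtin_bswap64 stands in for the alignbit block):

    #include <stdint.h>

    static uint64_t brev64(uint64_t x) {
      x = __builtin_bswap64(x);  /* byte reverse: the alignbit/shl/or block */
      x = ((x & 0x0f0f0f0f0f0f0f0full) << 4) | ((x >> 4) & 0x0f0f0f0f0f0f0f0full);
      x = ((x & 0x3333333333333333ull) << 2) | ((x >> 2) & 0x3333333333333333ull);
      x = ((x & 0x5555555555555555ull) << 1) | ((x >> 1) & 0x5555555555555555ull);
      return x;
    }

The v_brev_i64 and v2i64 functions that follow apply the same rounds, only with the masks materialized in scalar registers and the shifts done with v_lshl_b64/v_lshr_b64 pairs.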
88 ; FUNC-LABEL: {{^}}v_brev_i64: | |
89 ; SI-NOT: v_or_b32_e64 v{{[0-9]+}}, 0, 0 | |
90 define amdgpu_kernel void @v_brev_i64(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %valptr) #0 { | 382 define amdgpu_kernel void @v_brev_i64(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %valptr) #0 { |
383 ; SI-LABEL: v_brev_i64: | |
384 ; SI: ; %bb.0: | |
385 ; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9 | |
386 ; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xb | |
387 ; SI-NEXT: s_mov_b32 s7, 0xf000 | |
388 ; SI-NEXT: s_mov_b32 s2, 0 | |
389 ; SI-NEXT: s_mov_b32 s3, s7 | |
390 ; SI-NEXT: v_lshlrev_b32_e32 v0, 3, v0 | |
391 ; SI-NEXT: v_mov_b32_e32 v1, 0 | |
392 ; SI-NEXT: s_waitcnt lgkmcnt(0) | |
393 ; SI-NEXT: buffer_load_dwordx2 v[0:1], v[0:1], s[0:3], 0 addr64 | |
394 ; SI-NEXT: s_mov_b32 s0, 0xff0000 | |
395 ; SI-NEXT: s_mov_b32 s1, 0xff00 | |
396 ; SI-NEXT: s_mov_b32 s2, 0xf0f0f0f | |
397 ; SI-NEXT: s_mov_b32 s3, 0xf0f0f0f0 | |
398 ; SI-NEXT: s_mov_b32 s6, 0x33333333 | |
399 ; SI-NEXT: s_mov_b32 s8, 0xcccccccc | |
400 ; SI-NEXT: s_mov_b32 s9, 0x55555555 | |
401 ; SI-NEXT: s_mov_b32 s10, 0xaaaaaaaa | |
402 ; SI-NEXT: s_waitcnt vmcnt(0) | |
403 ; SI-NEXT: v_lshl_b64 v[2:3], v[0:1], 8 | |
404 ; SI-NEXT: v_alignbit_b32 v4, v1, v0, 24 | |
405 ; SI-NEXT: v_alignbit_b32 v5, v1, v0, 8 | |
406 ; SI-NEXT: v_lshrrev_b32_e32 v7, 8, v1 | |
407 ; SI-NEXT: v_lshrrev_b32_e32 v6, 24, v1 | |
408 ; SI-NEXT: v_lshl_b64 v[1:2], v[0:1], 24 | |
409 ; SI-NEXT: v_lshlrev_b32_e32 v1, 24, v0 | |
410 ; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v0 | |
411 ; SI-NEXT: v_and_b32_e32 v0, s0, v0 | |
412 ; SI-NEXT: v_and_b32_e32 v4, s0, v4 | |
413 ; SI-NEXT: v_and_b32_e32 v5, 0xff000000, v5 | |
414 ; SI-NEXT: v_and_b32_e32 v7, s1, v7 | |
415 ; SI-NEXT: v_and_b32_e32 v3, 0xff, v3 | |
416 ; SI-NEXT: v_and_b32_e32 v2, s1, v2 | |
417 ; SI-NEXT: v_or_b32_e32 v4, v5, v4 | |
418 ; SI-NEXT: v_or_b32_e32 v5, v7, v6 | |
419 ; SI-NEXT: v_or_b32_e32 v0, v1, v0 | |
420 ; SI-NEXT: v_or_b32_e32 v2, v2, v3 | |
421 ; SI-NEXT: v_or_b32_e32 v1, v4, v5 | |
422 ; SI-NEXT: v_or_b32_e32 v3, v0, v2 | |
423 ; SI-NEXT: v_and_b32_e32 v0, s2, v1 | |
424 ; SI-NEXT: v_and_b32_e32 v2, s3, v1 | |
425 ; SI-NEXT: v_and_b32_e32 v1, s2, v3 | |
426 ; SI-NEXT: v_and_b32_e32 v3, s3, v3 | |
427 ; SI-NEXT: v_lshl_b64 v[0:1], v[0:1], 4 | |
428 ; SI-NEXT: v_lshr_b64 v[2:3], v[2:3], 4 | |
429 ; SI-NEXT: v_or_b32_e32 v3, v3, v1 | |
430 ; SI-NEXT: v_or_b32_e32 v2, v2, v0 | |
431 ; SI-NEXT: v_and_b32_e32 v1, s6, v3 | |
432 ; SI-NEXT: v_and_b32_e32 v0, s6, v2 | |
433 ; SI-NEXT: v_and_b32_e32 v3, s8, v3 | |
434 ; SI-NEXT: v_and_b32_e32 v2, s8, v2 | |
435 ; SI-NEXT: v_lshl_b64 v[0:1], v[0:1], 2 | |
436 ; SI-NEXT: v_lshr_b64 v[2:3], v[2:3], 2 | |
437 ; SI-NEXT: s_mov_b32 s6, -1 | |
438 ; SI-NEXT: v_or_b32_e32 v3, v3, v1 | |
439 ; SI-NEXT: v_or_b32_e32 v2, v2, v0 | |
440 ; SI-NEXT: v_and_b32_e32 v1, s9, v3 | |
441 ; SI-NEXT: v_and_b32_e32 v0, s9, v2 | |
442 ; SI-NEXT: v_and_b32_e32 v3, s10, v3 | |
443 ; SI-NEXT: v_and_b32_e32 v2, s10, v2 | |
444 ; SI-NEXT: v_lshl_b64 v[0:1], v[0:1], 1 | |
445 ; SI-NEXT: v_lshr_b64 v[2:3], v[2:3], 1 | |
446 ; SI-NEXT: v_or_b32_e32 v1, v3, v1 | |
447 ; SI-NEXT: v_or_b32_e32 v0, v2, v0 | |
448 ; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0 | |
449 ; SI-NEXT: s_endpgm | |
450 ; | |
451 ; FLAT-LABEL: v_brev_i64: | |
452 ; FLAT: ; %bb.0: | |
453 ; FLAT-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x24 | |
454 ; FLAT-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x2c | |
455 ; FLAT-NEXT: v_lshlrev_b32_e32 v0, 3, v0 | |
456 ; FLAT-NEXT: v_mov_b32_e32 v4, 8 | |
457 ; FLAT-NEXT: s_mov_b32 s2, 0xff0000 | |
458 ; FLAT-NEXT: s_mov_b32 s3, 0xf0f0f0f | |
459 ; FLAT-NEXT: s_waitcnt lgkmcnt(0) | |
460 ; FLAT-NEXT: v_mov_b32_e32 v1, s1 | |
461 ; FLAT-NEXT: v_add_u32_e32 v0, vcc, s0, v0 | |
462 ; FLAT-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc | |
463 ; FLAT-NEXT: flat_load_dwordx2 v[0:1], v[0:1] | |
464 ; FLAT-NEXT: s_mov_b32 s0, 0xf0f0f0f0 | |
465 ; FLAT-NEXT: s_mov_b32 s1, 0x33333333 | |
466 ; FLAT-NEXT: s_mov_b32 s6, 0xcccccccc | |
467 ; FLAT-NEXT: s_mov_b32 s8, 0x55555555 | |
468 ; FLAT-NEXT: s_mov_b32 s9, 0xaaaaaaaa | |
469 ; FLAT-NEXT: s_mov_b32 s7, 0xf000 | |
470 ; FLAT-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) | |
471 ; FLAT-NEXT: v_lshlrev_b64 v[2:3], 24, v[0:1] | |
472 ; FLAT-NEXT: v_alignbit_b32 v2, v1, v0, 24 | |
473 ; FLAT-NEXT: v_alignbit_b32 v6, v1, v0, 8 | |
474 ; FLAT-NEXT: v_lshlrev_b32_sdwa v7, v4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 | |
475 ; FLAT-NEXT: v_lshlrev_b64 v[4:5], 8, v[0:1] | |
476 ; FLAT-NEXT: v_lshlrev_b32_e32 v4, 24, v0 | |
477 ; FLAT-NEXT: v_lshlrev_b32_e32 v0, 8, v0 | |
478 ; FLAT-NEXT: v_and_b32_e32 v2, s2, v2 | |
479 ; FLAT-NEXT: v_and_b32_e32 v6, 0xff000000, v6 | |
480 ; FLAT-NEXT: v_and_b32_e32 v0, s2, v0 | |
481 ; FLAT-NEXT: v_or_b32_sdwa v1, v7, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3 | |
482 ; FLAT-NEXT: v_or_b32_e32 v2, v6, v2 | |
483 ; FLAT-NEXT: v_and_b32_e32 v3, 0xff00, v3 | |
484 ; FLAT-NEXT: v_or_b32_e32 v1, v2, v1 | |
485 ; FLAT-NEXT: v_or_b32_e32 v0, v4, v0 | |
486 ; FLAT-NEXT: v_or_b32_sdwa v2, v3, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 | |
487 ; FLAT-NEXT: v_or_b32_e32 v3, v0, v2 | |
488 ; FLAT-NEXT: v_and_b32_e32 v0, s3, v1 | |
489 ; FLAT-NEXT: v_and_b32_e32 v2, s0, v1 | |
490 ; FLAT-NEXT: v_and_b32_e32 v1, s3, v3 | |
491 ; FLAT-NEXT: v_and_b32_e32 v3, s0, v3 | |
492 ; FLAT-NEXT: v_lshlrev_b64 v[0:1], 4, v[0:1] | |
493 ; FLAT-NEXT: v_lshrrev_b64 v[2:3], 4, v[2:3] | |
494 ; FLAT-NEXT: v_or_b32_e32 v3, v3, v1 | |
495 ; FLAT-NEXT: v_or_b32_e32 v2, v2, v0 | |
496 ; FLAT-NEXT: v_and_b32_e32 v1, s1, v3 | |
497 ; FLAT-NEXT: v_and_b32_e32 v0, s1, v2 | |
498 ; FLAT-NEXT: v_and_b32_e32 v3, s6, v3 | |
499 ; FLAT-NEXT: v_and_b32_e32 v2, s6, v2 | |
500 ; FLAT-NEXT: v_lshlrev_b64 v[0:1], 2, v[0:1] | |
501 ; FLAT-NEXT: v_lshrrev_b64 v[2:3], 2, v[2:3] | |
502 ; FLAT-NEXT: s_mov_b32 s6, -1 | |
503 ; FLAT-NEXT: v_or_b32_e32 v3, v3, v1 | |
504 ; FLAT-NEXT: v_or_b32_e32 v2, v2, v0 | |
505 ; FLAT-NEXT: v_and_b32_e32 v1, s8, v3 | |
506 ; FLAT-NEXT: v_and_b32_e32 v0, s8, v2 | |
507 ; FLAT-NEXT: v_and_b32_e32 v3, s9, v3 | |
508 ; FLAT-NEXT: v_and_b32_e32 v2, s9, v2 | |
509 ; FLAT-NEXT: v_lshlrev_b64 v[0:1], 1, v[0:1] | |
510 ; FLAT-NEXT: v_lshrrev_b64 v[2:3], 1, v[2:3] | |
511 ; FLAT-NEXT: v_or_b32_e32 v1, v3, v1 | |
512 ; FLAT-NEXT: v_or_b32_e32 v0, v2, v0 | |
513 ; FLAT-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0 | |
514 ; FLAT-NEXT: s_endpgm | |
91 %tid = call i32 @llvm.amdgcn.workitem.id.x() | 515 %tid = call i32 @llvm.amdgcn.workitem.id.x() |
92 %gep = getelementptr i64, i64 addrspace(1)* %valptr, i32 %tid | 516 %gep = getelementptr i64, i64 addrspace(1)* %valptr, i32 %tid |
93 %val = load i64, i64 addrspace(1)* %gep | 517 %val = load i64, i64 addrspace(1)* %gep |
94 %brev = call i64 @llvm.bitreverse.i64(i64 %val) #1 | 518 %brev = call i64 @llvm.bitreverse.i64(i64 %val) #1 |
95 store i64 %brev, i64 addrspace(1)* %out | 519 store i64 %brev, i64 addrspace(1)* %out |
96 ret void | 520 ret void |
97 } | 521 } |
98 | 522 |
99 ; FUNC-LABEL: {{^}}s_brev_v2i64: | |
100 define amdgpu_kernel void @s_brev_v2i64(<2 x i64> addrspace(1)* noalias %out, <2 x i64> %val) #0 { | 523 define amdgpu_kernel void @s_brev_v2i64(<2 x i64> addrspace(1)* noalias %out, <2 x i64> %val) #0 { |
524 ; SI-LABEL: s_brev_v2i64: | |
525 ; SI: ; %bb.0: | |
526 ; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9 | |
527 ; SI-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0xd | |
528 ; SI-NEXT: s_mov_b32 s9, 0 | |
529 ; SI-NEXT: s_mov_b32 s12, 0xff0000 | |
530 ; SI-NEXT: s_mov_b32 s13, 0xff000000 | |
531 ; SI-NEXT: s_mov_b32 s14, 0xff00 | |
532 ; SI-NEXT: s_waitcnt lgkmcnt(0) | |
533 ; SI-NEXT: v_mov_b32_e32 v0, s2 | |
534 ; SI-NEXT: v_alignbit_b32 v1, s3, v0, 24 | |
535 ; SI-NEXT: v_alignbit_b32 v0, s3, v0, 8 | |
536 ; SI-NEXT: s_lshr_b32 s6, s3, 8 | |
537 ; SI-NEXT: v_and_b32_e32 v1, s12, v1 | |
538 ; SI-NEXT: v_and_b32_e32 v0, s13, v0 | |
539 ; SI-NEXT: s_lshr_b32 s8, s3, 24 | |
540 ; SI-NEXT: s_and_b32 s6, s6, s14 | |
541 ; SI-NEXT: s_mov_b32 s7, s9 | |
542 ; SI-NEXT: s_or_b64 s[6:7], s[6:7], s[8:9] | |
543 ; SI-NEXT: v_or_b32_e32 v0, v0, v1 | |
544 ; SI-NEXT: s_lshl_b32 s8, s2, 8 | |
545 ; SI-NEXT: v_or_b32_e32 v0, s6, v0 | |
546 ; SI-NEXT: v_mov_b32_e32 v1, s7 | |
547 ; SI-NEXT: s_and_b32 s11, s8, s12 | |
548 ; SI-NEXT: s_lshl_b32 s7, s2, 24 | |
549 ; SI-NEXT: s_mov_b32 s6, s9 | |
550 ; SI-NEXT: s_mov_b32 s10, s9 | |
551 ; SI-NEXT: s_or_b64 s[6:7], s[6:7], s[10:11] | |
552 ; SI-NEXT: s_lshl_b64 s[10:11], s[2:3], 8 | |
553 ; SI-NEXT: s_lshl_b64 s[2:3], s[2:3], 24 | |
554 ; SI-NEXT: s_movk_i32 s15, 0xff | |
555 ; SI-NEXT: s_and_b32 s11, s11, s15 | |
556 ; SI-NEXT: s_mov_b32 s10, s9 | |
557 ; SI-NEXT: s_and_b32 s3, s3, s14 | |
558 ; SI-NEXT: s_mov_b32 s2, s9 | |
559 ; SI-NEXT: s_or_b64 s[2:3], s[2:3], s[10:11] | |
560 ; SI-NEXT: s_or_b64 s[2:3], s[6:7], s[2:3] | |
561 ; SI-NEXT: v_mov_b32_e32 v4, s0 | |
562 ; SI-NEXT: v_alignbit_b32 v5, s1, v4, 24 | |
563 ; SI-NEXT: v_alignbit_b32 v4, s1, v4, 8 | |
564 ; SI-NEXT: v_or_b32_e32 v2, s2, v0 | |
565 ; SI-NEXT: s_lshr_b32 s2, s1, 8 | |
566 ; SI-NEXT: v_or_b32_e32 v3, s3, v1 | |
567 ; SI-NEXT: v_and_b32_e32 v5, s12, v5 | |
568 ; SI-NEXT: v_and_b32_e32 v4, s13, v4 | |
569 ; SI-NEXT: s_lshr_b32 s8, s1, 24 | |
570 ; SI-NEXT: s_and_b32 s2, s2, s14 | |
571 ; SI-NEXT: s_mov_b32 s3, s9 | |
572 ; SI-NEXT: s_or_b64 s[2:3], s[2:3], s[8:9] | |
573 ; SI-NEXT: v_or_b32_e32 v4, v4, v5 | |
574 ; SI-NEXT: s_lshl_b32 s8, s0, 8 | |
575 ; SI-NEXT: v_or_b32_e32 v4, s2, v4 | |
576 ; SI-NEXT: v_mov_b32_e32 v5, s3 | |
577 ; SI-NEXT: s_lshl_b32 s3, s0, 24 | |
578 ; SI-NEXT: s_mov_b32 s2, s9 | |
579 ; SI-NEXT: s_and_b32 s11, s8, s12 | |
580 ; SI-NEXT: s_mov_b32 s16, 0xf0f0f0f | |
581 ; SI-NEXT: s_or_b64 s[2:3], s[2:3], s[10:11] | |
582 ; SI-NEXT: s_lshl_b64 s[10:11], s[0:1], 8 | |
583 ; SI-NEXT: s_lshl_b64 s[0:1], s[0:1], 24 | |
584 ; SI-NEXT: s_mov_b32 s17, 0xf0f0f0f0 | |
585 ; SI-NEXT: v_and_b32_e32 v0, s16, v2 | |
586 ; SI-NEXT: v_and_b32_e32 v1, s16, v3 | |
587 ; SI-NEXT: v_and_b32_e32 v2, s17, v2 | |
588 ; SI-NEXT: v_and_b32_e32 v3, s17, v3 | |
589 ; SI-NEXT: s_and_b32 s11, s11, s15 | |
590 ; SI-NEXT: s_mov_b32 s10, s9 | |
591 ; SI-NEXT: s_and_b32 s1, s1, s14 | |
592 ; SI-NEXT: s_mov_b32 s0, s9 | |
593 ; SI-NEXT: s_or_b64 s[0:1], s[0:1], s[10:11] | |
594 ; SI-NEXT: v_lshl_b64 v[0:1], v[0:1], 4 | |
595 ; SI-NEXT: v_lshr_b64 v[2:3], v[2:3], 4 | |
596 ; SI-NEXT: s_or_b64 s[0:1], s[2:3], s[0:1] | |
597 ; SI-NEXT: v_or_b32_e32 v6, s0, v4 | |
598 ; SI-NEXT: v_or_b32_e32 v7, s1, v5 | |
599 ; SI-NEXT: v_or_b32_e32 v2, v2, v0 | |
600 ; SI-NEXT: s_mov_b32 s18, 0x33333333 | |
601 ; SI-NEXT: v_or_b32_e32 v3, v3, v1 | |
602 ; SI-NEXT: s_mov_b32 s19, 0xcccccccc | |
603 ; SI-NEXT: v_and_b32_e32 v0, s18, v2 | |
604 ; SI-NEXT: v_and_b32_e32 v1, s18, v3 | |
605 ; SI-NEXT: v_and_b32_e32 v4, s16, v6 | |
606 ; SI-NEXT: v_and_b32_e32 v5, s16, v7 | |
607 ; SI-NEXT: v_and_b32_e32 v2, s19, v2 | |
608 ; SI-NEXT: v_and_b32_e32 v3, s19, v3 | |
609 ; SI-NEXT: v_and_b32_e32 v6, s17, v6 | |
610 ; SI-NEXT: v_and_b32_e32 v7, s17, v7 | |
611 ; SI-NEXT: v_lshl_b64 v[0:1], v[0:1], 2 | |
612 ; SI-NEXT: v_lshr_b64 v[2:3], v[2:3], 2 | |
613 ; SI-NEXT: v_lshl_b64 v[4:5], v[4:5], 4 | |
614 ; SI-NEXT: v_lshr_b64 v[6:7], v[6:7], 4 | |
615 ; SI-NEXT: v_or_b32_e32 v2, v2, v0 | |
616 ; SI-NEXT: v_or_b32_e32 v6, v6, v4 | |
617 ; SI-NEXT: v_or_b32_e32 v7, v7, v5 | |
618 ; SI-NEXT: s_mov_b32 s20, 0x55555555 | |
619 ; SI-NEXT: v_or_b32_e32 v3, v3, v1 | |
620 ; SI-NEXT: s_mov_b32 s21, 0xaaaaaaaa | |
621 ; SI-NEXT: v_and_b32_e32 v0, s20, v2 | |
622 ; SI-NEXT: v_and_b32_e32 v1, s20, v3 | |
623 ; SI-NEXT: v_and_b32_e32 v4, s18, v6 | |
624 ; SI-NEXT: v_and_b32_e32 v5, s18, v7 | |
625 ; SI-NEXT: v_and_b32_e32 v2, s21, v2 | |
626 ; SI-NEXT: v_and_b32_e32 v3, s21, v3 | |
627 ; SI-NEXT: v_and_b32_e32 v6, s19, v6 | |
628 ; SI-NEXT: v_and_b32_e32 v7, s19, v7 | |
629 ; SI-NEXT: v_lshl_b64 v[0:1], v[0:1], 1 | |
630 ; SI-NEXT: v_lshr_b64 v[2:3], v[2:3], 1 | |
631 ; SI-NEXT: v_lshl_b64 v[4:5], v[4:5], 2 | |
632 ; SI-NEXT: v_lshr_b64 v[6:7], v[6:7], 2 | |
633 ; SI-NEXT: v_or_b32_e32 v2, v2, v0 | |
634 ; SI-NEXT: v_or_b32_e32 v0, v6, v4 | |
635 ; SI-NEXT: v_or_b32_e32 v7, v7, v5 | |
636 ; SI-NEXT: v_and_b32_e32 v5, s20, v7 | |
637 ; SI-NEXT: v_and_b32_e32 v4, s20, v0 | |
638 ; SI-NEXT: v_and_b32_e32 v6, s21, v0 | |
639 ; SI-NEXT: v_and_b32_e32 v7, s21, v7 | |
640 ; SI-NEXT: v_lshl_b64 v[4:5], v[4:5], 1 | |
641 ; SI-NEXT: v_lshr_b64 v[6:7], v[6:7], 1 | |
642 ; SI-NEXT: v_or_b32_e32 v3, v3, v1 | |
643 ; SI-NEXT: s_mov_b32 s7, 0xf000 | |
644 ; SI-NEXT: s_mov_b32 s6, -1 | |
645 ; SI-NEXT: v_or_b32_e32 v0, v6, v4 | |
646 ; SI-NEXT: v_or_b32_e32 v1, v7, v5 | |
647 ; SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[4:7], 0 | |
648 ; SI-NEXT: s_endpgm | |
649 ; | |
650 ; FLAT-LABEL: s_brev_v2i64: | |
651 ; FLAT: ; %bb.0: | |
652 ; FLAT-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x24 | |
653 ; FLAT-NEXT: s_load_dwordx4 s[0:3], s[0:1], 0x34 | |
654 ; FLAT-NEXT: s_mov_b32 s9, 0 | |
655 ; FLAT-NEXT: s_mov_b32 s12, 0xff0000 | |
656 ; FLAT-NEXT: s_mov_b32 s13, 0xff000000 | |
657 ; FLAT-NEXT: s_mov_b32 s7, s9 | |
658 ; FLAT-NEXT: s_waitcnt lgkmcnt(0) | |
659 ; FLAT-NEXT: v_mov_b32_e32 v0, s2 | |
660 ; FLAT-NEXT: v_alignbit_b32 v1, s3, v0, 24 | |
661 ; FLAT-NEXT: v_alignbit_b32 v0, s3, v0, 8 | |
662 ; FLAT-NEXT: s_bfe_u32 s6, s3, 0x80010 | |
663 ; FLAT-NEXT: v_and_b32_e32 v1, s12, v1 | |
664 ; FLAT-NEXT: v_and_b32_e32 v0, s13, v0 | |
665 ; FLAT-NEXT: s_lshr_b32 s8, s3, 24 | |
666 ; FLAT-NEXT: s_lshl_b32 s6, s6, 8 | |
667 ; FLAT-NEXT: s_or_b64 s[6:7], s[6:7], s[8:9] | |
668 ; FLAT-NEXT: v_or_b32_e32 v0, v0, v1 | |
669 ; FLAT-NEXT: s_lshl_b32 s8, s2, 8 | |
670 ; FLAT-NEXT: v_or_b32_e32 v0, s6, v0 | |
671 ; FLAT-NEXT: v_mov_b32_e32 v1, s7 | |
672 ; FLAT-NEXT: s_and_b32 s11, s8, s12 | |
673 ; FLAT-NEXT: s_lshl_b32 s7, s2, 24 | |
674 ; FLAT-NEXT: s_mov_b32 s6, s9 | |
675 ; FLAT-NEXT: s_mov_b32 s10, s9 | |
676 ; FLAT-NEXT: s_or_b64 s[6:7], s[6:7], s[10:11] | |
677 ; FLAT-NEXT: s_lshl_b64 s[10:11], s[2:3], 8 | |
678 ; FLAT-NEXT: s_movk_i32 s14, 0xff | |
679 ; FLAT-NEXT: s_lshl_b64 s[2:3], s[2:3], 24 | |
680 ; FLAT-NEXT: s_mov_b32 s15, 0xff00 | |
681 ; FLAT-NEXT: s_and_b32 s11, s11, s14 | |
682 ; FLAT-NEXT: s_mov_b32 s10, s9 | |
683 ; FLAT-NEXT: s_and_b32 s3, s3, s15 | |
684 ; FLAT-NEXT: s_mov_b32 s2, s9 | |
685 ; FLAT-NEXT: s_or_b64 s[2:3], s[2:3], s[10:11] | |
686 ; FLAT-NEXT: s_or_b64 s[2:3], s[6:7], s[2:3] | |
687 ; FLAT-NEXT: v_mov_b32_e32 v4, s0 | |
688 ; FLAT-NEXT: v_alignbit_b32 v5, s1, v4, 24 | |
689 ; FLAT-NEXT: v_alignbit_b32 v4, s1, v4, 8 | |
690 ; FLAT-NEXT: v_or_b32_e32 v2, s2, v0 | |
691 ; FLAT-NEXT: s_bfe_u32 s2, s1, 0x80010 | |
692 ; FLAT-NEXT: v_or_b32_e32 v3, s3, v1 | |
693 ; FLAT-NEXT: v_and_b32_e32 v5, s12, v5 | |
694 ; FLAT-NEXT: v_and_b32_e32 v4, s13, v4 | |
695 ; FLAT-NEXT: s_lshr_b32 s8, s1, 24 | |
696 ; FLAT-NEXT: s_lshl_b32 s2, s2, 8 | |
697 ; FLAT-NEXT: s_mov_b32 s3, s9 | |
698 ; FLAT-NEXT: s_or_b64 s[2:3], s[2:3], s[8:9] | |
699 ; FLAT-NEXT: v_or_b32_e32 v4, v4, v5 | |
700 ; FLAT-NEXT: s_lshl_b32 s8, s0, 8 | |
701 ; FLAT-NEXT: v_or_b32_e32 v4, s2, v4 | |
702 ; FLAT-NEXT: v_mov_b32_e32 v5, s3 | |
703 ; FLAT-NEXT: s_lshl_b32 s3, s0, 24 | |
704 ; FLAT-NEXT: s_mov_b32 s2, s9 | |
705 ; FLAT-NEXT: s_and_b32 s11, s8, s12 | |
706 ; FLAT-NEXT: s_mov_b32 s16, 0xf0f0f0f | |
707 ; FLAT-NEXT: s_or_b64 s[2:3], s[2:3], s[10:11] | |
708 ; FLAT-NEXT: s_lshl_b64 s[10:11], s[0:1], 8 | |
709 ; FLAT-NEXT: s_lshl_b64 s[0:1], s[0:1], 24 | |
710 ; FLAT-NEXT: s_mov_b32 s17, 0xf0f0f0f0 | |
711 ; FLAT-NEXT: v_and_b32_e32 v0, s16, v2 | |
712 ; FLAT-NEXT: v_and_b32_e32 v1, s16, v3 | |
713 ; FLAT-NEXT: v_and_b32_e32 v2, s17, v2 | |
714 ; FLAT-NEXT: v_and_b32_e32 v3, s17, v3 | |
715 ; FLAT-NEXT: s_and_b32 s11, s11, s14 | |
716 ; FLAT-NEXT: s_mov_b32 s10, s9 | |
717 ; FLAT-NEXT: s_and_b32 s1, s1, s15 | |
718 ; FLAT-NEXT: s_mov_b32 s0, s9 | |
719 ; FLAT-NEXT: s_or_b64 s[0:1], s[0:1], s[10:11] | |
720 ; FLAT-NEXT: v_lshlrev_b64 v[0:1], 4, v[0:1] | |
721 ; FLAT-NEXT: v_lshrrev_b64 v[2:3], 4, v[2:3] | |
722 ; FLAT-NEXT: s_or_b64 s[0:1], s[2:3], s[0:1] | |
723 ; FLAT-NEXT: v_or_b32_e32 v6, s0, v4 | |
724 ; FLAT-NEXT: v_or_b32_e32 v7, s1, v5 | |
725 ; FLAT-NEXT: v_or_b32_e32 v2, v2, v0 | |
726 ; FLAT-NEXT: s_mov_b32 s18, 0x33333333 | |
727 ; FLAT-NEXT: v_or_b32_e32 v3, v3, v1 | |
728 ; FLAT-NEXT: s_mov_b32 s19, 0xcccccccc | |
729 ; FLAT-NEXT: v_and_b32_e32 v0, s18, v2 | |
730 ; FLAT-NEXT: v_and_b32_e32 v1, s18, v3 | |
731 ; FLAT-NEXT: v_and_b32_e32 v4, s16, v6 | |
732 ; FLAT-NEXT: v_and_b32_e32 v5, s16, v7 | |
733 ; FLAT-NEXT: v_and_b32_e32 v2, s19, v2 | |
734 ; FLAT-NEXT: v_and_b32_e32 v3, s19, v3 | |
735 ; FLAT-NEXT: v_and_b32_e32 v6, s17, v6 | |
736 ; FLAT-NEXT: v_and_b32_e32 v7, s17, v7 | |
737 ; FLAT-NEXT: v_lshlrev_b64 v[0:1], 2, v[0:1] | |
738 ; FLAT-NEXT: v_lshrrev_b64 v[2:3], 2, v[2:3] | |
739 ; FLAT-NEXT: v_lshlrev_b64 v[4:5], 4, v[4:5] | |
740 ; FLAT-NEXT: v_lshrrev_b64 v[6:7], 4, v[6:7] | |
741 ; FLAT-NEXT: v_or_b32_e32 v2, v2, v0 | |
742 ; FLAT-NEXT: v_or_b32_e32 v6, v6, v4 | |
743 ; FLAT-NEXT: v_or_b32_e32 v7, v7, v5 | |
744 ; FLAT-NEXT: s_mov_b32 s20, 0x55555555 | |
745 ; FLAT-NEXT: v_or_b32_e32 v3, v3, v1 | |
746 ; FLAT-NEXT: s_mov_b32 s21, 0xaaaaaaaa | |
747 ; FLAT-NEXT: v_and_b32_e32 v0, s20, v2 | |
748 ; FLAT-NEXT: v_and_b32_e32 v1, s20, v3 | |
749 ; FLAT-NEXT: v_and_b32_e32 v4, s18, v6 | |
750 ; FLAT-NEXT: v_and_b32_e32 v5, s18, v7 | |
751 ; FLAT-NEXT: v_and_b32_e32 v2, s21, v2 | |
752 ; FLAT-NEXT: v_and_b32_e32 v3, s21, v3 | |
753 ; FLAT-NEXT: v_and_b32_e32 v6, s19, v6 | |
754 ; FLAT-NEXT: v_and_b32_e32 v7, s19, v7 | |
755 ; FLAT-NEXT: v_lshlrev_b64 v[0:1], 1, v[0:1] | |
756 ; FLAT-NEXT: v_lshrrev_b64 v[2:3], 1, v[2:3] | |
757 ; FLAT-NEXT: v_lshlrev_b64 v[4:5], 2, v[4:5] | |
758 ; FLAT-NEXT: v_lshrrev_b64 v[6:7], 2, v[6:7] | |
759 ; FLAT-NEXT: v_or_b32_e32 v2, v2, v0 | |
760 ; FLAT-NEXT: v_or_b32_e32 v0, v6, v4 | |
761 ; FLAT-NEXT: v_or_b32_e32 v7, v7, v5 | |
762 ; FLAT-NEXT: v_and_b32_e32 v5, s20, v7 | |
763 ; FLAT-NEXT: v_and_b32_e32 v4, s20, v0 | |
764 ; FLAT-NEXT: v_and_b32_e32 v6, s21, v0 | |
765 ; FLAT-NEXT: v_and_b32_e32 v7, s21, v7 | |
766 ; FLAT-NEXT: v_lshlrev_b64 v[4:5], 1, v[4:5] | |
767 ; FLAT-NEXT: v_lshrrev_b64 v[6:7], 1, v[6:7] | |
768 ; FLAT-NEXT: v_or_b32_e32 v3, v3, v1 | |
769 ; FLAT-NEXT: s_mov_b32 s7, 0xf000 | |
770 ; FLAT-NEXT: s_mov_b32 s6, -1 | |
771 ; FLAT-NEXT: v_or_b32_e32 v0, v6, v4 | |
772 ; FLAT-NEXT: v_or_b32_e32 v1, v7, v5 | |
773 ; FLAT-NEXT: buffer_store_dwordx4 v[0:3], off, s[4:7], 0 | |
774 ; FLAT-NEXT: s_endpgm | |
101 %brev = call <2 x i64> @llvm.bitreverse.v2i64(<2 x i64> %val) #1 | 775 %brev = call <2 x i64> @llvm.bitreverse.v2i64(<2 x i64> %val) #1 |
102 store <2 x i64> %brev, <2 x i64> addrspace(1)* %out | 776 store <2 x i64> %brev, <2 x i64> addrspace(1)* %out |
103 ret void | 777 ret void |
104 } | 778 } |
105 | 779 |
106 ; FUNC-LABEL: {{^}}v_brev_v2i64: | |
107 define amdgpu_kernel void @v_brev_v2i64(<2 x i64> addrspace(1)* noalias %out, <2 x i64> addrspace(1)* noalias %valptr) #0 { | 780 define amdgpu_kernel void @v_brev_v2i64(<2 x i64> addrspace(1)* noalias %out, <2 x i64> addrspace(1)* noalias %valptr) #0 { |
781 ; SI-LABEL: v_brev_v2i64: | |
782 ; SI: ; %bb.0: | |
783 ; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9 | |
784 ; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0xb | |
785 ; SI-NEXT: s_mov_b32 s7, 0xf000 | |
786 ; SI-NEXT: s_mov_b32 s2, 0 | |
787 ; SI-NEXT: s_mov_b32 s3, s7 | |
788 ; SI-NEXT: v_lshlrev_b32_e32 v0, 4, v0 | |
789 ; SI-NEXT: v_mov_b32_e32 v1, 0 | |
790 ; SI-NEXT: s_waitcnt lgkmcnt(0) | |
791 ; SI-NEXT: buffer_load_dwordx4 v[0:3], v[0:1], s[0:3], 0 addr64 | |
792 ; SI-NEXT: s_mov_b32 s0, 0xff0000 | |
793 ; SI-NEXT: s_mov_b32 s1, 0xff000000 | |
794 ; SI-NEXT: s_mov_b32 s2, 0xff00 | |
795 ; SI-NEXT: s_movk_i32 s3, 0xff | |
796 ; SI-NEXT: s_mov_b32 s8, 0xf0f0f0f | |
797 ; SI-NEXT: s_mov_b32 s9, 0xf0f0f0f0 | |
798 ; SI-NEXT: s_mov_b32 s10, 0x33333333 | |
799 ; SI-NEXT: s_mov_b32 s11, 0xcccccccc | |
800 ; SI-NEXT: s_mov_b32 s12, 0x55555555 | |
801 ; SI-NEXT: s_mov_b32 s13, 0xaaaaaaaa | |
802 ; SI-NEXT: s_mov_b32 s6, -1 | |
803 ; SI-NEXT: s_waitcnt vmcnt(0) | |
804 ; SI-NEXT: v_lshl_b64 v[4:5], v[2:3], 8 | |
805 ; SI-NEXT: v_alignbit_b32 v6, v3, v2, 24 | |
806 ; SI-NEXT: v_alignbit_b32 v7, v3, v2, 8 | |
807 ; SI-NEXT: v_lshrrev_b32_e32 v8, 24, v3 | |
808 ; SI-NEXT: v_lshrrev_b32_e32 v9, 8, v3 | |
809 ; SI-NEXT: v_lshl_b64 v[3:4], v[2:3], 24 | |
810 ; SI-NEXT: v_lshlrev_b32_e32 v10, 24, v2 | |
811 ; SI-NEXT: v_lshlrev_b32_e32 v11, 8, v2 | |
812 ; SI-NEXT: v_lshl_b64 v[2:3], v[0:1], 8 | |
813 ; SI-NEXT: v_alignbit_b32 v12, v1, v0, 24 | |
814 ; SI-NEXT: v_alignbit_b32 v13, v1, v0, 8 | |
815 ; SI-NEXT: v_lshrrev_b32_e32 v14, 24, v1 | |
816 ; SI-NEXT: v_lshrrev_b32_e32 v15, 8, v1 | |
817 ; SI-NEXT: v_lshlrev_b32_e32 v16, 24, v0 | |
818 ; SI-NEXT: v_lshlrev_b32_e32 v17, 8, v0 | |
819 ; SI-NEXT: v_lshl_b64 v[0:1], v[0:1], 24 | |
820 ; SI-NEXT: v_and_b32_e32 v0, s0, v6 | |
821 ; SI-NEXT: v_and_b32_e32 v2, s1, v7 | |
822 ; SI-NEXT: v_and_b32_e32 v6, s2, v9 | |
823 ; SI-NEXT: v_and_b32_e32 v7, s0, v11 | |
824 ; SI-NEXT: v_and_b32_e32 v9, s0, v12 | |
825 ; SI-NEXT: v_and_b32_e32 v11, s1, v13 | |
826 ; SI-NEXT: v_or_b32_e32 v0, v2, v0 | |
827 ; SI-NEXT: v_or_b32_e32 v2, v6, v8 | |
828 ; SI-NEXT: v_and_b32_e32 v12, s2, v15 | |
829 ; SI-NEXT: v_and_b32_e32 v13, s0, v17 | |
830 ; SI-NEXT: v_and_b32_e32 v5, s3, v5 | |
831 ; SI-NEXT: v_and_b32_e32 v4, s2, v4 | |
832 ; SI-NEXT: v_and_b32_e32 v3, s3, v3 | |
833 ; SI-NEXT: v_and_b32_e32 v1, s2, v1 | |
834 ; SI-NEXT: v_or_b32_e32 v6, v10, v7 | |
835 ; SI-NEXT: v_or_b32_e32 v7, v11, v9 | |
836 ; SI-NEXT: v_or_b32_e32 v2, v0, v2 | |
837 ; SI-NEXT: v_or_b32_e32 v8, v12, v14 | |
838 ; SI-NEXT: v_or_b32_e32 v0, v4, v5 | |
839 ; SI-NEXT: v_or_b32_e32 v1, v1, v3 | |
840 ; SI-NEXT: v_or_b32_e32 v9, v16, v13 | |
841 ; SI-NEXT: v_or_b32_e32 v5, v7, v8 | |
842 ; SI-NEXT: v_or_b32_e32 v3, v6, v0 | |
843 ; SI-NEXT: v_or_b32_e32 v7, v9, v1 | |
844 ; SI-NEXT: v_and_b32_e32 v0, s8, v2 | |
845 ; SI-NEXT: v_and_b32_e32 v1, s8, v3 | |
846 ; SI-NEXT: v_and_b32_e32 v2, s9, v2 | |
847 ; SI-NEXT: v_and_b32_e32 v3, s9, v3 | |
848 ; SI-NEXT: v_and_b32_e32 v4, s8, v5 | |
849 ; SI-NEXT: v_and_b32_e32 v6, s9, v5 | |
850 ; SI-NEXT: v_and_b32_e32 v5, s8, v7 | |
851 ; SI-NEXT: v_and_b32_e32 v7, s9, v7 | |
852 ; SI-NEXT: v_lshl_b64 v[0:1], v[0:1], 4 | |
853 ; SI-NEXT: v_lshr_b64 v[2:3], v[2:3], 4 | |
854 ; SI-NEXT: v_lshl_b64 v[4:5], v[4:5], 4 | |
855 ; SI-NEXT: v_lshr_b64 v[6:7], v[6:7], 4 | |
856 ; SI-NEXT: v_or_b32_e32 v3, v3, v1 | |
857 ; SI-NEXT: v_or_b32_e32 v2, v2, v0 | |
858 ; SI-NEXT: v_or_b32_e32 v7, v7, v5 | |
859 ; SI-NEXT: v_or_b32_e32 v6, v6, v4 | |
860 ; SI-NEXT: v_and_b32_e32 v1, s10, v3 | |
861 ; SI-NEXT: v_and_b32_e32 v0, s10, v2 | |
862 ; SI-NEXT: v_and_b32_e32 v5, s10, v7 | |
863 ; SI-NEXT: v_and_b32_e32 v4, s10, v6 | |
864 ; SI-NEXT: v_and_b32_e32 v3, s11, v3 | |
865 ; SI-NEXT: v_and_b32_e32 v2, s11, v2 | |
866 ; SI-NEXT: v_and_b32_e32 v7, s11, v7 | |
867 ; SI-NEXT: v_and_b32_e32 v6, s11, v6 | |
868 ; SI-NEXT: v_lshl_b64 v[0:1], v[0:1], 2 | |
869 ; SI-NEXT: v_lshr_b64 v[2:3], v[2:3], 2 | |
870 ; SI-NEXT: v_lshl_b64 v[4:5], v[4:5], 2 | |
871 ; SI-NEXT: v_lshr_b64 v[6:7], v[6:7], 2 | |
872 ; SI-NEXT: v_or_b32_e32 v3, v3, v1 | |
873 ; SI-NEXT: v_or_b32_e32 v2, v2, v0 | |
874 ; SI-NEXT: v_or_b32_e32 v7, v7, v5 | |
875 ; SI-NEXT: v_or_b32_e32 v6, v6, v4 | |
876 ; SI-NEXT: v_and_b32_e32 v1, s12, v3 | |
877 ; SI-NEXT: v_and_b32_e32 v0, s12, v2 | |
878 ; SI-NEXT: v_and_b32_e32 v5, s12, v7 | |
879 ; SI-NEXT: v_and_b32_e32 v4, s12, v6 | |
880 ; SI-NEXT: v_and_b32_e32 v3, s13, v3 | |
881 ; SI-NEXT: v_and_b32_e32 v2, s13, v2 | |
882 ; SI-NEXT: v_and_b32_e32 v7, s13, v7 | |
883 ; SI-NEXT: v_and_b32_e32 v6, s13, v6 | |
884 ; SI-NEXT: v_lshl_b64 v[0:1], v[0:1], 1 | |
885 ; SI-NEXT: v_lshr_b64 v[2:3], v[2:3], 1 | |
886 ; SI-NEXT: v_lshl_b64 v[4:5], v[4:5], 1 | |
887 ; SI-NEXT: v_lshr_b64 v[6:7], v[6:7], 1 | |
888 ; SI-NEXT: v_or_b32_e32 v3, v3, v1 | |
889 ; SI-NEXT: v_or_b32_e32 v2, v2, v0 | |
890 ; SI-NEXT: v_or_b32_e32 v1, v7, v5 | |
891 ; SI-NEXT: v_or_b32_e32 v0, v6, v4 | |
892 ; SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[4:7], 0 | |
893 ; SI-NEXT: s_endpgm | |
894 ; | |
895 ; FLAT-LABEL: v_brev_v2i64: | |
896 ; FLAT: ; %bb.0: | |
897 ; FLAT-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x24 | |
898 ; FLAT-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x2c | |
899 ; FLAT-NEXT: v_lshlrev_b32_e32 v0, 4, v0 | |
900 ; FLAT-NEXT: v_mov_b32_e32 v8, 8 | |
901 ; FLAT-NEXT: s_mov_b32 s2, 0xff0000 | |
902 ; FLAT-NEXT: s_mov_b32 s3, 0xff000000 | |
903 ; FLAT-NEXT: s_waitcnt lgkmcnt(0) | |
904 ; FLAT-NEXT: v_mov_b32_e32 v1, s1 | |
905 ; FLAT-NEXT: v_add_u32_e32 v0, vcc, s0, v0 | |
906 ; FLAT-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc | |
907 ; FLAT-NEXT: flat_load_dwordx4 v[0:3], v[0:1] | |
908 ; FLAT-NEXT: s_mov_b32 s0, 0xff00 | |
909 ; FLAT-NEXT: s_mov_b32 s1, 0xf0f0f0f | |
910 ; FLAT-NEXT: s_mov_b32 s8, 0xf0f0f0f0 | |
911 ; FLAT-NEXT: s_mov_b32 s9, 0x33333333 | |
912 ; FLAT-NEXT: s_mov_b32 s10, 0xcccccccc | |
913 ; FLAT-NEXT: s_mov_b32 s11, 0x55555555 | |
914 ; FLAT-NEXT: s_mov_b32 s12, 0xaaaaaaaa | |
915 ; FLAT-NEXT: s_mov_b32 s7, 0xf000 | |
916 ; FLAT-NEXT: s_mov_b32 s6, -1 | |
917 ; FLAT-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0) | |
918 ; FLAT-NEXT: v_lshlrev_b64 v[4:5], 24, v[2:3] | |
919 ; FLAT-NEXT: v_lshlrev_b32_sdwa v11, v8, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 | |
920 ; FLAT-NEXT: v_lshlrev_b32_sdwa v14, v8, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_2 | |
921 ; FLAT-NEXT: v_lshlrev_b64 v[8:9], 8, v[0:1] | |
922 ; FLAT-NEXT: v_lshlrev_b64 v[6:7], 8, v[2:3] | |
923 ; FLAT-NEXT: v_alignbit_b32 v4, v3, v2, 24 | |
924 ; FLAT-NEXT: v_alignbit_b32 v10, v3, v2, 8 | |
925 ; FLAT-NEXT: v_or_b32_sdwa v3, v11, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3 | |
926 ; FLAT-NEXT: v_alignbit_b32 v12, v1, v0, 24 | |
927 ; FLAT-NEXT: v_alignbit_b32 v13, v1, v0, 8 | |
928 ; FLAT-NEXT: v_lshlrev_b32_e32 v8, 24, v0 | |
929 ; FLAT-NEXT: v_lshlrev_b32_e32 v15, 8, v0 | |
930 ; FLAT-NEXT: v_or_b32_sdwa v11, v14, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_3 | |
931 ; FLAT-NEXT: v_lshlrev_b64 v[0:1], 24, v[0:1] | |
932 ; FLAT-NEXT: v_lshlrev_b32_e32 v6, 24, v2 | |
933 ; FLAT-NEXT: v_lshlrev_b32_e32 v2, 8, v2 | |
934 ; FLAT-NEXT: v_and_b32_e32 v0, s2, v4 | |
935 ; FLAT-NEXT: v_and_b32_e32 v4, s3, v10 | |
936 ; FLAT-NEXT: v_and_b32_e32 v2, s2, v2 | |
937 ; FLAT-NEXT: v_or_b32_e32 v0, v4, v0 | |
938 ; FLAT-NEXT: v_and_b32_e32 v1, s0, v1 | |
939 ; FLAT-NEXT: v_and_b32_e32 v10, s2, v12 | |
940 ; FLAT-NEXT: v_and_b32_e32 v12, s3, v13 | |
941 ; FLAT-NEXT: v_and_b32_e32 v4, s0, v5 | |
942 ; FLAT-NEXT: v_and_b32_e32 v13, s2, v15 | |
943 ; FLAT-NEXT: v_or_b32_e32 v5, v12, v10 | |
944 ; FLAT-NEXT: v_or_b32_e32 v2, v6, v2 | |
945 ; FLAT-NEXT: v_or_b32_e32 v3, v0, v3 | |
946 ; FLAT-NEXT: v_or_b32_sdwa v0, v4, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 | |
947 ; FLAT-NEXT: v_or_b32_e32 v6, v8, v13 | |
948 ; FLAT-NEXT: v_or_b32_sdwa v1, v1, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0 | |
949 ; FLAT-NEXT: v_or_b32_e32 v7, v2, v0 | |
950 ; FLAT-NEXT: v_or_b32_e32 v5, v5, v11 | |
951 ; FLAT-NEXT: v_or_b32_e32 v8, v6, v1 | |
952 ; FLAT-NEXT: v_and_b32_e32 v0, s1, v3 | |
953 ; FLAT-NEXT: v_and_b32_e32 v1, s1, v7 | |
954 ; FLAT-NEXT: v_and_b32_e32 v2, s8, v3 | |
955 ; FLAT-NEXT: v_and_b32_e32 v3, s8, v7 | |
956 ; FLAT-NEXT: v_and_b32_e32 v4, s1, v5 | |
957 ; FLAT-NEXT: v_and_b32_e32 v6, s8, v5 | |
958 ; FLAT-NEXT: v_and_b32_e32 v5, s1, v8 | |
959 ; FLAT-NEXT: v_and_b32_e32 v7, s8, v8 | |
960 ; FLAT-NEXT: v_lshlrev_b64 v[0:1], 4, v[0:1] | |
961 ; FLAT-NEXT: v_lshrrev_b64 v[2:3], 4, v[2:3] | |
962 ; FLAT-NEXT: v_lshlrev_b64 v[4:5], 4, v[4:5] | |
963 ; FLAT-NEXT: v_lshrrev_b64 v[6:7], 4, v[6:7] | |
964 ; FLAT-NEXT: v_or_b32_e32 v3, v3, v1 | |
965 ; FLAT-NEXT: v_or_b32_e32 v2, v2, v0 | |
966 ; FLAT-NEXT: v_or_b32_e32 v7, v7, v5 | |
967 ; FLAT-NEXT: v_or_b32_e32 v6, v6, v4 | |
968 ; FLAT-NEXT: v_and_b32_e32 v1, s9, v3 | |
969 ; FLAT-NEXT: v_and_b32_e32 v0, s9, v2 | |
970 ; FLAT-NEXT: v_and_b32_e32 v5, s9, v7 | |
971 ; FLAT-NEXT: v_and_b32_e32 v4, s9, v6 | |
972 ; FLAT-NEXT: v_and_b32_e32 v3, s10, v3 | |
973 ; FLAT-NEXT: v_and_b32_e32 v2, s10, v2 | |
974 ; FLAT-NEXT: v_and_b32_e32 v7, s10, v7 | |
975 ; FLAT-NEXT: v_and_b32_e32 v6, s10, v6 | |
976 ; FLAT-NEXT: v_lshlrev_b64 v[0:1], 2, v[0:1] | |
977 ; FLAT-NEXT: v_lshrrev_b64 v[2:3], 2, v[2:3] | |
978 ; FLAT-NEXT: v_lshlrev_b64 v[4:5], 2, v[4:5] | |
979 ; FLAT-NEXT: v_lshrrev_b64 v[6:7], 2, v[6:7] | |
980 ; FLAT-NEXT: v_or_b32_e32 v3, v3, v1 | |
981 ; FLAT-NEXT: v_or_b32_e32 v2, v2, v0 | |
982 ; FLAT-NEXT: v_or_b32_e32 v7, v7, v5 | |
983 ; FLAT-NEXT: v_or_b32_e32 v6, v6, v4 | |
984 ; FLAT-NEXT: v_and_b32_e32 v1, s11, v3 | |
985 ; FLAT-NEXT: v_and_b32_e32 v0, s11, v2 | |
986 ; FLAT-NEXT: v_and_b32_e32 v5, s11, v7 | |
987 ; FLAT-NEXT: v_and_b32_e32 v4, s11, v6 | |
988 ; FLAT-NEXT: v_and_b32_e32 v3, s12, v3 | |
989 ; FLAT-NEXT: v_and_b32_e32 v2, s12, v2 | |
990 ; FLAT-NEXT: v_and_b32_e32 v7, s12, v7 | |
991 ; FLAT-NEXT: v_and_b32_e32 v6, s12, v6 | |
992 ; FLAT-NEXT: v_lshlrev_b64 v[0:1], 1, v[0:1] | |
993 ; FLAT-NEXT: v_lshrrev_b64 v[2:3], 1, v[2:3] | |
994 ; FLAT-NEXT: v_lshlrev_b64 v[4:5], 1, v[4:5] | |
995 ; FLAT-NEXT: v_lshrrev_b64 v[6:7], 1, v[6:7] | |
996 ; FLAT-NEXT: v_or_b32_e32 v3, v3, v1 | |
997 ; FLAT-NEXT: v_or_b32_e32 v2, v2, v0 | |
998 ; FLAT-NEXT: v_or_b32_e32 v1, v7, v5 | |
999 ; FLAT-NEXT: v_or_b32_e32 v0, v6, v4 | |
1000 ; FLAT-NEXT: buffer_store_dwordx4 v[0:3], off, s[4:7], 0 | |
1001 ; FLAT-NEXT: s_endpgm | |
108 %tid = call i32 @llvm.amdgcn.workitem.id.x() | 1002 %tid = call i32 @llvm.amdgcn.workitem.id.x() |
109 %gep = getelementptr <2 x i64> , <2 x i64> addrspace(1)* %valptr, i32 %tid | 1003 %gep = getelementptr <2 x i64> , <2 x i64> addrspace(1)* %valptr, i32 %tid |
110 %val = load <2 x i64>, <2 x i64> addrspace(1)* %gep | 1004 %val = load <2 x i64>, <2 x i64> addrspace(1)* %gep |
111 %brev = call <2 x i64> @llvm.bitreverse.v2i64(<2 x i64> %val) #1 | 1005 %brev = call <2 x i64> @llvm.bitreverse.v2i64(<2 x i64> %val) #1 |
112 store <2 x i64> %brev, <2 x i64> addrspace(1)* %out | 1006 store <2 x i64> %brev, <2 x i64> addrspace(1)* %out |
113 ret void | 1007 ret void |
114 } | 1008 } |
115 | 1009 |
116 ; FUNC-LABEL: {{^}}missing_truncate_promote_bitreverse: | |
117 ; VI: v_bfrev_b32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 | |
118 define float @missing_truncate_promote_bitreverse(i32 %arg) { | 1010 define float @missing_truncate_promote_bitreverse(i32 %arg) { |
1011 ; SI-LABEL: missing_truncate_promote_bitreverse: | |
1012 ; SI: ; %bb.0: ; %bb | |
1013 ; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) | |
1014 ; SI-NEXT: v_bfrev_b32_e32 v0, v0 | |
1015 ; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v0 | |
1016 ; SI-NEXT: v_cvt_f32_f16_e32 v0, v0 | |
1017 ; SI-NEXT: s_setpc_b64 s[30:31] | |
1018 ; | |
1019 ; FLAT-LABEL: missing_truncate_promote_bitreverse: | |
1020 ; FLAT: ; %bb.0: ; %bb | |
1021 ; FLAT-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0) | |
1022 ; FLAT-NEXT: v_bfrev_b32_e32 v0, v0 | |
1023 ; FLAT-NEXT: v_cvt_f32_f16_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 | |
1024 ; FLAT-NEXT: s_setpc_b64 s[30:31] | |
119 bb: | 1025 bb: |
120 %tmp = trunc i32 %arg to i16 | 1026 %tmp = trunc i32 %arg to i16 |
121 %tmp1 = call i16 @llvm.bitreverse.i16(i16 %tmp) | 1027 %tmp1 = call i16 @llvm.bitreverse.i16(i16 %tmp) |
122 %tmp2 = bitcast i16 %tmp1 to half | 1028 %tmp2 = bitcast i16 %tmp1 to half |
123 %tmp3 = fpext half %tmp2 to float | 1029 %tmp3 = fpext half %tmp2 to float |