121
|
1 ; RUN: llc -march=amdgcn -mcpu=gfx901 -mattr=-flat-for-global -verify-machineinstrs -enable-packed-inlinable-literals < %s | FileCheck -check-prefix=GCN -check-prefix=GFX9 %s
|
|
2 ; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI -check-prefix=CIVI %s
|
|
3 ; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=CI -check-prefix=CIVI %s
|
|
4
|
|
; Uniform (SGPR) <2 x i16> shift. The CHECK lines expect GFX9 to select a
; single packed v_pk_lshlrev_b16 (operands moved to a VGPR first), while
; VI lowers through SDWA 32-bit shifts and CI through plain 32-bit
; shift/mask/or sequences.
; GCN-LABEL: {{^}}s_shl_v2i16:
; GFX9: s_load_dword [[LHS:s[0-9]+]]
; GFX9: s_load_dword [[RHS:s[0-9]+]]
; GFX9: v_mov_b32_e32 [[VLHS:v[0-9]+]], [[LHS]]
; GFX9: v_pk_lshlrev_b16 [[RESULT:v[0-9]+]], [[RHS]], [[VLHS]]

; VI: v_lshlrev_b32_e32
; VI: v_lshlrev_b32_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; VI: v_or_b32_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD

; CI-DAG: v_lshlrev_b32_e32
; CI-DAG: v_and_b32_e32 v{{[0-9]+}}, 0xffff, v{{[0-9]+}}
; CI-DAG: v_lshlrev_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; CI-DAG: v_lshlrev_b32_e32 v{{[0-9]+}}, 16, v{{[0-9]+}}
; CI: v_or_b32_e32
define amdgpu_kernel void @s_shl_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> %lhs, <2 x i16> %rhs) #0 {
  ; Both operands arrive as uniform kernel arguments; only the store is per-lane.
  %result = shl <2 x i16> %lhs, %rhs
  store <2 x i16> %result, <2 x i16> addrspace(1)* %out
  ret void
}
|
|
25
|
|
; Divergent <2 x i16> shift: both operands are loaded per lane, so GFX9
; should select v_pk_lshlrev_b16 directly on the loaded VGPRs; VI uses
; 16-bit shifts with an SDWA high-half variant, and CI unpacks to 32-bit
; shift/mask/or.
; GCN-LABEL: {{^}}v_shl_v2i16:
; GCN: {{buffer|flat|global}}_load_dword [[LHS:v[0-9]+]]
; GCN: {{buffer|flat|global}}_load_dword [[RHS:v[0-9]+]]
; GFX9: v_pk_lshlrev_b16 [[RESULT:v[0-9]+]], [[RHS]], [[LHS]]

; VI: v_lshlrev_b16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; VI: v_lshlrev_b16_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; VI: v_or_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}

; CI: s_mov_b32 [[MASK:s[0-9]+]], 0xffff{{$}}
; CI: v_lshrrev_b32_e32 v{{[0-9]+}}, 16, [[LHS]]
; CI: v_lshrrev_b32_e32 v{{[0-9]+}}, 16, v{{[0-9]+}}
; CI: v_lshlrev_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; CI: v_lshl_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; CI: v_lshlrev_b32_e32 v{{[0-9]+}}, 16, v{{[0-9]+}}
; CI: v_and_b32_e32 v{{[0-9]+}}, [[MASK]], v{{[0-9]+}}
; CI: v_or_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
define amdgpu_kernel void @v_shl_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %in) #0 {
  ; Per-lane addressing: index both pointers by the workitem id.
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %in.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %in, i64 %tid.ext
  %out.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %out, i64 %tid.ext
  ; Shift amounts come from the vector immediately after %a in memory.
  %b_ptr = getelementptr <2 x i16>, <2 x i16> addrspace(1)* %in.gep, i32 1
  %a = load <2 x i16>, <2 x i16> addrspace(1)* %in.gep
  %b = load <2 x i16>, <2 x i16> addrspace(1)* %b_ptr
  %result = shl <2 x i16> %a, %b
  store <2 x i16> %result, <2 x i16> addrspace(1)* %out.gep
  ret void
}
|
|
55
|
|
; Mixed operands: divergent (loaded) value shifted by a uniform SGPR
; amount. GFX9 should use the SGPR directly as the shift-amount operand
; of v_pk_lshlrev_b16 (src0 of the rev form is the shift amount).
; GCN-LABEL: {{^}}shl_v_s_v2i16:
; GFX9: s_load_dword [[RHS:s[0-9]+]]
; GFX9: {{buffer|flat|global}}_load_dword [[LHS:v[0-9]+]]
; GFX9: v_pk_lshlrev_b16 [[RESULT:v[0-9]+]], [[RHS]], [[LHS]]
define amdgpu_kernel void @shl_v_s_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %in, <2 x i16> %sgpr) #0 {
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %in.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %in, i64 %tid.ext
  %out.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %out, i64 %tid.ext
  %vgpr = load <2 x i16>, <2 x i16> addrspace(1)* %in.gep
  ; VGPR value, SGPR shift amount.
  %result = shl <2 x i16> %vgpr, %sgpr
  store <2 x i16> %result, <2 x i16> addrspace(1)* %out.gep
  ret void
}
|
|
70
|
|
; Mirror of shl_v_s_v2i16: uniform SGPR value shifted by a divergent
; (loaded) amount. The SGPR ends up as src1 of v_pk_lshlrev_b16 since
; the rev form takes the shift amount in src0.
; GCN-LABEL: {{^}}shl_s_v_v2i16:
; GFX9: s_load_dword [[LHS:s[0-9]+]]
; GFX9: {{buffer|flat|global}}_load_dword [[RHS:v[0-9]+]]
; GFX9: v_pk_lshlrev_b16 [[RESULT:v[0-9]+]], [[RHS]], [[LHS]]
define amdgpu_kernel void @shl_s_v_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %in, <2 x i16> %sgpr) #0 {
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %in.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %in, i64 %tid.ext
  %out.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %out, i64 %tid.ext
  %vgpr = load <2 x i16>, <2 x i16> addrspace(1)* %in.gep
  ; SGPR value, VGPR shift amount.
  %result = shl <2 x i16> %sgpr, %vgpr
  store <2 x i16> %result, <2 x i16> addrspace(1)* %out.gep
  ret void
}
|
|
85
|
|
; Splat-immediate value shifted by a loaded amount: <8, 8> << %vgpr.
; Checks the splat constant 8 folds as an inline operand of
; v_pk_lshlrev_b16 (requires -enable-packed-inlinable-literals).
; GCN-LABEL: {{^}}shl_imm_v_v2i16:
; GCN: {{buffer|flat|global}}_load_dword [[RHS:v[0-9]+]]
; GFX9: v_pk_lshlrev_b16 [[RESULT:v[0-9]+]], [[RHS]], 8
define amdgpu_kernel void @shl_imm_v_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %in) #0 {
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %in.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %in, i64 %tid.ext
  %out.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %out, i64 %tid.ext
  %vgpr = load <2 x i16>, <2 x i16> addrspace(1)* %in.gep
  ; Constant value, variable shift amount.
  %result = shl <2 x i16> <i16 8, i16 8>, %vgpr
  store <2 x i16> %result, <2 x i16> addrspace(1)* %out.gep
  ret void
}
|
|
99
|
|
; Loaded value shifted by a splat immediate: %vgpr << <8, 8>. The splat
; shift amount 8 should become the inline src0 of v_pk_lshlrev_b16.
; GCN-LABEL: {{^}}shl_v_imm_v2i16:
; GCN: {{buffer|flat|global}}_load_dword [[LHS:v[0-9]+]]
; GFX9: v_pk_lshlrev_b16 [[RESULT:v[0-9]+]], 8, [[LHS]]
define amdgpu_kernel void @shl_v_imm_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %in) #0 {
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %in.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %in, i64 %tid.ext
  %out.gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %out, i64 %tid.ext
  %vgpr = load <2 x i16>, <2 x i16> addrspace(1)* %in.gep
  ; Variable value, constant shift amount.
  %result = shl <2 x i16> %vgpr, <i16 8, i16 8>
  store <2 x i16> %result, <2 x i16> addrspace(1)* %out.gep
  ret void
}
|
|
113
|
|
; Wider vector: <4 x i16> should split into two packed v2i16 operations
; on GFX9 (two v_pk_lshlrev_b16), with dwordx2 loads/stores on all
; targets.
; GCN-LABEL: {{^}}v_shl_v4i16:
; GCN: {{buffer|flat|global}}_load_dwordx2
; GCN: {{buffer|flat|global}}_load_dwordx2
; GFX9: v_pk_lshlrev_b16 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; GFX9: v_pk_lshlrev_b16 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; GCN: {{buffer|flat|global}}_store_dwordx2
define amdgpu_kernel void @v_shl_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> addrspace(1)* %in) #0 {
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %in.gep = getelementptr inbounds <4 x i16>, <4 x i16> addrspace(1)* %in, i64 %tid.ext
  %out.gep = getelementptr inbounds <4 x i16>, <4 x i16> addrspace(1)* %out, i64 %tid.ext
  ; Shift amounts come from the vector immediately after %a in memory.
  %b_ptr = getelementptr <4 x i16>, <4 x i16> addrspace(1)* %in.gep, i32 1
  %a = load <4 x i16>, <4 x i16> addrspace(1)* %in.gep
  %b = load <4 x i16>, <4 x i16> addrspace(1)* %b_ptr
  %result = shl <4 x i16> %a, %b
  store <4 x i16> %result, <4 x i16> addrspace(1)* %out.gep
  ret void
}
|
|
132
|
|
; <4 x i16> shifted by a splat immediate: splits into two packed ops on
; GFX9, each with the inline shift amount 8.
; GCN-LABEL: {{^}}shl_v_imm_v4i16:
; GCN: {{buffer|flat|global}}_load_dwordx2
; GFX9: v_pk_lshlrev_b16 v{{[0-9]+}}, 8, v{{[0-9]+}}
; GFX9: v_pk_lshlrev_b16 v{{[0-9]+}}, 8, v{{[0-9]+}}
; GCN: {{buffer|flat|global}}_store_dwordx2
define amdgpu_kernel void @shl_v_imm_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> addrspace(1)* %in) #0 {
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %in.gep = getelementptr inbounds <4 x i16>, <4 x i16> addrspace(1)* %in, i64 %tid.ext
  %out.gep = getelementptr inbounds <4 x i16>, <4 x i16> addrspace(1)* %out, i64 %tid.ext
  %vgpr = load <4 x i16>, <4 x i16> addrspace(1)* %in.gep
  ; Variable value, constant splat shift amount.
  %result = shl <4 x i16> %vgpr, <i16 8, i16 8, i16 8, i16 8>
  store <4 x i16> %result, <4 x i16> addrspace(1)* %out.gep
  ret void
}
|
|
148
|
|
; Intrinsic returning the workitem (thread) id in the x dimension.
declare i32 @llvm.amdgcn.workitem.id.x() #1

attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }
|