; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=fiji -amdgpu-sdwa-peephole=0 -mattr=-fp64-fp16-denormals -verify-machineinstrs < %s | FileCheck -check-prefix=NOSDWA -check-prefix=GCN %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=fiji -amdgpu-sdwa-peephole -mattr=-fp64-fp16-denormals -verify-machineinstrs < %s | FileCheck -check-prefix=VI -check-prefix=SDWA -check-prefix=GCN %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=gfx900 -amdgpu-sdwa-peephole -mattr=-fp64-fp16-denormals -verify-machineinstrs < %s | FileCheck -check-prefix=GFX9 -check-prefix=SDWA -check-prefix=GCN %s

; With the peephole enabled, the high-half operand should fold into an SDWA add.
; GCN-LABEL: {{^}}add_shr_i32:
; NOSDWA: v_lshrrev_b32_e32 v[[DST:[0-9]+]], 16, v{{[0-9]+}}
; NOSDWA: v_add_i32_e32 v{{[0-9]+}}, vcc, v{{[0-9]+}}, v[[DST]]
; NOSDWA-NOT: v_add_i32_sdwa

; SDWA: v_add_i32_sdwa v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1

define amdgpu_kernel void @add_shr_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
  %a = load i32, i32 addrspace(1)* %in, align 4
  %shr = lshr i32 %a, 16
  %add = add i32 %a, %shr
  store i32 %add, i32 addrspace(1)* %out, align 4
  ret void
}

; Same pattern as the add test above, but for a reversed subtract.
; GCN-LABEL: {{^}}sub_shr_i32:
; NOSDWA: v_lshrrev_b32_e32 v[[DST:[0-9]+]], 16, v{{[0-9]+}}
; NOSDWA: v_subrev_i32_e32 v{{[0-9]+}}, vcc, v{{[0-9]+}}, v[[DST]]
; NOSDWA-NOT: v_subrev_i32_sdwa

; SDWA: v_subrev_i32_sdwa v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1

define amdgpu_kernel void @sub_shr_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
  %a = load i32, i32 addrspace(1)* %in, align 4
  %shr = lshr i32 %a, 16
  %sub = sub i32 %shr, %a
  store i32 %sub, i32 addrspace(1)* %out, align 4
  ret void
}

; Both multiply operands are high halves, so both src selects become WORD_1.
; GCN-LABEL: {{^}}mul_shr_i32:
; NOSDWA: v_lshrrev_b32_e32 v[[DST0:[0-9]+]], 16, v{{[0-9]+}}
; NOSDWA: v_lshrrev_b32_e32 v[[DST1:[0-9]+]], 16, v{{[0-9]+}}
; NOSDWA: v_mul_u32_u24_e32 v{{[0-9]+}}, v[[DST0]], v[[DST1]]
; NOSDWA-NOT: v_mul_u32_u24_sdwa

; SDWA: v_mul_u32_u24_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1

define amdgpu_kernel void @mul_shr_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in1, i32 addrspace(1)* %in2) {
  %a = load i32, i32 addrspace(1)* %in1, align 4
  %b = load i32, i32 addrspace(1)* %in2, align 4
  %shra = lshr i32 %a, 16
  %shrb = lshr i32 %b, 16
  %mul = mul i32 %shra, %shrb
  store i32 %mul, i32 addrspace(1)* %out, align 4
  ret void
}

; Scalar i16 multiply: no packed halves involved, so no SDWA form is expected
; even with the peephole enabled.
; GCN-LABEL: {{^}}mul_i16:
; NOSDWA: v_mul_lo_i32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; NOSDWA-NOT: v_mul_u32_u24_sdwa
; SDWA: v_mul_lo_i32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; SDWA-NOT: v_mul_u32_u24_sdwa

define amdgpu_kernel void @mul_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %ina, i16 addrspace(1)* %inb) {
entry:
  %a = load i16, i16 addrspace(1)* %ina, align 4
  %b = load i16, i16 addrspace(1)* %inb, align 4
  %mul = mul i16 %a, %b
  store i16 %mul, i16 addrspace(1)* %out, align 4
  ret void
}

; <2 x i16> multiply: VI uses per-half SDWA multiplies merged with an SDWA or;
; gfx900 has a native packed multiply instead.
; GCN-LABEL: {{^}}mul_v2i16:
; NOSDWA: v_lshrrev_b32_e32 v[[DST0:[0-9]+]], 16, v{{[0-9]+}}
; NOSDWA: v_lshrrev_b32_e32 v[[DST1:[0-9]+]], 16, v{{[0-9]+}}
; NOSDWA: v_mul_u32_u24_e32 v[[DST_MUL:[0-9]+]], v[[DST0]], v[[DST1]]
; NOSDWA: v_lshlrev_b32_e32 v[[DST_SHL:[0-9]+]], 16, v[[DST_MUL]]
; NOSDWA: v_or_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v[[DST_SHL]]
; NOSDWA-NOT: v_mul_u32_u24_sdwa

; VI-DAG: v_mul_u32_u24_sdwa v[[DST_MUL_LO:[0-9]+]], v{{[0-9]+}}, v{{[0-9]+}} dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:WORD_0
; VI-DAG: v_mul_u32_u24_sdwa v[[DST_MUL_HI:[0-9]+]], v{{[0-9]+}}, v{{[0-9]+}} dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; VI: v_or_b32_sdwa v{{[0-9]+}}, v[[DST_MUL_LO]], v[[DST_MUL_HI]] dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD

; GFX9: v_pk_mul_lo_u16 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}

define amdgpu_kernel void @mul_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %ina, <2 x i16> addrspace(1)* %inb) {
entry:
  %a = load <2 x i16>, <2 x i16> addrspace(1)* %ina, align 4
  %b = load <2 x i16>, <2 x i16> addrspace(1)* %inb, align 4
  %mul = mul <2 x i16> %a, %b
  store <2 x i16> %mul, <2 x i16> addrspace(1)* %out, align 4
  ret void
}

; <4 x i16>: two dwords' worth of the v2i16 pattern (two SDWA mul pairs, two
; SDWA ors on VI; two packed multiplies on gfx900).
; GCN-LABEL: {{^}}mul_v4i16:
; NOSDWA: v_lshrrev_b32_e32 v{{[0-9]+}}, 16, v{{[0-9]+}}
; NOSDWA: v_lshrrev_b32_e32 v{{[0-9]+}}, 16, v{{[0-9]+}}
; NOSDWA: v_mul_u32_u24_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; NOSDWA: v_lshlrev_b32_e32 v{{[0-9]+}}, 16, v{{[0-9]+}}
; NOSDWA: v_or_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; NOSDWA-NOT: v_mul_u32_u24_sdwa

; VI-DAG: v_mul_u32_u24_sdwa v[[DST_MUL0:[0-9]+]], v{{[0-9]+}}, v{{[0-9]+}} dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:WORD_0
; VI-DAG: v_mul_u32_u24_sdwa v[[DST_MUL1:[0-9]+]], v{{[0-9]+}}, v{{[0-9]+}} dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; VI-DAG: v_mul_u32_u24_sdwa v[[DST_MUL2:[0-9]+]], v{{[0-9]+}}, v{{[0-9]+}} dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:WORD_0
; VI-DAG: v_mul_u32_u24_sdwa v[[DST_MUL3:[0-9]+]], v{{[0-9]+}}, v{{[0-9]+}} dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; VI-DAG: v_or_b32_sdwa v{{[0-9]+}}, v[[DST_MUL2]], v[[DST_MUL3]] dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-DAG: v_or_b32_sdwa v{{[0-9]+}}, v[[DST_MUL0]], v[[DST_MUL1]] dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD

; GFX9-DAG: v_pk_mul_lo_u16 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; GFX9-DAG: v_pk_mul_lo_u16 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}

define amdgpu_kernel void @mul_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> addrspace(1)* %ina, <4 x i16> addrspace(1)* %inb) {
entry:
  %a = load <4 x i16>, <4 x i16> addrspace(1)* %ina, align 4
  %b = load <4 x i16>, <4 x i16> addrspace(1)* %inb, align 4
  %mul = mul <4 x i16> %a, %b
  store <4 x i16> %mul, <4 x i16> addrspace(1)* %out, align 4
  ret void
}

; <8 x i16>: four dwords' worth of the v2i16 pattern.
; GCN-LABEL: {{^}}mul_v8i16:
; NOSDWA: v_lshrrev_b32_e32 v{{[0-9]+}}, 16, v{{[0-9]+}}
; NOSDWA: v_lshrrev_b32_e32 v{{[0-9]+}}, 16, v{{[0-9]+}}
; NOSDWA: v_mul_u32_u24_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; NOSDWA: v_lshlrev_b32_e32 v{{[0-9]+}}, 16, v{{[0-9]+}}
; NOSDWA: v_or_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; NOSDWA-NOT: v_mul_u32_u24_sdwa

; VI-DAG: v_mul_u32_u24_sdwa v[[DST_MUL0:[0-9]+]], v{{[0-9]+}}, v{{[0-9]+}} dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:WORD_0
; VI-DAG: v_mul_u32_u24_sdwa v[[DST_MUL1:[0-9]+]], v{{[0-9]+}}, v{{[0-9]+}} dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; VI-DAG: v_mul_u32_u24_sdwa v[[DST_MUL2:[0-9]+]], v{{[0-9]+}}, v{{[0-9]+}} dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:WORD_0
; VI-DAG: v_mul_u32_u24_sdwa v[[DST_MUL3:[0-9]+]], v{{[0-9]+}}, v{{[0-9]+}} dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; VI-DAG: v_mul_u32_u24_sdwa v[[DST_MUL4:[0-9]+]], v{{[0-9]+}}, v{{[0-9]+}} dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:WORD_0
; VI-DAG: v_mul_u32_u24_sdwa v[[DST_MUL5:[0-9]+]], v{{[0-9]+}}, v{{[0-9]+}} dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; VI-DAG: v_mul_u32_u24_sdwa v[[DST_MUL6:[0-9]+]], v{{[0-9]+}}, v{{[0-9]+}} dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:WORD_0
; VI-DAG: v_mul_u32_u24_sdwa v[[DST_MUL7:[0-9]+]], v{{[0-9]+}}, v{{[0-9]+}} dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; VI-DAG: v_or_b32_sdwa v{{[0-9]+}}, v[[DST_MUL6]], v[[DST_MUL7]] dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-DAG: v_or_b32_sdwa v{{[0-9]+}}, v[[DST_MUL4]], v[[DST_MUL5]] dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-DAG: v_or_b32_sdwa v{{[0-9]+}}, v[[DST_MUL2]], v[[DST_MUL3]] dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-DAG: v_or_b32_sdwa v{{[0-9]+}}, v[[DST_MUL0]], v[[DST_MUL1]] dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD

; GFX9-DAG: v_pk_mul_lo_u16 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; GFX9-DAG: v_pk_mul_lo_u16 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; GFX9-DAG: v_pk_mul_lo_u16 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; GFX9-DAG: v_pk_mul_lo_u16 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}

define amdgpu_kernel void @mul_v8i16(<8 x i16> addrspace(1)* %out, <8 x i16> addrspace(1)* %ina, <8 x i16> addrspace(1)* %inb) {
entry:
  %a = load <8 x i16>, <8 x i16> addrspace(1)* %ina, align 4
  %b = load <8 x i16>, <8 x i16> addrspace(1)* %inb, align 4
  %mul = mul <8 x i16> %a, %b
  store <8 x i16> %mul, <8 x i16> addrspace(1)* %out, align 4
  ret void
}

; Scalar half multiply: nothing packed, so no SDWA form in either mode.
; GCN-LABEL: {{^}}mul_half:
; NOSDWA: v_mul_f16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; NOSDWA-NOT: v_mul_f16_sdwa
; SDWA: v_mul_f16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; SDWA-NOT: v_mul_f16_sdwa

define amdgpu_kernel void @mul_half(half addrspace(1)* %out, half addrspace(1)* %ina, half addrspace(1)* %inb) {
entry:
  %a = load half, half addrspace(1)* %ina, align 4
  %b = load half, half addrspace(1)* %inb, align 4
  %mul = fmul half %a, %b
  store half %mul, half addrspace(1)* %out, align 4
  ret void
}

; <2 x half>: VI does the high half via SDWA and the low half with a plain
; multiply; gfx900 uses the packed f16 multiply.
; GCN-LABEL: {{^}}mul_v2half:
; NOSDWA: v_lshrrev_b32_e32 v[[DST0:[0-9]+]], 16, v{{[0-9]+}}
; NOSDWA: v_lshrrev_b32_e32 v[[DST1:[0-9]+]], 16, v{{[0-9]+}}
; NOSDWA: v_mul_f16_e32 v[[DST_MUL:[0-9]+]], v[[DST0]], v[[DST1]]
; NOSDWA: v_lshlrev_b32_e32 v[[DST_SHL:[0-9]+]], 16, v[[DST_MUL]]
; NOSDWA: v_or_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v[[DST_SHL]]
; NOSDWA-NOT: v_mul_f16_sdwa

; VI-DAG: v_mul_f16_sdwa v[[DST_MUL_HI:[0-9]+]], v{{[0-9]+}}, v{{[0-9]+}} dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; VI-DAG: v_mul_f16_e32 v[[DST_MUL_LO:[0-9]+]], v{{[0-9]+}}, v{{[0-9]+}}
; VI: v_or_b32_e32 v{{[0-9]+}}, v[[DST_MUL_LO]], v[[DST_MUL_HI]]

; GFX9: v_pk_mul_f16 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}

define amdgpu_kernel void @mul_v2half(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* %ina, <2 x half> addrspace(1)* %inb) {
entry:
  %a = load <2 x half>, <2 x half> addrspace(1)* %ina, align 4
  %b = load <2 x half>, <2 x half> addrspace(1)* %inb, align 4
  %mul = fmul <2 x half> %a, %b
  store <2 x half> %mul, <2 x half> addrspace(1)* %out, align 4
  ret void
}

; <4 x half>: two dwords' worth of the v2half pattern.
; GCN-LABEL: {{^}}mul_v4half:
; NOSDWA: v_lshrrev_b32_e32 v{{[0-9]+}}, 16, v{{[0-9]+}}
; NOSDWA: v_lshrrev_b32_e32 v{{[0-9]+}}, 16, v{{[0-9]+}}
; NOSDWA: v_mul_f16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; NOSDWA: v_lshlrev_b32_e32 v{{[0-9]+}}, 16, v{{[0-9]+}}
; NOSDWA: v_or_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; NOSDWA-NOT: v_mul_f16_sdwa

; VI-DAG: v_mul_f16_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; VI-DAG: v_mul_f16_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; VI-DAG: v_or_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; VI-DAG: v_or_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}

; GFX9-DAG: v_pk_mul_f16 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; GFX9-DAG: v_pk_mul_f16 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}

define amdgpu_kernel void @mul_v4half(<4 x half> addrspace(1)* %out, <4 x half> addrspace(1)* %ina, <4 x half> addrspace(1)* %inb) {
entry:
  %a = load <4 x half>, <4 x half> addrspace(1)* %ina, align 4
  %b = load <4 x half>, <4 x half> addrspace(1)* %inb, align 4
  %mul = fmul <4 x half> %a, %b
  store <4 x half> %mul, <4 x half> addrspace(1)* %out, align 4
  ret void
}

; <8 x half>: four dwords' worth of the v2half pattern.
; GCN-LABEL: {{^}}mul_v8half:
; NOSDWA: v_lshrrev_b32_e32 v{{[0-9]+}}, 16, v{{[0-9]+}}
; NOSDWA: v_lshrrev_b32_e32 v{{[0-9]+}}, 16, v{{[0-9]+}}
; NOSDWA: v_mul_f16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; NOSDWA: v_lshlrev_b32_e32 v{{[0-9]+}}, 16, v{{[0-9]+}}
; NOSDWA: v_or_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; NOSDWA-NOT: v_mul_f16_sdwa

; VI-DAG: v_mul_f16_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; VI-DAG: v_mul_f16_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; VI-DAG: v_mul_f16_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; VI-DAG: v_mul_f16_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; VI-DAG: v_or_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; VI-DAG: v_or_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; VI-DAG: v_or_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; VI-DAG: v_or_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}

; GFX9-DAG: v_pk_mul_f16 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; GFX9-DAG: v_pk_mul_f16 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; GFX9-DAG: v_pk_mul_f16 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; GFX9-DAG: v_pk_mul_f16 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}

define amdgpu_kernel void @mul_v8half(<8 x half> addrspace(1)* %out, <8 x half> addrspace(1)* %ina, <8 x half> addrspace(1)* %inb) {
entry:
  %a = load <8 x half>, <8 x half> addrspace(1)* %ina, align 4
  %b = load <8 x half>, <8 x half> addrspace(1)* %inb, align 4
  %mul = fmul <8 x half> %a, %b
  store <8 x half> %mul, <8 x half> addrspace(1)* %out, align 4
  ret void
}

; Scalar i8 multiply: like the i16 case, no SDWA form is expected.
; GCN-LABEL: {{^}}mul_i8:
; NOSDWA: v_mul_lo_i32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; NOSDWA-NOT: v_mul_u32_u24_sdwa
; SDWA: v_mul_lo_i32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; SDWA-NOT: v_mul_u32_u24_sdwa

define amdgpu_kernel void @mul_i8(i8 addrspace(1)* %out, i8 addrspace(1)* %ina, i8 addrspace(1)* %inb) {
entry:
  %a = load i8, i8 addrspace(1)* %ina, align 4
  %b = load i8, i8 addrspace(1)* %inb, align 4
  %mul = mul i8 %a, %b
  store i8 %mul, i8 addrspace(1)* %out, align 4
  ret void
}

; <2 x i8>: byte selects (BYTE_0/BYTE_1) instead of word selects.
; GCN-LABEL: {{^}}mul_v2i8:
; NOSDWA: v_lshrrev_b16_e32 v{{[0-9]+}}, 8, v{{[0-9]+}}
; NOSDWA: v_lshrrev_b16_e32 v{{[0-9]+}}, 8, v{{[0-9]+}}
; NOSDWA: v_mul_u32_u24_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; NOSDWA: v_lshlrev_b16_e32 v{{[0-9]+}}, 8, v{{[0-9]+}}
; NOSDWA: v_or_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; NOSDWA-NOT: v_mul_u32_u24_sdwa

; VI: v_mul_u32_u24_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:BYTE_1

; GFX9-DAG: v_mul_lo_u16_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:BYTE_1
; GFX9-DAG: v_mul_lo_u16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; GFX9: v_or_b32_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD

define amdgpu_kernel void @mul_v2i8(<2 x i8> addrspace(1)* %out, <2 x i8> addrspace(1)* %ina, <2 x i8> addrspace(1)* %inb) {
entry:
  %a = load <2 x i8>, <2 x i8> addrspace(1)* %ina, align 4
  %b = load <2 x i8>, <2 x i8> addrspace(1)* %inb, align 4
  %mul = mul <2 x i8> %a, %b
  store <2 x i8> %mul, <2 x i8> addrspace(1)* %out, align 4
  ret void
}

; <4 x i8>: only counts the SDWA multiplies; operand details are covered by
; the v2i8 test above.
; GCN-LABEL: {{^}}mul_v4i8:
; NOSDWA: v_lshrrev_b16_e32 v{{[0-9]+}}, 8, v{{[0-9]+}}
; NOSDWA: v_lshrrev_b16_e32 v{{[0-9]+}}, 8, v{{[0-9]+}}
; NOSDWA: v_mul_u32_u24_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; NOSDWA: v_lshlrev_b16_e32 v{{[0-9]+}}, 8, v{{[0-9]+}}
; NOSDWA: v_or_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; NOSDWA-NOT: v_mul_u32_u24_sdwa

; VI-DAG: v_mul_u32_u24_sdwa
; VI-DAG: v_mul_u32_u24_sdwa
; VI-DAG: v_mul_u32_u24_sdwa

; GFX9-DAG: v_mul_lo_u16_sdwa
; GFX9-DAG: v_mul_lo_u16_sdwa
; GFX9-DAG: v_mul_lo_u16_sdwa

define amdgpu_kernel void @mul_v4i8(<4 x i8> addrspace(1)* %out, <4 x i8> addrspace(1)* %ina, <4 x i8> addrspace(1)* %inb) {
entry:
  %a = load <4 x i8>, <4 x i8> addrspace(1)* %ina, align 4
  %b = load <4 x i8>, <4 x i8> addrspace(1)* %inb, align 4
  %mul = mul <4 x i8> %a, %b
  store <4 x i8> %mul, <4 x i8> addrspace(1)* %out, align 4
  ret void
}

; <8 x i8>: only counts the SDWA multiplies, as in the v4i8 test.
; GCN-LABEL: {{^}}mul_v8i8:
; NOSDWA: v_lshrrev_b16_e32 v{{[0-9]+}}, 8, v{{[0-9]+}}
; NOSDWA: v_lshrrev_b16_e32 v{{[0-9]+}}, 8, v{{[0-9]+}}
; NOSDWA: v_mul_u32_u24_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; NOSDWA: v_lshlrev_b16_e32 v{{[0-9]+}}, 8, v{{[0-9]+}}
; NOSDWA: v_or_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; NOSDWA-NOT: v_mul_u32_u24_sdwa

; VI-DAG: v_mul_u32_u24_sdwa
; VI-DAG: v_mul_u32_u24_sdwa
; VI-DAG: v_mul_u32_u24_sdwa
; VI-DAG: v_mul_u32_u24_sdwa
; VI-DAG: v_mul_u32_u24_sdwa
; VI-DAG: v_mul_u32_u24_sdwa

; GFX9-DAG: v_mul_lo_u16_sdwa
; GFX9-DAG: v_mul_lo_u16_sdwa
; GFX9-DAG: v_mul_lo_u16_sdwa
; GFX9-DAG: v_mul_lo_u16_sdwa
; GFX9-DAG: v_mul_lo_u16_sdwa
; GFX9-DAG: v_mul_lo_u16_sdwa

define amdgpu_kernel void @mul_v8i8(<8 x i8> addrspace(1)* %out, <8 x i8> addrspace(1)* %ina, <8 x i8> addrspace(1)* %inb) {
entry:
  %a = load <8 x i8>, <8 x i8> addrspace(1)* %ina, align 4
  %b = load <8 x i8>, <8 x i8> addrspace(1)* %inb, align 4
  %mul = mul <8 x i8> %a, %b
  store <8 x i8> %mul, <8 x i8> addrspace(1)* %out, align 4
  ret void
}

; Conversion with sign-extended SDWA source operands (sext modifier).
; GCN-LABEL: {{^}}sitofp_v2i16_to_v2f16:
; NOSDWA-DAG: v_bfe_i32 v{{[0-9]+}}, v{{[0-9]+}}, 0, 16
; NOSDWA-DAG: v_ashrrev_i32_e32 v{{[0-9]+}}, 16, v{{[0-9]+}}
; NOSDWA-DAG: v_cvt_f32_i32_e32 v{{[0-9]+}}, v{{[0-9]+}}
; NOSDWA-DAG: v_cvt_f32_i32_e32 v{{[0-9]+}}, v{{[0-9]+}}
; NOSDWA-NOT: v_cvt_f32_i32_sdwa

; SDWA-DAG: v_cvt_f32_i32_sdwa v{{[0-9]+}}, sext(v{{[0-9]+}}) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
; SDWA-DAG: v_cvt_f32_i32_sdwa v{{[0-9]+}}, sext(v{{[0-9]+}}) dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1

define amdgpu_kernel void @sitofp_v2i16_to_v2f16(
    <2 x half> addrspace(1)* %r,
    <2 x i16> addrspace(1)* %a) {
entry:
  %a.val = load <2 x i16>, <2 x i16> addrspace(1)* %a
  %r.val = sitofp <2 x i16> %a.val to <2 x half>
  store <2 x half> %r.val, <2 x half> addrspace(1)* %r
  ret void
}

; Multiply-accumulate: VI folds the high-half mul into an SDWA mac; gfx900
; keeps separate packed mul and add.
; GCN-LABEL: {{^}}mac_v2half:
; NOSDWA: v_lshrrev_b32_e32 v[[DST0:[0-9]+]], 16, v{{[0-9]+}}
; NOSDWA: v_lshrrev_b32_e32 v[[DST1:[0-9]+]], 16, v{{[0-9]+}}
; NOSDWA: v_mac_f16_e32 v[[DST_MAC:[0-9]+]], v[[DST0]], v[[DST1]]
; NOSDWA: v_lshlrev_b32_e32 v[[DST_SHL:[0-9]+]], 16, v[[DST_MAC]]
; NOSDWA: v_or_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v[[DST_SHL]]
; NOSDWA-NOT: v_mac_f16_sdwa

; VI: v_mac_f16_sdwa v[[DST_MAC:[0-9]+]], v{{[0-9]+}}, v{{[0-9]+}} dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; VI: v_lshlrev_b32_e32 v[[DST_SHL:[0-9]+]], 16, v[[DST_MAC]]

; GFX9: v_pk_mul_f16 v[[DST_MUL:[0-9]+]], v{{[0-9]+}}, v[[SRC:[0-9]+]]
; GFX9: v_pk_add_f16 v{{[0-9]+}}, v[[DST_MUL]], v[[SRC]]

define amdgpu_kernel void @mac_v2half(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* %ina, <2 x half> addrspace(1)* %inb) {
entry:
  %a = load <2 x half>, <2 x half> addrspace(1)* %ina, align 4
  %b = load <2 x half>, <2 x half> addrspace(1)* %inb, align 4
  %mul = fmul <2 x half> %a, %b
  %mac = fadd <2 x half> %mul, %b
  store <2 x half> %mac, <2 x half> addrspace(1)* %out, align 4
  ret void
}

; Immediate operands: VI materializes each half in a VGPR (0x7b = 123,
; 0x141 = 321); gfx900 packs both into one 32-bit scalar (0x141007b).
; GCN-LABEL: {{^}}immediate_mul_v2i16:
; NOSDWA-NOT: v_mul_u32_u24_sdwa
; VI-DAG: v_mov_b32_e32 v[[M321:[0-9]+]], 0x141
; VI-DAG: v_mov_b32_e32 v[[M123:[0-9]+]], 0x7b
; VI-DAG: v_mul_u32_u24_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v[[M123]] dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-DAG: v_mul_u32_u24_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v[[M321]] dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD

; GFX9: s_mov_b32 s[[IMM:[0-9]+]], 0x141007b
; GFX9: v_pk_mul_lo_u16 v{{[0-9]+}}, v{{[0-9]+}}, s[[IMM]]

define amdgpu_kernel void @immediate_mul_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %in) {
entry:
  %a = load <2 x i16>, <2 x i16> addrspace(1)* %in, align 4
  %mul = mul <2 x i16> %a, <i16 123, i16 321>
  store <2 x i16> %mul, <2 x i16> addrspace(1)* %out, align 4
  ret void
}

; Double use of same src - should not convert it
; GCN-LABEL: {{^}}mulmul_v2i16:
; NOSDWA: v_lshrrev_b32_e32 v{{[0-9]+}}, 16, v{{[0-9]+}}
; NOSDWA: v_lshrrev_b32_e32 v{{[0-9]+}}, 16, v{{[0-9]+}}
; NOSDWA: v_mul_u32_u24_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; NOSDWA: v_lshlrev_b32_e32 v{{[0-9]+}}, 16, v{{[0-9]+}}
; NOSDWA: v_or_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; NOSDWA-NOT: v_mul_u32_u24_sdwa

; VI: v_mul_u32_u24_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD

; GFX9: v_pk_mul_lo_u16 v[[DST1:[0-9]+]], v{{[0-9]+}}, v{{[0-9]+}}
; GFX9: v_pk_mul_lo_u16 v{{[0-9]+}}, v[[DST1]], v{{[0-9]+}}

define amdgpu_kernel void @mulmul_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %ina, <2 x i16> addrspace(1)* %inb) {
entry:
  %a = load <2 x i16>, <2 x i16> addrspace(1)* %ina, align 4
  %b = load <2 x i16>, <2 x i16> addrspace(1)* %inb, align 4
  %mul = mul <2 x i16> %a, %b
  %mul2 = mul <2 x i16> %mul, %b
  store <2 x i16> %mul2, <2 x i16> addrspace(1)* %out, align 4
  ret void
}

; The add lives in a different basic block from the loads and the store; the
; peephole should still fire on VI.
; GCN-LABEL: {{^}}add_bb_v2i16:
; NOSDWA-NOT: v_add_i32_sdwa

; VI: v_add_i32_sdwa v{{[0-9]+}}, vcc, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1

; GFX9: v_pk_add_u16 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}

define amdgpu_kernel void @add_bb_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %ina, <2 x i16> addrspace(1)* %inb) {
entry:
  %a = load <2 x i16>, <2 x i16> addrspace(1)* %ina, align 4
  %b = load <2 x i16>, <2 x i16> addrspace(1)* %inb, align 4
  br label %add_label
add_label:
  %add = add <2 x i16> %a, %b
  br label %store_label
store_label:
  store <2 x i16> %add, <2 x i16> addrspace(1)* %out, align 4
  ret void
}

; Check that "pulling out" SDWA operands works correctly.
; GCN-LABEL: {{^}}pulled_out_test:
; NOSDWA-DAG: v_and_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}
; NOSDWA-DAG: v_lshlrev_b16_e32 v{{[0-9]+}}, 8, v{{[0-9]+}}
; NOSDWA-DAG: v_and_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}
; NOSDWA-DAG: v_lshlrev_b16_e32 v{{[0-9]+}}, 8, v{{[0-9]+}}
; NOSDWA: v_or_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; NOSDWA-NOT: v_and_b32_sdwa
; NOSDWA-NOT: v_or_b32_sdwa

; VI-DAG: v_and_b32_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; GFX9-DAG: v_and_b32_sdwa v{{[0-9]+}}, v{{[0-9]+}}, s{{[0-9]+}} dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; SDWA-DAG: v_lshlrev_b16_e32 v{{[0-9]+}}, 8, v{{[0-9]+}}
; VI-DAG: v_and_b32_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; GFX9-DAG: v_and_b32_sdwa v{{[0-9]+}}, v{{[0-9]+}}, s{{[0-9]+}} dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; SDWA-DAG: v_lshlrev_b16_e32 v{{[0-9]+}}, 8, v{{[0-9]+}}
; SDWA: v_or_b32_sdwa v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}} dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD

define amdgpu_kernel void @pulled_out_test(<8 x i8> addrspace(1)* %sourceA, <8 x i8> addrspace(1)* %destValues) {
entry:
  %idxprom = ashr exact i64 15, 32
  %arrayidx = getelementptr inbounds <8 x i8>, <8 x i8> addrspace(1)* %sourceA, i64 %idxprom
  %tmp = load <8 x i8>, <8 x i8> addrspace(1)* %arrayidx, align 8

  %tmp1 = extractelement <8 x i8> %tmp, i32 0
  %tmp2 = extractelement <8 x i8> %tmp, i32 1
  %tmp3 = extractelement <8 x i8> %tmp, i32 2
  %tmp4 = extractelement <8 x i8> %tmp, i32 3
  %tmp5 = extractelement <8 x i8> %tmp, i32 4
  %tmp6 = extractelement <8 x i8> %tmp, i32 5
  %tmp7 = extractelement <8 x i8> %tmp, i32 6
  %tmp8 = extractelement <8 x i8> %tmp, i32 7

  %tmp9 = insertelement <2 x i8> undef, i8 %tmp1, i32 0
  %tmp10 = insertelement <2 x i8> %tmp9, i8 %tmp2, i32 1
  %tmp11 = insertelement <2 x i8> undef, i8 %tmp3, i32 0
  %tmp12 = insertelement <2 x i8> %tmp11, i8 %tmp4, i32 1
  %tmp13 = insertelement <2 x i8> undef, i8 %tmp5, i32 0
  %tmp14 = insertelement <2 x i8> %tmp13, i8 %tmp6, i32 1
  %tmp15 = insertelement <2 x i8> undef, i8 %tmp7, i32 0
  %tmp16 = insertelement <2 x i8> %tmp15, i8 %tmp8, i32 1

  %tmp17 = shufflevector <2 x i8> %tmp10, <2 x i8> %tmp12, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %tmp18 = shufflevector <2 x i8> %tmp14, <2 x i8> %tmp16, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  %tmp19 = shufflevector <4 x i8> %tmp17, <4 x i8> %tmp18, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>

  %arrayidx5 = getelementptr inbounds <8 x i8>, <8 x i8> addrspace(1)* %destValues, i64 %idxprom
  store <8 x i8> %tmp19, <8 x i8> addrspace(1)* %arrayidx5, align 8
  ret void
}