; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN %s

define amdgpu_kernel void @sext_i16_to_i32_uniform(i32 addrspace(1)* %out, i16 %a, i32 %b) {
; GCN-LABEL: sext_i16_to_i32_uniform:
; GCN:       ; %bb.0:
; GCN-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
; GCN-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0xb
; GCN-NEXT:    s_mov_b32 s7, 0xf000
; GCN-NEXT:    s_mov_b32 s6, -1
; GCN-NEXT:    s_waitcnt lgkmcnt(0)
; GCN-NEXT:    s_sext_i32_i16 s0, s0
; GCN-NEXT:    s_add_i32 s0, s1, s0
; GCN-NEXT:    v_mov_b32_e32 v0, s0
; GCN-NEXT:    buffer_store_dword v0, off, s[4:7], 0
; GCN-NEXT:    s_endpgm
  %sext = sext i16 %a to i32
  %res = add i32 %b, %sext
  store i32 %res, i32 addrspace(1)* %out
  ret void
}


define amdgpu_kernel void @sext_i16_to_i64_uniform(i64 addrspace(1)* %out, i16 %a, i64 %b) {
; GCN-LABEL: sext_i16_to_i64_uniform:
; GCN:       ; %bb.0:
; GCN-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
; GCN-NEXT:    s_load_dword s2, s[0:1], 0xb
; GCN-NEXT:    s_load_dwordx2 s[0:1], s[0:1], 0xd
; GCN-NEXT:    s_mov_b32 s7, 0xf000
; GCN-NEXT:    s_mov_b32 s6, -1
; GCN-NEXT:    s_waitcnt lgkmcnt(0)
; GCN-NEXT:    s_bfe_i64 s[2:3], s[2:3], 0x100000
; GCN-NEXT:    s_add_u32 s0, s0, s2
; GCN-NEXT:    s_addc_u32 s1, s1, s3
; GCN-NEXT:    v_mov_b32_e32 v0, s0
; GCN-NEXT:    v_mov_b32_e32 v1, s1
; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; GCN-NEXT:    s_endpgm
  %sext = sext i16 %a to i64
  %res = add i64 %b, %sext
  store i64 %res, i64 addrspace(1)* %out
  ret void
}

define amdgpu_kernel void @sext_i16_to_i32_divergent(i32 addrspace(1)* %out, i16 %a, i32 %b) {
; GCN-LABEL: sext_i16_to_i32_divergent:
; GCN:       ; %bb.0:
; GCN-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
; GCN-NEXT:    s_load_dword s0, s[0:1], 0xb
; GCN-NEXT:    s_mov_b32 s7, 0xf000
; GCN-NEXT:    s_mov_b32 s6, -1
; GCN-NEXT:    s_waitcnt lgkmcnt(0)
; GCN-NEXT:    v_add_i32_e32 v0, vcc, s0, v0
; GCN-NEXT:    v_bfe_i32 v0, v0, 0, 16
; GCN-NEXT:    buffer_store_dword v0, off, s[4:7], 0
; GCN-NEXT:    s_endpgm
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.truncated = trunc i32 %tid to i16
  %divergent.a = add i16 %a, %tid.truncated
  %sext = sext i16 %divergent.a to i32
  store i32 %sext, i32 addrspace(1)* %out
  ret void
}


define amdgpu_kernel void @sext_i16_to_i64_divergent(i64 addrspace(1)* %out, i16 %a, i64 %b) {
; GCN-LABEL: sext_i16_to_i64_divergent:
; GCN:       ; %bb.0:
; GCN-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
; GCN-NEXT:    s_load_dword s0, s[0:1], 0xb
; GCN-NEXT:    s_mov_b32 s7, 0xf000
; GCN-NEXT:    s_mov_b32 s6, -1
; GCN-NEXT:    s_waitcnt lgkmcnt(0)
; GCN-NEXT:    v_add_i32_e32 v0, vcc, s0, v0
; GCN-NEXT:    v_bfe_i32 v0, v0, 0, 16
; GCN-NEXT:    v_ashrrev_i32_e32 v1, 31, v0
; GCN-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; GCN-NEXT:    s_endpgm
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.truncated = trunc i32 %tid to i16
  %divergent.a = add i16 %a, %tid.truncated
  %sext = sext i16 %divergent.a to i64
  store i64 %sext, i64 addrspace(1)* %out
  ret void
}

declare i32 @llvm.amdgcn.workitem.id.x() #1

attributes #0 = { nounwind }
attributes #1 = { nounwind readnone speculatable }