comparison test/CodeGen/AMDGPU/operand-folding.ll @ 121:803732b1fca8

LLVM 5.0
author kono
date Fri, 27 Oct 2017 17:07:41 +0900
parents 1172e4bd9c6f
children (none)
comparing 120:1172e4bd9c6f with 121:803732b1fca8
diff -r 1172e4bd9c6f -r 803732b1fca8 test/CodeGen/AMDGPU/operand-folding.ll
--- a/test/CodeGen/AMDGPU/operand-folding.ll
+++ b/test/CodeGen/AMDGPU/operand-folding.ll
@@ -1,10 +1,10 @@
 ; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck %s
 
 ; CHECK-LABEL: {{^}}fold_sgpr:
 ; CHECK: v_add_i32_e32 v{{[0-9]+}}, vcc, s
-define void @fold_sgpr(i32 addrspace(1)* %out, i32 %fold) {
+define amdgpu_kernel void @fold_sgpr(i32 addrspace(1)* %out, i32 %fold) {
 entry:
   %tmp0 = icmp ne i32 %fold, 0
   br i1 %tmp0, label %if, label %endif
 
 if:
@@ -18,11 +18,11 @@
   ret void
 }
 
 ; CHECK-LABEL: {{^}}fold_imm:
 ; CHECK: v_or_b32_e32 v{{[0-9]+}}, 5
-define void @fold_imm(i32 addrspace(1)* %out, i32 %cmp) {
+define amdgpu_kernel void @fold_imm(i32 addrspace(1)* %out, i32 %cmp) {
 entry:
   %fold = add i32 3, 2
   %tmp0 = icmp ne i32 %cmp, 0
   br i1 %tmp0, label %if, label %endif
 
@@ -44,11 +44,11 @@
 ; CHECK-DAG: s_addc_u32 [[HI:s[0-9]+]], s{{[0-9]+}}, 0
 ; CHECK-DAG: v_mov_b32_e32 v[[VLO:[0-9]+]], [[LO]]
 ; CHECK-DAG: v_mov_b32_e32 v[[VHI:[0-9]+]], [[HI]]
 ; CHECK: buffer_store_dwordx2 v{{\[}}[[VLO]]:[[VHI]]{{\]}},
 
-define void @fold_64bit_constant_add(i64 addrspace(1)* %out, i32 %cmp, i64 %val) {
+define amdgpu_kernel void @fold_64bit_constant_add(i64 addrspace(1)* %out, i32 %cmp, i64 %val) {
 entry:
   %tmp0 = add i64 %val, 1
   store i64 %tmp0, i64 addrspace(1)* %out
   ret void
 }
@@ -59,11 +59,11 @@
 ; CHECK: v_xor_b32_e32 v{{[0-9]+}}, 5, v{{[0-9]+}}
 ; CHECK: v_xor_b32_e32 v{{[0-9]+}}, 5, v{{[0-9]+}}
 ; CHECK: v_xor_b32_e32 v{{[0-9]+}}, 5, v{{[0-9]+}}
 ; CHECK: v_xor_b32_e32 v{{[0-9]+}}, 5, v{{[0-9]+}}
 
-define void @vector_inline(<4 x i32> addrspace(1)* %out) {
+define amdgpu_kernel void @vector_inline(<4 x i32> addrspace(1)* %out) {
 entry:
   %tmp0 = call i32 @llvm.amdgcn.workitem.id.x()
   %tmp1 = add i32 %tmp0, 1
   %tmp2 = add i32 %tmp0, 2
   %tmp3 = add i32 %tmp0, 3
@@ -78,11 +78,11 @@
 
 ; Immediates with one use should be folded
 ; CHECK-LABEL: {{^}}imm_one_use:
 ; CHECK: v_xor_b32_e32 v{{[0-9]+}}, 0x64, v{{[0-9]+}}
 
-define void @imm_one_use(i32 addrspace(1)* %out) {
+define amdgpu_kernel void @imm_one_use(i32 addrspace(1)* %out) {
 entry:
   %tmp0 = call i32 @llvm.amdgcn.workitem.id.x()
   %tmp1 = xor i32 %tmp0, 100
   store i32 %tmp1, i32 addrspace(1)* %out
   ret void
@@ -92,11 +92,11 @@
 ; CHECK: v_xor_b32_e32 v{{[0-9]}}, [[IMM]], v{{[0-9]}}
 ; CHECK: v_xor_b32_e32 v{{[0-9]}}, [[IMM]], v{{[0-9]}}
 ; CHECK: v_xor_b32_e32 v{{[0-9]}}, [[IMM]], v{{[0-9]}}
 ; CHECK: v_xor_b32_e32 v{{[0-9]}}, [[IMM]], v{{[0-9]}}
 
-define void @vector_imm(<4 x i32> addrspace(1)* %out) {
+define amdgpu_kernel void @vector_imm(<4 x i32> addrspace(1)* %out) {
 entry:
   %tmp0 = call i32 @llvm.amdgcn.workitem.id.x()
   %tmp1 = add i32 %tmp0, 1
   %tmp2 = add i32 %tmp0, 2
   %tmp3 = add i32 %tmp0, 3
@@ -112,11 +112,11 @@
 ; A subregister use operand should not be tied.
 ; CHECK-LABEL: {{^}}no_fold_tied_subregister:
 ; CHECK: buffer_load_dwordx2 v{{\[}}[[LO:[0-9]+]]:[[HI:[0-9]+]]{{\]}}
 ; CHECK: v_mac_f32_e32 v[[LO]], 0x41200000, v[[HI]]
 ; CHECK: buffer_store_dword v[[LO]]
-define void @no_fold_tied_subregister() {
+define amdgpu_kernel void @no_fold_tied_subregister() {
   %tmp1 = load volatile <2 x float>, <2 x float> addrspace(1)* undef
   %tmp2 = extractelement <2 x float> %tmp1, i32 0
   %tmp3 = extractelement <2 x float> %tmp1, i32 1
   %tmp4 = fmul float %tmp3, 10.0
   %tmp5 = fadd float %tmp4, %tmp2
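
Every hunk in this revision makes the same one-token change: each test entry point gains the amdgpu_kernel calling convention, which llc requires to lower a function as a GCN kernel entry point rather than as a plain callable function. For reference, below is a minimal standalone sketch in the style of the file itself; the function @fold_example and its file are hypothetical, not part of this revision. It pairs the two immediate cases the tests exercise: 5 lies in the inline-immediate range (-16..64) and folds directly into the instruction encoding, while 100 (0x64) lies outside it and is emitted as a separate 32-bit literal, matching the imm_one_use check above.

; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck %s
; Hypothetical example, not part of operand-folding.ll.
; CHECK-LABEL: {{^}}fold_example:
; CHECK: v_xor_b32_e32 v{{[0-9]+}}, 5, v{{[0-9]+}}
; CHECK: v_xor_b32_e32 v{{[0-9]+}}, 0x64, v{{[0-9]+}}
define amdgpu_kernel void @fold_example(i32 addrspace(1)* %out) {
entry:
  %id = call i32 @llvm.amdgcn.workitem.id.x()
  %a = xor i32 %id, 5    ; 5 is an inline immediate: folded, no literal needed
  %b = xor i32 %a, 100   ; 100 (0x64) is outside -16..64: emitted as a literal
  store i32 %b, i32 addrspace(1)* %out
  ret void
}

declare i32 @llvm.amdgcn.workitem.id.x()

Under lit, %s expands to the test file itself, so the RUN line is equivalent to running llc -march=amdgcn -verify-machineinstrs < fold_example.ll | FileCheck fold_example.ll by hand.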