diff test/CodeGen/AMDGPU/uint_to_fp.f64.ll @ 121:803732b1fca8
LLVM 5.0
author:   kono
date:     Fri, 27 Oct 2017 17:07:41 +0900
parents:  1172e4bd9c6f
children: (none)
--- a/test/CodeGen/AMDGPU/uint_to_fp.f64.ll	Fri Nov 25 19:14:25 2016 +0900
+++ b/test/CodeGen/AMDGPU/uint_to_fp.f64.ll	Fri Oct 27 17:07:41 2017 +0900
@@ -9,7 +9,7 @@
 ; SI-DAG: v_ldexp_f64 [[LDEXP:v\[[0-9]+:[0-9]+\]]], [[HI_CONV]], 32
 ; SI: v_add_f64 [[RESULT:v\[[0-9]+:[0-9]+\]]], [[LDEXP]], [[LO_CONV]]
 ; SI: buffer_store_dwordx2 [[RESULT]]
-define void @v_uint_to_fp_i64_to_f64(double addrspace(1)* %out, i64 addrspace(1)* %in) {
+define amdgpu_kernel void @v_uint_to_fp_i64_to_f64(double addrspace(1)* %out, i64 addrspace(1)* %in) {
   %tid = call i32 @llvm.amdgcn.workitem.id.x() nounwind readnone
   %gep = getelementptr i64, i64 addrspace(1)* %in, i32 %tid
   %val = load i64, i64 addrspace(1)* %gep, align 8
@@ -19,21 +19,21 @@
 }
 
 ; SI-LABEL: {{^}}s_uint_to_fp_i64_to_f64
-define void @s_uint_to_fp_i64_to_f64(double addrspace(1)* %out, i64 %in) {
+define amdgpu_kernel void @s_uint_to_fp_i64_to_f64(double addrspace(1)* %out, i64 %in) {
   %cast = uitofp i64 %in to double
   store double %cast, double addrspace(1)* %out, align 8
   ret void
 }
 
 ; SI-LABEL: {{^}}s_uint_to_fp_v2i64_to_v2f64
-define void @s_uint_to_fp_v2i64_to_v2f64(<2 x double> addrspace(1)* %out, <2 x i64> %in) {
+define amdgpu_kernel void @s_uint_to_fp_v2i64_to_v2f64(<2 x double> addrspace(1)* %out, <2 x i64> %in) {
   %cast = uitofp <2 x i64> %in to <2 x double>
   store <2 x double> %cast, <2 x double> addrspace(1)* %out, align 16
   ret void
 }
 
 ; SI-LABEL: {{^}}s_uint_to_fp_v4i64_to_v4f64
-define void @s_uint_to_fp_v4i64_to_v4f64(<4 x double> addrspace(1)* %out, <4 x i64> %in) {
+define amdgpu_kernel void @s_uint_to_fp_v4i64_to_v4f64(<4 x double> addrspace(1)* %out, <4 x i64> %in) {
   %cast = uitofp <4 x i64> %in to <4 x double>
   store <4 x double> %cast, <4 x double> addrspace(1)* %out, align 16
   ret void
@@ -42,7 +42,7 @@
 ; SI-LABEL: {{^}}s_uint_to_fp_i32_to_f64
 ; SI: v_cvt_f64_u32_e32
 ; SI: s_endpgm
-define void @s_uint_to_fp_i32_to_f64(double addrspace(1)* %out, i32 %in) {
+define amdgpu_kernel void @s_uint_to_fp_i32_to_f64(double addrspace(1)* %out, i32 %in) {
   %cast = uitofp i32 %in to double
   store double %cast, double addrspace(1)* %out, align 8
   ret void
@@ -52,7 +52,7 @@
 ; SI: v_cvt_f64_u32_e32
 ; SI: v_cvt_f64_u32_e32
 ; SI: s_endpgm
-define void @s_uint_to_fp_v2i32_to_v2f64(<2 x double> addrspace(1)* %out, <2 x i32> %in) {
+define amdgpu_kernel void @s_uint_to_fp_v2i32_to_v2f64(<2 x double> addrspace(1)* %out, <2 x i32> %in) {
   %cast = uitofp <2 x i32> %in to <2 x double>
   store <2 x double> %cast, <2 x double> addrspace(1)* %out, align 16
   ret void
@@ -64,7 +64,7 @@
 ; SI: v_cvt_f64_u32_e32
 ; SI: v_cvt_f64_u32_e32
 ; SI: s_endpgm
-define void @s_uint_to_fp_v4i32_to_v4f64(<4 x double> addrspace(1)* %out, <4 x i32> %in) {
+define amdgpu_kernel void @s_uint_to_fp_v4i32_to_v4f64(<4 x double> addrspace(1)* %out, <4 x i32> %in) {
   %cast = uitofp <4 x i32> %in to <4 x double>
   store <4 x double> %cast, <4 x double> addrspace(1)* %out, align 16
   ret void
@@ -79,7 +79,7 @@
 ; SI-DAG: v_mov_b32_e32 v[[ZERO:[0-9]+]], 0{{$}}
 ; SI: buffer_store_dwordx2 v{{\[}}[[ZERO]]:[[SEL]]{{\]}}
 ; SI: s_endpgm
-define void @uint_to_fp_i1_to_f64(double addrspace(1)* %out, i32 %in) {
+define amdgpu_kernel void @uint_to_fp_i1_to_f64(double addrspace(1)* %out, i32 %in) {
   %cmp = icmp eq i32 %in, 0
   %fp = uitofp i1 %cmp to double
   store double %fp, double addrspace(1)* %out, align 4
@@ -91,7 +91,7 @@
 ; SI-NEXT: v_cvt_f64_u32_e32 [[RESULT:v\[[0-9]+:[0-9]\]]], [[IRESULT]]
 ; SI: buffer_store_dwordx2 [[RESULT]]
 ; SI: s_endpgm
-define void @uint_to_fp_i1_to_f64_load(double addrspace(1)* %out, i1 %in) {
+define amdgpu_kernel void @uint_to_fp_i1_to_f64_load(double addrspace(1)* %out, i1 %in) {
   %fp = uitofp i1 %in to double
   store double %fp, double addrspace(1)* %out, align 8
   ret void
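Every hunk above makes the same mechanical change: each test kernel gains the amdgpu_kernel calling convention, which the AMDGPU backend expects on kernel entry points as of LLVM 5.0; a plain `define void` function is no longer treated as a kernel. The following is a minimal illustrative sketch, not part of uint_to_fp.f64.ll: the @example_kernel name is invented, and the RUN line shows the usual shape of such tests rather than this file's exact one.

; Illustrative only. A typical AMDGPU FileCheck test pairs a RUN line
; like this with the ; SI: patterns seen in the diff above:
; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=SI %s

; Old form, default calling convention (pre-LLVM 5.0):
;   define void @example_kernel(double addrspace(1)* %out, i64 %in) { ... }

; New form: amdgpu_kernel marks the function as a kernel entry point.
define amdgpu_kernel void @example_kernel(double addrspace(1)* %out, i64 %in) {
  %cast = uitofp i64 %in to double        ; the u64 -> f64 conversion this test exercises
  store double %cast, double addrspace(1)* %out, align 8
  ret void
}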