; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -verify-machineinstrs < %s | FileCheck --check-prefixes=GCN,SI,FUNC %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck --check-prefixes=GCN,CI,FUNC %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck --check-prefixes=GCN,CI,FUNC %s

; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -enable-unsafe-fp-math -verify-machineinstrs < %s | FileCheck --check-prefixes=GCN-UNSAFE,FUNC %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -enable-unsafe-fp-math -verify-machineinstrs < %s | FileCheck --check-prefixes=GCN-UNSAFE,VI-UNSAFE,FUNC %s

declare double @llvm.fabs.f64(double) #0
declare double @llvm.floor.f64(double) #0

; FUNC-LABEL: {{^}}fract_f64:
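; fract is matched from x - floor(x). SI lacks v_floor_f64, so floor is
; expanded with v_fract_f64 clamped via v_min_f64 to 0x3fefffffffffffff plus a
; v_cmp_class/v_cndmask NaN guard (class mask 3 selects the original input),
; then two negated v_add_f64 recover x - floor(x). CI and later use
; v_floor_f64 directly, and the unsafe-fp-math runs fold the whole pattern to
; a single v_fract_f64.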
; SI-DAG: v_fract_f64_e32 [[FRC:v\[[0-9]+:[0-9]+\]]], v[[[LO:[0-9]+]]:[[HI:[0-9]+]]]
; SI-DAG: v_mov_b32_e32 v[[UPLO:[0-9]+]], -1
; SI-DAG: v_mov_b32_e32 v[[UPHI:[0-9]+]], 0x3fefffff
; SI-DAG: v_min_f64 v[[[MINLO:[0-9]+]]:[[MINHI:[0-9]+]]], [[FRC]], v[[[UPLO]]:[[UPHI]]]
; SI-DAG: v_cmp_class_f64_e64 vcc, v[[[LO]]:[[HI]]], 3
; SI: v_cndmask_b32_e32 v[[RESLO:[0-9]+]], v[[MINLO]], v[[LO]], vcc
; SI: v_cndmask_b32_e32 v[[RESHI:[0-9]+]], v[[MINHI]], v[[HI]], vcc
; SI: v_add_f64 [[SUB0:v\[[0-9]+:[0-9]+\]]], v[[[LO]]:[[HI]]], -v[[[RESLO]]:[[RESHI]]]
; SI: v_add_f64 [[FRACT:v\[[0-9]+:[0-9]+\]]], v[[[LO]]:[[HI]]], -[[SUB0]]

; CI: buffer_load_dwordx2 [[X:v\[[0-9]+:[0-9]+\]]]
; CI: v_floor_f64_e32 [[FLOORX:v\[[0-9]+:[0-9]+\]]], [[X]]
; CI: v_add_f64 [[FRACT:v\[[0-9]+:[0-9]+\]]], [[X]], -[[FLOORX]]

; GCN-UNSAFE: buffer_load_dwordx2 [[X:v\[[0-9]+:[0-9]+\]]]
; GCN-UNSAFE: v_fract_f64_e32 [[FRACT:v\[[0-9]+:[0-9]+\]]], [[X]]

; GCN: buffer_store_dwordx2 [[FRACT]]
define amdgpu_kernel void @fract_f64(double addrspace(1)* %out, double addrspace(1)* %src) #1 {
  %x = load double, double addrspace(1)* %src
  %floor.x = call double @llvm.floor.f64(double %x)
  %fract = fsub double %x, %floor.x
  store double %fract, double addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}fract_f64_neg:
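; Same as fract_f64, but the source is negated; the fneg is expected to fold
; into a source modifier (v_fract_f64_e64/v_floor_f64_e64 with a negated
; operand) rather than a separate negate.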
; SI-DAG: v_fract_f64_e64 [[FRC:v\[[0-9]+:[0-9]+\]]], -v[[[LO:[0-9]+]]:[[HI:[0-9]+]]]
; SI-DAG: v_mov_b32_e32 v[[UPLO:[0-9]+]], -1
; SI-DAG: v_mov_b32_e32 v[[UPHI:[0-9]+]], 0x3fefffff
; SI-DAG: v_min_f64 v[[[MINLO:[0-9]+]]:[[MINHI:[0-9]+]]], [[FRC]], v[[[UPLO]]:[[UPHI]]]
; SI-DAG: v_cmp_class_f64_e64 vcc, v[[[LO]]:[[HI]]], 3
; SI: v_cndmask_b32_e32 v[[RESLO:[0-9]+]], v[[MINLO]], v[[LO]], vcc
; SI: v_cndmask_b32_e32 v[[RESHI:[0-9]+]], v[[MINHI]], v[[HI]], vcc
; SI: v_add_f64 [[SUB0:v\[[0-9]+:[0-9]+\]]], -v[[[LO]]:[[HI]]], -v[[[RESLO]]:[[RESHI]]]
; SI: v_add_f64 [[FRACT:v\[[0-9]+:[0-9]+\]]], -v[[[LO]]:[[HI]]], -[[SUB0]]

; CI: buffer_load_dwordx2 [[X:v\[[0-9]+:[0-9]+\]]]
; CI: v_floor_f64_e64 [[FLOORX:v\[[0-9]+:[0-9]+\]]], -[[X]]
; CI: v_add_f64 [[FRACT:v\[[0-9]+:[0-9]+\]]], -[[X]], -[[FLOORX]]

; GCN-UNSAFE: buffer_load_dwordx2 [[X:v\[[0-9]+:[0-9]+\]]]
; GCN-UNSAFE: v_fract_f64_e64 [[FRACT:v\[[0-9]+:[0-9]+\]]], -[[X]]

; GCN: buffer_store_dwordx2 [[FRACT]]
define amdgpu_kernel void @fract_f64_neg(double addrspace(1)* %out, double addrspace(1)* %src) #1 {
  %x = load double, double addrspace(1)* %src
  %neg.x = fsub double -0.0, %x
  %floor.neg.x = call double @llvm.floor.f64(double %neg.x)
  %fract = fsub double %neg.x, %floor.neg.x
  store double %fract, double addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}fract_f64_neg_abs:
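; As fract_f64_neg, with fneg(fabs(x)) as the source; both modifiers are
; expected to fold into -| | operand modifiers on the fract/floor/add
; instructions.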
; SI-DAG: v_fract_f64_e64 [[FRC:v\[[0-9]+:[0-9]+\]]], -|v[[[LO:[0-9]+]]:[[HI:[0-9]+]]]|
; SI-DAG: v_mov_b32_e32 v[[UPLO:[0-9]+]], -1
; SI-DAG: v_mov_b32_e32 v[[UPHI:[0-9]+]], 0x3fefffff
; SI-DAG: v_min_f64 v[[[MINLO:[0-9]+]]:[[MINHI:[0-9]+]]], [[FRC]], v[[[UPLO]]:[[UPHI]]]
; SI-DAG: v_cmp_class_f64_e64 vcc, v[[[LO]]:[[HI]]], 3
; SI: v_cndmask_b32_e32 v[[RESLO:[0-9]+]], v[[MINLO]], v[[LO]], vcc
; SI: v_cndmask_b32_e32 v[[RESHI:[0-9]+]], v[[MINHI]], v[[HI]], vcc
; SI: v_add_f64 [[SUB0:v\[[0-9]+:[0-9]+\]]], -|v[[[LO]]:[[HI]]]|, -v[[[RESLO]]:[[RESHI]]]
; SI: v_add_f64 [[FRACT:v\[[0-9]+:[0-9]+\]]], -|v[[[LO]]:[[HI]]]|, -[[SUB0]]

; CI: buffer_load_dwordx2 [[X:v\[[0-9]+:[0-9]+\]]]
; CI: v_floor_f64_e64 [[FLOORX:v\[[0-9]+:[0-9]+\]]], -|[[X]]|
; CI: v_add_f64 [[FRACT:v\[[0-9]+:[0-9]+\]]], -|[[X]]|, -[[FLOORX]]

; GCN-UNSAFE: buffer_load_dwordx2 [[X:v\[[0-9]+:[0-9]+\]]]
; GCN-UNSAFE: v_fract_f64_e64 [[FRACT:v\[[0-9]+:[0-9]+\]]], -|[[X]]|

; GCN: buffer_store_dwordx2 [[FRACT]]
define amdgpu_kernel void @fract_f64_neg_abs(double addrspace(1)* %out, double addrspace(1)* %src) #1 {
  %x = load double, double addrspace(1)* %src
  %abs.x = call double @llvm.fabs.f64(double %x)
  %neg.abs.x = fsub double -0.0, %abs.x
  %floor.neg.abs.x = call double @llvm.floor.f64(double %neg.abs.x)
  %fract = fsub double %neg.abs.x, %floor.neg.abs.x
  store double %fract, double addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}multi_use_floor_fract_f64:
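; floor(x) has a second (volatile stored) use here, so the unsafe run should
; still form v_fract_f64 for the subtraction while keeping v_floor_f64 for
; the separately stored floor value.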
; VI-UNSAFE: buffer_load_dwordx2 [[X:v\[[0-9]+:[0-9]+\]]]
; VI-UNSAFE-DAG: v_floor_f64_e32 [[FLOOR:v\[[0-9]+:[0-9]+\]]], [[X]]
; VI-UNSAFE-DAG: v_fract_f64_e32 [[FRACT:v\[[0-9]+:[0-9]+\]]], [[X]]
; VI-UNSAFE: buffer_store_dwordx2 [[FLOOR]]
; VI-UNSAFE: buffer_store_dwordx2 [[FRACT]]
define amdgpu_kernel void @multi_use_floor_fract_f64(double addrspace(1)* %out, double addrspace(1)* %src) #1 {
  %x = load double, double addrspace(1)* %src
  %floor.x = call double @llvm.floor.f64(double %x)
  %fract = fsub double %x, %floor.x
  store volatile double %floor.x, double addrspace(1)* %out
  store volatile double %fract, double addrspace(1)* %out
  ret void
}

attributes #0 = { nounwind readnone }
attributes #1 = { nounwind }