// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple thumbv8.1m.main-none-none-eabi -target-feature +mve.fp -mfloat-abi hard -O0 -disable-O0-optnone -S -emit-llvm -o - %s | opt -S -passes=mem2reg | FileCheck %s
// RUN: %clang_cc1 -triple thumbv8.1m.main-none-none-eabi -target-feature +mve.fp -mfloat-abi hard -O0 -disable-O0-optnone -DPOLYMORPHIC -S -emit-llvm -o - %s | opt -S -passes=mem2reg | FileCheck %s

// REQUIRES: aarch64-registered-target || arm-registered-target

#include <arm_mve.h>
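
// VORN is a bitwise OR-NOT: each result lane is a | ~b. The unpredicated
// tests below verify that the vornq_* intrinsics lower to a plain
// xor-with-all-ones followed by an or, with no target-specific call.
// Building with -DPOLYMORPHIC exercises the type-generic vornq/vornq_m/vornq_x
// overloads from arm_mve.h instead of the explicitly suffixed names.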

// CHECK-LABEL: @test_vornq_u8(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = xor <16 x i8> [[B:%.*]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
// CHECK-NEXT:    [[TMP1:%.*]] = or <16 x i8> [[A:%.*]], [[TMP0]]
// CHECK-NEXT:    ret <16 x i8> [[TMP1]]
//
uint8x16_t test_vornq_u8(uint8x16_t a, uint8x16_t b)
{
#ifdef POLYMORPHIC
    return vornq(a, b);
#else /* POLYMORPHIC */
    return vornq_u8(a, b);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vornq_s16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = xor <8 x i16> [[B:%.*]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
// CHECK-NEXT:    [[TMP1:%.*]] = or <8 x i16> [[A:%.*]], [[TMP0]]
// CHECK-NEXT:    ret <8 x i16> [[TMP1]]
//
int16x8_t test_vornq_s16(int16x8_t a, int16x8_t b)
{
#ifdef POLYMORPHIC
    return vornq(a, b);
#else /* POLYMORPHIC */
    return vornq_s16(a, b);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vornq_u32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = xor <4 x i32> [[B:%.*]], <i32 -1, i32 -1, i32 -1, i32 -1>
// CHECK-NEXT:    [[TMP1:%.*]] = or <4 x i32> [[A:%.*]], [[TMP0]]
// CHECK-NEXT:    ret <4 x i32> [[TMP1]]
//
uint32x4_t test_vornq_u32(uint32x4_t a, uint32x4_t b)
{
#ifdef POLYMORPHIC
    return vornq(a, b);
#else /* POLYMORPHIC */
    return vornq_u32(a, b);
#endif /* POLYMORPHIC */
}

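// Bitwise operations have no floating-point form in IR, so float vector
// operands are bitcast to same-sized integer vectors, the or/xor (or, further
// below, the predicated intrinsic) is applied there, and the result is bitcast
// back to the floating-point type.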
// CHECK-LABEL: @test_vornq_f32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x float> [[A:%.*]] to <4 x i32>
// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <4 x float> [[B:%.*]] to <4 x i32>
// CHECK-NEXT:    [[TMP2:%.*]] = xor <4 x i32> [[TMP1]], <i32 -1, i32 -1, i32 -1, i32 -1>
// CHECK-NEXT:    [[TMP3:%.*]] = or <4 x i32> [[TMP0]], [[TMP2]]
// CHECK-NEXT:    [[TMP4:%.*]] = bitcast <4 x i32> [[TMP3]] to <4 x float>
// CHECK-NEXT:    ret <4 x float> [[TMP4]]
//
float32x4_t test_vornq_f32(float32x4_t a, float32x4_t b)
{
#ifdef POLYMORPHIC
    return vornq(a, b);
#else /* POLYMORPHIC */
    return vornq_f32(a, b);
#endif /* POLYMORPHIC */
}

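// The _m (merge) variants take an mve_pred16_t predicate, one bit per byte
// lane. Codegen zero-extends it to i32, turns it into a <N x i1> mask with
// llvm.arm.mve.pred.i2v, and calls llvm.arm.mve.orn.predicated; lanes whose
// predicate bit is clear take their value from the inactive operand.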
// CHECK-LABEL: @test_vornq_m_s8(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.orn.predicated.v16i8.v16i1(<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i1> [[TMP1]], <16 x i8> [[INACTIVE:%.*]])
// CHECK-NEXT:    ret <16 x i8> [[TMP2]]
//
int8x16_t test_vornq_m_s8(int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vornq_m(inactive, a, b, p);
#else /* POLYMORPHIC */
    return vornq_m_s8(inactive, a, b, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vornq_m_u16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.orn.predicated.v8i16.v8i1(<8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i1> [[TMP1]], <8 x i16> [[INACTIVE:%.*]])
// CHECK-NEXT:    ret <8 x i16> [[TMP2]]
//
uint16x8_t test_vornq_m_u16(uint16x8_t inactive, uint16x8_t a, uint16x8_t b, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vornq_m(inactive, a, b, p);
#else /* POLYMORPHIC */
    return vornq_m_u16(inactive, a, b, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vornq_m_s32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.orn.predicated.v4i32.v4i1(<4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i1> [[TMP1]], <4 x i32> [[INACTIVE:%.*]])
// CHECK-NEXT:    ret <4 x i32> [[TMP2]]
//
int32x4_t test_vornq_m_s32(int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vornq_m(inactive, a, b, p);
#else /* POLYMORPHIC */
    return vornq_m_s32(inactive, a, b, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vornq_m_f16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <8 x half> [[A:%.*]] to <8 x i16>
// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x half> [[B:%.*]] to <8 x i16>
// CHECK-NEXT:    [[TMP2:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP3:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP2]])
// CHECK-NEXT:    [[TMP4:%.*]] = bitcast <8 x half> [[INACTIVE:%.*]] to <8 x i16>
// CHECK-NEXT:    [[TMP5:%.*]] = call <8 x i16> @llvm.arm.mve.orn.predicated.v8i16.v8i1(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]], <8 x i1> [[TMP3]], <8 x i16> [[TMP4]])
// CHECK-NEXT:    [[TMP6:%.*]] = bitcast <8 x i16> [[TMP5]] to <8 x half>
// CHECK-NEXT:    ret <8 x half> [[TMP6]]
//
float16x8_t test_vornq_m_f16(float16x8_t inactive, float16x8_t a, float16x8_t b, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vornq_m(inactive, a, b, p);
#else /* POLYMORPHIC */
    return vornq_m_f16(inactive, a, b, p);
#endif /* POLYMORPHIC */
}

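// The _x (don't-care) variants use the same predicated intrinsic but pass
// undef as the inactive operand, so lanes whose predicate bit is clear hold
// an unspecified value.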
// CHECK-LABEL: @test_vornq_x_u8(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.orn.predicated.v16i8.v16i1(<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i1> [[TMP1]], <16 x i8> undef)
// CHECK-NEXT:    ret <16 x i8> [[TMP2]]
//
uint8x16_t test_vornq_x_u8(uint8x16_t a, uint8x16_t b, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vornq_x(a, b, p);
#else /* POLYMORPHIC */
    return vornq_x_u8(a, b, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vornq_x_s16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.orn.predicated.v8i16.v8i1(<8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i1> [[TMP1]], <8 x i16> undef)
// CHECK-NEXT:    ret <8 x i16> [[TMP2]]
//
int16x8_t test_vornq_x_s16(int16x8_t a, int16x8_t b, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vornq_x(a, b, p);
#else /* POLYMORPHIC */
    return vornq_x_s16(a, b, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vornq_x_u32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.orn.predicated.v4i32.v4i1(<4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i1> [[TMP1]], <4 x i32> undef)
// CHECK-NEXT:    ret <4 x i32> [[TMP2]]
//
uint32x4_t test_vornq_x_u32(uint32x4_t a, uint32x4_t b, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vornq_x(a, b, p);
#else /* POLYMORPHIC */
    return vornq_x_u32(a, b, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vornq_x_f32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x float> [[A:%.*]] to <4 x i32>
// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <4 x float> [[B:%.*]] to <4 x i32>
// CHECK-NEXT:    [[TMP2:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP3:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP2]])
// CHECK-NEXT:    [[TMP4:%.*]] = call <4 x i32> @llvm.arm.mve.orn.predicated.v4i32.v4i1(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]], <4 x i1> [[TMP3]], <4 x i32> undef)
// CHECK-NEXT:    [[TMP5:%.*]] = bitcast <4 x i32> [[TMP4]] to <4 x float>
// CHECK-NEXT:    ret <4 x float> [[TMP5]]
//
float32x4_t test_vornq_x_f32(float32x4_t a, float32x4_t b, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vornq_x(a, b, p);
#else /* POLYMORPHIC */
    return vornq_x_f32(a, b, p);
#endif /* POLYMORPHIC */
}