150
|
1 // REQUIRES: arm-registered-target
|
|
2 // RUN: %clang_cc1 -triple armv8.3a-arm-none-eabi -target-cpu generic \
|
|
3 // RUN: -target-feature +fullfp16 -mfloat-abi soft -S -emit-llvm -o - %s | \
|
|
4 // RUN: opt -S -sroa -o - | FileCheck %s
|
|
5
|
|
6 #include <arm_neon.h>
|
|
7
|
|
// Complex add with 90-degree rotation, 64-bit vector of 4 x f16:
// verify vcadd_rot90_f16 lowers to the corresponding LLVM NEON intrinsic.
// 'result' is intentionally unused; only the emitted IR is checked.
void foo16x4_rot90(float16x4_t a, float16x4_t b)
{
// CHECK: call <4 x half> @llvm.arm.neon.vcadd.rot90.v4f16
  float16x4_t result = vcadd_rot90_f16(a, b);
}
|
|
13
|
|
// Complex add with 90-degree rotation, 64-bit vector of 2 x f32:
// verify vcadd_rot90_f32 lowers to the corresponding LLVM NEON intrinsic.
// 'result' is intentionally unused; only the emitted IR is checked.
void foo32x2_rot90(float32x2_t a, float32x2_t b)
{
// CHECK: call <2 x float> @llvm.arm.neon.vcadd.rot90.v2f32
  float32x2_t result = vcadd_rot90_f32(a, b);
}
|
|
19
|
|
// Complex add with 90-degree rotation, 128-bit vector of 8 x f16:
// verify the 'q' (quad) variant vcaddq_rot90_f16 lowers correctly.
// 'result' is intentionally unused; only the emitted IR is checked.
void foo16x8_rot90(float16x8_t a, float16x8_t b)
{
// CHECK: call <8 x half> @llvm.arm.neon.vcadd.rot90.v8f16
  float16x8_t result = vcaddq_rot90_f16(a, b);
}
|
|
25
|
|
// Complex add with 90-degree rotation, 128-bit vector of 4 x f32:
// verify the 'q' (quad) variant vcaddq_rot90_f32 lowers correctly.
// 'result' is intentionally unused; only the emitted IR is checked.
void foo32x4_rot90(float32x4_t a, float32x4_t b)
{
// CHECK: call <4 x float> @llvm.arm.neon.vcadd.rot90.v4f32
  float32x4_t result = vcaddq_rot90_f32(a, b);
}
|
|
31
|
|
// Complex add with 270-degree rotation, 64-bit vector of 4 x f16:
// verify vcadd_rot270_f16 lowers to the corresponding LLVM NEON intrinsic.
// 'result' is intentionally unused; only the emitted IR is checked.
void foo16x4_rot270(float16x4_t a, float16x4_t b)
{
// CHECK: call <4 x half> @llvm.arm.neon.vcadd.rot270.v4f16
  float16x4_t result = vcadd_rot270_f16(a, b);
}
|
|
37
|
|
// Complex add with 270-degree rotation, 64-bit vector of 2 x f32:
// verify vcadd_rot270_f32 lowers to the corresponding LLVM NEON intrinsic.
// 'result' is intentionally unused; only the emitted IR is checked.
void foo32x2_rot270(float32x2_t a, float32x2_t b)
{
// CHECK: call <2 x float> @llvm.arm.neon.vcadd.rot270.v2f32
  float32x2_t result = vcadd_rot270_f32(a, b);
}
|
|
43
|
|
// Complex add with 270-degree rotation, 128-bit vector of 8 x f16:
// verify the 'q' (quad) variant vcaddq_rot270_f16 lowers correctly.
// 'result' is intentionally unused; only the emitted IR is checked.
void foo16x8_rot270(float16x8_t a, float16x8_t b)
{
// CHECK: call <8 x half> @llvm.arm.neon.vcadd.rot270.v8f16
  float16x8_t result = vcaddq_rot270_f16(a, b);
}
|
|
49
|
|
// Complex add with 270-degree rotation, 128-bit vector of 4 x f32:
// verify the 'q' (quad) variant vcaddq_rot270_f32 lowers correctly.
// 'result' is intentionally unused; only the emitted IR is checked.
void foo32x4_rot270(float32x4_t a, float32x4_t b)
{
// CHECK: call <4 x float> @llvm.arm.neon.vcadd.rot270.v4f32
  float32x4_t result = vcaddq_rot270_f32(a, b);
}
|