comparison test/CodeGen/AArch64/arm64-umaxv.ll @ 134:3a76565eade5 LLVM5.0.1

update 5.0.1
author mir3636
date Sat, 17 Feb 2018 09:57:20 +0900
parents 1172e4bd9c6f
children
comparison
equal deleted inserted replaced
133:c60214abe0e8 134:3a76565eade5
87 } 87 }
88 88
89 define <8 x i8> @test_vmaxv_u8_used_by_laneop(<8 x i8> %a1, <8 x i8> %a2) { 89 define <8 x i8> @test_vmaxv_u8_used_by_laneop(<8 x i8> %a1, <8 x i8> %a2) {
90 ; CHECK-LABEL: test_vmaxv_u8_used_by_laneop: 90 ; CHECK-LABEL: test_vmaxv_u8_used_by_laneop:
91 ; CHECK: umaxv.8b b[[REGNUM:[0-9]+]], v1 91 ; CHECK: umaxv.8b b[[REGNUM:[0-9]+]], v1
92 ; CHECK-NEXT: ins.b v0[3], v[[REGNUM]][0] 92 ; CHECK-NEXT: mov.b v0[3], v[[REGNUM]][0]
93 ; CHECK-NEXT: ret 93 ; CHECK-NEXT: ret
94 entry: 94 entry:
95 %0 = tail call i32 @llvm.aarch64.neon.umaxv.i32.v8i8(<8 x i8> %a2) 95 %0 = tail call i32 @llvm.aarch64.neon.umaxv.i32.v8i8(<8 x i8> %a2)
96 %1 = trunc i32 %0 to i8 96 %1 = trunc i32 %0 to i8
97 %2 = insertelement <8 x i8> %a1, i8 %1, i32 3 97 %2 = insertelement <8 x i8> %a1, i8 %1, i32 3
98 ret <8 x i8> %2 98 ret <8 x i8> %2
99 } 99 }
100 100
101 define <4 x i16> @test_vmaxv_u16_used_by_laneop(<4 x i16> %a1, <4 x i16> %a2) { 101 define <4 x i16> @test_vmaxv_u16_used_by_laneop(<4 x i16> %a1, <4 x i16> %a2) {
102 ; CHECK-LABEL: test_vmaxv_u16_used_by_laneop: 102 ; CHECK-LABEL: test_vmaxv_u16_used_by_laneop:
103 ; CHECK: umaxv.4h h[[REGNUM:[0-9]+]], v1 103 ; CHECK: umaxv.4h h[[REGNUM:[0-9]+]], v1
104 ; CHECK-NEXT: ins.h v0[3], v[[REGNUM]][0] 104 ; CHECK-NEXT: mov.h v0[3], v[[REGNUM]][0]
105 ; CHECK-NEXT: ret 105 ; CHECK-NEXT: ret
106 entry: 106 entry:
107 %0 = tail call i32 @llvm.aarch64.neon.umaxv.i32.v4i16(<4 x i16> %a2) 107 %0 = tail call i32 @llvm.aarch64.neon.umaxv.i32.v4i16(<4 x i16> %a2)
108 %1 = trunc i32 %0 to i16 108 %1 = trunc i32 %0 to i16
109 %2 = insertelement <4 x i16> %a1, i16 %1, i32 3 109 %2 = insertelement <4 x i16> %a1, i16 %1, i32 3
110 ret <4 x i16> %2 110 ret <4 x i16> %2
111 } 111 }
112 112
113 define <2 x i32> @test_vmaxv_u32_used_by_laneop(<2 x i32> %a1, <2 x i32> %a2) { 113 define <2 x i32> @test_vmaxv_u32_used_by_laneop(<2 x i32> %a1, <2 x i32> %a2) {
114 ; CHECK-LABEL: test_vmaxv_u32_used_by_laneop: 114 ; CHECK-LABEL: test_vmaxv_u32_used_by_laneop:
115 ; CHECK: umaxp.2s v[[REGNUM:[0-9]+]], v1, v1 115 ; CHECK: umaxp.2s v[[REGNUM:[0-9]+]], v1, v1
116 ; CHECK-NEXT: ins.s v0[1], v[[REGNUM]][0] 116 ; CHECK-NEXT: mov.s v0[1], v[[REGNUM]][0]
117 ; CHECK-NEXT: ret 117 ; CHECK-NEXT: ret
118 entry: 118 entry:
119 %0 = tail call i32 @llvm.aarch64.neon.umaxv.i32.v2i32(<2 x i32> %a2) 119 %0 = tail call i32 @llvm.aarch64.neon.umaxv.i32.v2i32(<2 x i32> %a2)
120 %1 = insertelement <2 x i32> %a1, i32 %0, i32 1 120 %1 = insertelement <2 x i32> %a1, i32 %0, i32 1
121 ret <2 x i32> %1 121 ret <2 x i32> %1
122 } 122 }
123 123
124 define <16 x i8> @test_vmaxvq_u8_used_by_laneop(<16 x i8> %a1, <16 x i8> %a2) { 124 define <16 x i8> @test_vmaxvq_u8_used_by_laneop(<16 x i8> %a1, <16 x i8> %a2) {
125 ; CHECK-LABEL: test_vmaxvq_u8_used_by_laneop: 125 ; CHECK-LABEL: test_vmaxvq_u8_used_by_laneop:
126 ; CHECK: umaxv.16b b[[REGNUM:[0-9]+]], v1 126 ; CHECK: umaxv.16b b[[REGNUM:[0-9]+]], v1
127 ; CHECK-NEXT: ins.b v0[3], v[[REGNUM]][0] 127 ; CHECK-NEXT: mov.b v0[3], v[[REGNUM]][0]
128 ; CHECK-NEXT: ret 128 ; CHECK-NEXT: ret
129 entry: 129 entry:
130 %0 = tail call i32 @llvm.aarch64.neon.umaxv.i32.v16i8(<16 x i8> %a2) 130 %0 = tail call i32 @llvm.aarch64.neon.umaxv.i32.v16i8(<16 x i8> %a2)
131 %1 = trunc i32 %0 to i8 131 %1 = trunc i32 %0 to i8
132 %2 = insertelement <16 x i8> %a1, i8 %1, i32 3 132 %2 = insertelement <16 x i8> %a1, i8 %1, i32 3
133 ret <16 x i8> %2 133 ret <16 x i8> %2
134 } 134 }
135 135
136 define <8 x i16> @test_vmaxvq_u16_used_by_laneop(<8 x i16> %a1, <8 x i16> %a2) { 136 define <8 x i16> @test_vmaxvq_u16_used_by_laneop(<8 x i16> %a1, <8 x i16> %a2) {
137 ; CHECK-LABEL: test_vmaxvq_u16_used_by_laneop: 137 ; CHECK-LABEL: test_vmaxvq_u16_used_by_laneop:
138 ; CHECK: umaxv.8h h[[REGNUM:[0-9]+]], v1 138 ; CHECK: umaxv.8h h[[REGNUM:[0-9]+]], v1
139 ; CHECK-NEXT: ins.h v0[3], v[[REGNUM]][0] 139 ; CHECK-NEXT: mov.h v0[3], v[[REGNUM]][0]
140 ; CHECK-NEXT: ret 140 ; CHECK-NEXT: ret
141 entry: 141 entry:
142 %0 = tail call i32 @llvm.aarch64.neon.umaxv.i32.v8i16(<8 x i16> %a2) 142 %0 = tail call i32 @llvm.aarch64.neon.umaxv.i32.v8i16(<8 x i16> %a2)
143 %1 = trunc i32 %0 to i16 143 %1 = trunc i32 %0 to i16
144 %2 = insertelement <8 x i16> %a1, i16 %1, i32 3 144 %2 = insertelement <8 x i16> %a1, i16 %1, i32 3
145 ret <8 x i16> %2 145 ret <8 x i16> %2
146 } 146 }
147 147
148 define <4 x i32> @test_vmaxvq_u32_used_by_laneop(<4 x i32> %a1, <4 x i32> %a2) { 148 define <4 x i32> @test_vmaxvq_u32_used_by_laneop(<4 x i32> %a1, <4 x i32> %a2) {
149 ; CHECK-LABEL: test_vmaxvq_u32_used_by_laneop: 149 ; CHECK-LABEL: test_vmaxvq_u32_used_by_laneop:
150 ; CHECK: umaxv.4s s[[REGNUM:[0-9]+]], v1 150 ; CHECK: umaxv.4s s[[REGNUM:[0-9]+]], v1
151 ; CHECK-NEXT: ins.s v0[3], v[[REGNUM]][0] 151 ; CHECK-NEXT: mov.s v0[3], v[[REGNUM]][0]
152 ; CHECK-NEXT: ret 152 ; CHECK-NEXT: ret
153 entry: 153 entry:
154 %0 = tail call i32 @llvm.aarch64.neon.umaxv.i32.v4i32(<4 x i32> %a2) 154 %0 = tail call i32 @llvm.aarch64.neon.umaxv.i32.v4i32(<4 x i32> %a2)
155 %1 = insertelement <4 x i32> %a1, i32 %0, i32 3 155 %1 = insertelement <4 x i32> %a1, i32 %0, i32 3
156 ret <4 x i32> %1 156 ret <4 x i32> %1