Mercurial > hg > Members > tobaru > cbc > CbC_llvm
view test/CodeGen/X86/avx512vl-arith.ll @ 122:36195a0db682
merging ( incomplete )
author | Shinji KONO <kono@ie.u-ryukyu.ac.jp> |
---|---|
date | Fri, 17 Nov 2017 20:32:31 +0900 |
parents | 803732b1fca8 |
children |
line wrap: on
line source
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512vl --show-mc-encoding| FileCheck %s

; 256-bit

define <4 x i64> @vpaddq256_test(<4 x i64> %i, <4 x i64> %j) nounwind readnone {
; CHECK-LABEL: vpaddq256_test:
; CHECK:       ## BB#0:
; CHECK-NEXT:    vpaddq %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd4,0xc1]
; CHECK-NEXT:    retq ## encoding: [0xc3]
  %x = add <4 x i64> %i, %j
  ret <4 x i64> %x
}

define <4 x i64> @vpaddq256_fold_test(<4 x i64> %i, <4 x i64>* %j) nounwind {
; CHECK-LABEL: vpaddq256_fold_test:
; CHECK:       ## BB#0:
; CHECK-NEXT:    vpaddq (%rdi), %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xd4,0x07]
; CHECK-NEXT:    retq ## encoding: [0xc3]
  %tmp = load <4 x i64>, <4 x i64>* %j, align 4
  %x = add <4 x i64> %i, %tmp
  ret <4 x i64> %x
}

define <4 x i64> @vpaddq256_broadcast_test(<4 x i64> %i) nounwind {
; CHECK-LABEL: vpaddq256_broadcast_test:
; CHECK:       ## BB#0:
; CHECK-NEXT:    vpaddq {{.*}}(%rip){1to4}, %ymm0, %ymm0 ## encoding: [0x62,0xf1,0xfd,0x38,0xd4,0x05,A,A,A,A]
; CHECK-NEXT:    ## fixup A - offset: 6, value: LCPI2_0-4, kind: reloc_riprel_4byte
; CHECK-NEXT:    retq ## encoding: [0xc3]
  %x = add <4 x i64> %i, <i64 2, i64 2, i64 2, i64 2>
  ret <4 x i64> %x
}

define <4 x i64> @vpaddq256_broadcast2_test(<4 x i64> %i, i64* %j.ptr) nounwind {
; CHECK-LABEL: vpaddq256_broadcast2_test:
; CHECK:       ## BB#0:
; CHECK-NEXT:    vpaddq (%rdi){1to4}, %ymm0, %ymm0 ## encoding: [0x62,0xf1,0xfd,0x38,0xd4,0x07]
; CHECK-NEXT:    retq ## encoding: [0xc3]
  %j = load i64, i64* %j.ptr
  %j.0 = insertelement <4 x i64> undef, i64 %j, i32 0
  %j.v = shufflevector <4 x i64> %j.0, <4 x i64> undef, <4 x i32> zeroinitializer
  %x = add <4 x i64> %i, %j.v
  ret <4 x i64> %x
}

define <8 x i32> @vpaddd256_test(<8 x i32> %i, <8 x i32> %j) nounwind readnone {
; CHECK-LABEL: vpaddd256_test:
; CHECK:       ## BB#0:
; CHECK-NEXT:    vpaddd %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfe,0xc1]
; CHECK-NEXT:    retq ## encoding: [0xc3]
  %x = add <8 x i32> %i, %j
  ret <8 x i32> %x
}

define <8 x i32> @vpaddd256_fold_test(<8 x i32> %i, <8 x i32>* %j) nounwind {
; CHECK-LABEL: vpaddd256_fold_test:
; CHECK:       ## BB#0:
; CHECK-NEXT:    vpaddd (%rdi), %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfe,0x07]
; CHECK-NEXT:    retq ## encoding: [0xc3]
  %tmp = load <8 x i32>, <8 x i32>* %j, align 4
  %x = add <8 x i32> %i, %tmp
  ret <8 x i32> %x
}

define <8 x i32> @vpaddd256_broadcast_test(<8 x i32> %i) nounwind {
; CHECK-LABEL: vpaddd256_broadcast_test:
; CHECK:       ## BB#0:
; CHECK-NEXT:    vpaddd {{.*}}(%rip){1to8}, %ymm0, %ymm0 ## encoding: [0x62,0xf1,0x7d,0x38,0xfe,0x05,A,A,A,A]
; CHECK-NEXT:    ## fixup A - offset: 6, value: LCPI6_0-4, kind: reloc_riprel_4byte
; CHECK-NEXT:    retq ## encoding: [0xc3]
  %x = add <8 x i32> %i, <i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3, i32 3>
  ret <8 x i32> %x
}

define <8 x i32> @vpaddd256_mask_test(<8 x i32> %i, <8 x i32> %j, <8 x i32> %mask1) nounwind readnone {
; CHECK-LABEL: vpaddd256_mask_test:
; CHECK:       ## BB#0:
; CHECK-NEXT:    vpxor %xmm3, %xmm3, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xe1,0xef,0xdb]
; CHECK-NEXT:    vpcmpneqd %ymm3, %ymm2, %k1 ## encoding: [0x62,0xf3,0x6d,0x28,0x1f,0xcb,0x04]
; CHECK-NEXT:    vpaddd %ymm1, %ymm0, %ymm0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xfe,0xc1]
; CHECK-NEXT:    retq ## encoding: [0xc3]
  %mask = icmp ne <8 x i32> %mask1, zeroinitializer
  %x = add <8 x i32> %i, %j
  %r = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> %i
  ret <8 x i32> %r
}

define <8 x i32> @vpaddd256_maskz_test(<8 x i32> %i, <8 x i32> %j, <8 x i32> %mask1) nounwind readnone {
; CHECK-LABEL: vpaddd256_maskz_test:
; CHECK:       ## BB#0:
; CHECK-NEXT:    vpxor %xmm3, %xmm3, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xe1,0xef,0xdb]
; CHECK-NEXT:    vpcmpneqd %ymm3, %ymm2, %k1 ## encoding: [0x62,0xf3,0x6d,0x28,0x1f,0xcb,0x04]
; CHECK-NEXT:    vpaddd %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xfe,0xc1]
; CHECK-NEXT:    retq ## encoding: [0xc3]
  %mask = icmp ne <8 x i32> %mask1, zeroinitializer
  %x = add <8 x i32> %i, %j
  %r = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> zeroinitializer
  ret <8 x i32> %r
}

define <8 x i32> @vpaddd256_mask_fold_test(<8 x i32> %i, <8 x i32>* %j.ptr, <8 x i32> %mask1) nounwind readnone {
; CHECK-LABEL: vpaddd256_mask_fold_test:
; CHECK:       ## BB#0:
; CHECK-NEXT:    vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
; CHECK-NEXT:    vpcmpneqd %ymm2, %ymm1, %k1 ## encoding: [0x62,0xf3,0x75,0x28,0x1f,0xca,0x04]
; CHECK-NEXT:    vpaddd (%rdi), %ymm0, %ymm0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xfe,0x07]
; CHECK-NEXT:    retq ## encoding: [0xc3]
  %mask = icmp ne <8 x i32> %mask1, zeroinitializer
  %j = load <8 x i32>, <8 x i32>* %j.ptr
  %x = add <8 x i32> %i, %j
  %r = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> %i
  ret <8 x i32> %r
}

define <8 x i32> @vpaddd256_mask_broadcast_test(<8 x i32> %i, <8 x i32> %mask1) nounwind readnone {
; CHECK-LABEL: vpaddd256_mask_broadcast_test:
; CHECK:       ## BB#0:
; CHECK-NEXT:    vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
; CHECK-NEXT:    vpcmpneqd %ymm2, %ymm1, %k1 ## encoding: [0x62,0xf3,0x75,0x28,0x1f,0xca,0x04]
; CHECK-NEXT:    vpaddd {{.*}}(%rip){1to8}, %ymm0, %ymm0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x39,0xfe,0x05,A,A,A,A]
; CHECK-NEXT:    ## fixup A - offset: 6, value: LCPI10_0-4, kind: reloc_riprel_4byte
; CHECK-NEXT:    retq ## encoding: [0xc3]
  %mask = icmp ne <8 x i32> %mask1, zeroinitializer
  %x = add <8 x i32> %i, <i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4>
  %r = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> %i
  ret <8 x i32> %r
}

define <8 x i32> @vpaddd256_maskz_fold_test(<8 x i32> %i, <8 x i32>* %j.ptr, <8 x i32> %mask1) nounwind readnone {
; CHECK-LABEL: vpaddd256_maskz_fold_test:
; CHECK:       ## BB#0:
; CHECK-NEXT:    vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
; CHECK-NEXT:    vpcmpneqd %ymm2, %ymm1, %k1 ## encoding: [0x62,0xf3,0x75,0x28,0x1f,0xca,0x04]
; CHECK-NEXT:    vpaddd (%rdi), %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xfe,0x07]
; CHECK-NEXT:    retq ## encoding: [0xc3]
  %mask = icmp ne <8 x i32> %mask1, zeroinitializer
  %j = load <8 x i32>, <8 x i32>* %j.ptr
  %x = add <8 x i32> %i, %j
  %r = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> zeroinitializer
  ret <8 x i32> %r
}

define <8 x i32> @vpaddd256_maskz_broadcast_test(<8 x i32> %i, <8 x i32> %mask1) nounwind readnone {
; CHECK-LABEL: vpaddd256_maskz_broadcast_test:
; CHECK:       ## BB#0:
; CHECK-NEXT:    vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
; CHECK-NEXT:    vpcmpneqd %ymm2, %ymm1, %k1 ## encoding: [0x62,0xf3,0x75,0x28,0x1f,0xca,0x04]
; CHECK-NEXT:    vpaddd {{.*}}(%rip){1to8}, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xb9,0xfe,0x05,A,A,A,A]
; CHECK-NEXT:    ## fixup A - offset: 6, value: LCPI12_0-4, kind: reloc_riprel_4byte
; CHECK-NEXT:    retq ## encoding: [0xc3]
  %mask = icmp ne <8 x i32> %mask1, zeroinitializer
  %x = add <8 x i32> %i, <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
  %r = select <8 x i1> %mask, <8 x i32> %x, <8 x i32> zeroinitializer
  ret <8 x i32> %r
}

define <4 x i64> @vpsubq256_test(<4 x i64> %i, <4 x i64> %j) nounwind readnone {
; CHECK-LABEL: vpsubq256_test:
; CHECK:       ## BB#0:
; CHECK-NEXT:    vpsubq %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfb,0xc1]
; CHECK-NEXT:    retq ## encoding: [0xc3]
  %x = sub <4 x i64> %i, %j
  ret <4 x i64> %x
}

define <8 x i32> @vpsubd256_test(<8 x i32> %i, <8 x i32> %j) nounwind readnone {
; CHECK-LABEL: vpsubd256_test:
; CHECK:       ## BB#0:
; CHECK-NEXT:    vpsubd %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xfa,0xc1]
; CHECK-NEXT:    retq ## encoding: [0xc3]
  %x = sub <8 x i32> %i, %j
  ret <8 x i32> %x
}

define <8 x i32> @vpmulld256_test(<8 x i32> %i, <8 x i32> %j) {
; CHECK-LABEL: vpmulld256_test:
; CHECK:       ## BB#0:
; CHECK-NEXT:    vpmulld %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x7d,0x40,0xc1]
; CHECK-NEXT:    retq ## encoding: [0xc3]
  %x = mul <8 x i32> %i, %j
  ret <8 x i32> %x
}

define <4 x double> @test_vaddpd_256(<4 x double> %y, <4 x double> %x) {
; CHECK-LABEL: test_vaddpd_256:
; CHECK:       ## BB#0: ## %entry
; CHECK-NEXT:    vaddpd %ymm0, %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf5,0x58,0xc0]
; CHECK-NEXT:    retq ## encoding: [0xc3]
entry:
  %add.i = fadd <4 x double> %x, %y
  ret <4 x double> %add.i
}

define <4 x double> @test_fold_vaddpd_256(<4 x double> %y) {
; CHECK-LABEL: test_fold_vaddpd_256:
; CHECK:       ## BB#0: ## %entry
; CHECK-NEXT:    vaddpd {{.*}}(%rip), %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x58,0x05,A,A,A,A]
; CHECK-NEXT:    ## fixup A - offset: 4, value: LCPI17_0-4, kind: reloc_riprel_4byte
; CHECK-NEXT:    retq ## encoding: [0xc3]
entry:
  %add.i = fadd <4 x double> %y, <double 4.500000e+00, double 3.400000e+00, double 4.500000e+00, double 5.600000e+00>
  ret <4 x double> %add.i
}

define <8 x float> @test_broadcast_vaddpd_256(<8 x float> %a) nounwind {
; CHECK-LABEL: test_broadcast_vaddpd_256:
; CHECK:       ## BB#0:
; CHECK-NEXT:    vaddps {{.*}}(%rip){1to8}, %ymm0, %ymm0 ## encoding: [0x62,0xf1,0x7c,0x38,0x58,0x05,A,A,A,A]
; CHECK-NEXT:    ## fixup A - offset: 6, value: LCPI18_0-4, kind: reloc_riprel_4byte
; CHECK-NEXT:    retq ## encoding: [0xc3]
  %b = fadd <8 x float> %a, <float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000>
  ret <8 x float> %b
}

define <8 x float> @test_mask_vaddps_256(<8 x float> %dst, <8 x float> %i, <8 x float> %j, <8 x i32> %mask1) nounwind readnone {
; CHECK-LABEL: test_mask_vaddps_256:
; CHECK:       ## BB#0:
; CHECK-NEXT:    vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4]
; CHECK-NEXT:    vpcmpneqd %ymm4, %ymm3, %k1 ## encoding: [0x62,0xf3,0x65,0x28,0x1f,0xcc,0x04]
; CHECK-NEXT:    vaddps %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf1,0x74,0x29,0x58,0xc2]
; CHECK-NEXT:    retq ## encoding: [0xc3]
  %mask = icmp ne <8 x i32> %mask1, zeroinitializer
  %x = fadd <8 x float> %i, %j
  %r = select <8 x i1> %mask, <8 x float> %x, <8 x float> %dst
  ret <8 x float> %r
}

define <8 x float> @test_mask_vmulps_256(<8 x float> %dst, <8 x float> %i, <8 x float> %j, <8 x i32> %mask1) nounwind readnone {
; CHECK-LABEL: test_mask_vmulps_256:
; CHECK:       ## BB#0:
; CHECK-NEXT:    vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4]
; CHECK-NEXT:    vpcmpneqd %ymm4, %ymm3, %k1 ## encoding: [0x62,0xf3,0x65,0x28,0x1f,0xcc,0x04]
; CHECK-NEXT:    vmulps %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf1,0x74,0x29,0x59,0xc2]
; CHECK-NEXT:    retq ## encoding: [0xc3]
  %mask = icmp ne <8 x i32> %mask1, zeroinitializer
  %x = fmul <8 x float> %i, %j
  %r = select <8 x i1> %mask, <8 x float> %x, <8 x float> %dst
  ret <8 x float> %r
}

define <8 x float> @test_mask_vminps_256(<8 x float> %dst, <8 x float> %i, <8 x float> %j, <8 x i32> %mask1)nounwind readnone {
; CHECK-LABEL: test_mask_vminps_256:
; CHECK:       ## BB#0:
; CHECK-NEXT:    vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4]
; CHECK-NEXT:    vpcmpneqd %ymm4, %ymm3, %k1 ## encoding: [0x62,0xf3,0x65,0x28,0x1f,0xcc,0x04]
; CHECK-NEXT:    vminps %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf1,0x74,0x29,0x5d,0xc2]
; CHECK-NEXT:    retq ## encoding: [0xc3]
  %mask = icmp ne <8 x i32> %mask1, zeroinitializer
  %cmp_res = fcmp olt <8 x float> %i, %j
  %min = select <8 x i1> %cmp_res, <8 x float> %i, <8 x float> %j
  %r = select <8 x i1> %mask, <8 x float> %min, <8 x float> %dst
  ret <8 x float> %r
}

define <8 x float> @test_mask_vmaxps_256(<8 x float> %dst, <8 x float> %i, <8 x float> %j, <8 x i32> %mask1) nounwind readnone {
; CHECK-LABEL: test_mask_vmaxps_256:
; CHECK:       ## BB#0:
; CHECK-NEXT:    vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4]
; CHECK-NEXT:    vpcmpneqd %ymm4, %ymm3, %k1 ## encoding: [0x62,0xf3,0x65,0x28,0x1f,0xcc,0x04]
; CHECK-NEXT:    vmaxps %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf1,0x74,0x29,0x5f,0xc2]
; CHECK-NEXT:    retq ## encoding: [0xc3]
  %mask = icmp ne <8 x i32> %mask1, zeroinitializer
  %cmp_res = fcmp ogt <8 x float> %i, %j
  %max = select <8 x i1> %cmp_res, <8 x float> %i, <8 x float> %j
  %r = select <8 x i1> %mask, <8 x float> %max, <8 x float> %dst
  ret <8 x float> %r
}

define <8 x float> @test_mask_vsubps_256(<8 x float> %dst, <8 x float> %i, <8 x float> %j, <8 x i32> %mask1) nounwind readnone {
; CHECK-LABEL: test_mask_vsubps_256:
; CHECK:       ## BB#0:
; CHECK-NEXT:    vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4]
; CHECK-NEXT:    vpcmpneqd %ymm4, %ymm3, %k1 ## encoding: [0x62,0xf3,0x65,0x28,0x1f,0xcc,0x04]
; CHECK-NEXT:    vsubps %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf1,0x74,0x29,0x5c,0xc2]
; CHECK-NEXT:    retq ## encoding: [0xc3]
  %mask = icmp ne <8 x i32> %mask1, zeroinitializer
  %x = fsub <8 x float> %i, %j
  %r = select <8 x i1> %mask, <8 x float> %x, <8 x float> %dst
  ret <8 x float> %r
}

define <8 x float> @test_mask_vdivps_256(<8 x float> %dst, <8 x float> %i, <8 x float> %j, <8 x i32> %mask1) nounwind readnone {
; CHECK-LABEL: test_mask_vdivps_256:
; CHECK:       ## BB#0:
; CHECK-NEXT:    vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4]
; CHECK-NEXT:    vpcmpneqd %ymm4, %ymm3, %k1 ## encoding: [0x62,0xf3,0x65,0x28,0x1f,0xcc,0x04]
; CHECK-NEXT:    vdivps %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf1,0x74,0x29,0x5e,0xc2]
; CHECK-NEXT:    retq ## encoding: [0xc3]
  %mask = icmp ne <8 x i32> %mask1, zeroinitializer
  %x = fdiv <8 x float> %i, %j
  %r = select <8 x i1> %mask, <8 x float> %x, <8 x float> %dst
  ret <8 x float> %r
}

define <4 x double> @test_mask_vmulpd_256(<4 x double> %dst, <4 x double> %i, <4 x double> %j, <4 x i64> %mask1) nounwind readnone {
; CHECK-LABEL: test_mask_vmulpd_256:
; CHECK:       ## BB#0:
; CHECK-NEXT:    vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4]
; CHECK-NEXT:    vpcmpneqq %ymm4, %ymm3, %k1 ## encoding: [0x62,0xf3,0xe5,0x28,0x1f,0xcc,0x04]
; CHECK-NEXT:    vmulpd %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf1,0xf5,0x29,0x59,0xc2]
; CHECK-NEXT:    retq ## encoding: [0xc3]
  %mask = icmp ne <4 x i64> %mask1, zeroinitializer
  %x = fmul <4 x double> %i, %j
  %r = select <4 x i1> %mask, <4 x double> %x, <4 x double> %dst
  ret <4 x double> %r
}

define <4 x double> @test_mask_vminpd_256(<4 x double> %dst, <4 x double> %i, <4 x double> %j, <4 x i64> %mask1) nounwind readnone {
; CHECK-LABEL: test_mask_vminpd_256:
; CHECK:       ## BB#0:
; CHECK-NEXT:    vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4]
; CHECK-NEXT:    vpcmpneqq %ymm4, %ymm3, %k1 ## encoding: [0x62,0xf3,0xe5,0x28,0x1f,0xcc,0x04]
; CHECK-NEXT:    vminpd %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf1,0xf5,0x29,0x5d,0xc2]
; CHECK-NEXT:    retq ## encoding: [0xc3]
  %mask = icmp ne <4 x i64> %mask1, zeroinitializer
  %cmp_res = fcmp olt <4 x double> %i, %j
  %min = select <4 x i1> %cmp_res, <4 x double> %i, <4 x double> %j
  %r = select <4 x i1> %mask, <4 x double> %min, <4 x double> %dst
  ret <4 x double> %r
}

define <4 x double> @test_mask_vmaxpd_256(<4 x double> %dst, <4 x double> %i, <4 x double> %j, <4 x i64> %mask1) nounwind readnone {
; CHECK-LABEL: test_mask_vmaxpd_256:
; CHECK:       ## BB#0:
; CHECK-NEXT:    vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4]
; CHECK-NEXT:    vpcmpneqq %ymm4, %ymm3, %k1 ## encoding: [0x62,0xf3,0xe5,0x28,0x1f,0xcc,0x04]
; CHECK-NEXT:    vmaxpd %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf1,0xf5,0x29,0x5f,0xc2]
; CHECK-NEXT:    retq ## encoding: [0xc3]
  %mask = icmp ne <4 x i64> %mask1, zeroinitializer
  %cmp_res = fcmp ogt <4 x double> %i, %j
  %max = select <4 x i1> %cmp_res, <4 x double> %i, <4 x double> %j
  %r = select <4 x i1> %mask, <4 x double> %max, <4 x double> %dst
  ret <4 x double> %r
}
define <4 x double> @test_mask_vsubpd_256(<4 x double> %dst, <4 x double> %i, <4 x double> %j, <4 x i64> %mask1) nounwind readnone {
; CHECK-LABEL: test_mask_vsubpd_256:
; CHECK:       ## BB#0:
; CHECK-NEXT:    vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4]
; CHECK-NEXT:    vpcmpneqq %ymm4, %ymm3, %k1 ## encoding: [0x62,0xf3,0xe5,0x28,0x1f,0xcc,0x04]
; CHECK-NEXT:    vsubpd %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf1,0xf5,0x29,0x5c,0xc2]
; CHECK-NEXT:    retq ## encoding: [0xc3]
  %mask = icmp ne <4 x i64> %mask1, zeroinitializer
  %x = fsub <4 x double> %i, %j
  %r = select <4 x i1> %mask, <4 x double> %x, <4 x double> %dst
  ret <4 x double> %r
}

define <4 x double> @test_mask_vdivpd_256(<4 x double> %dst, <4 x double> %i, <4 x double> %j, <4 x i64> %mask1) nounwind readnone {
; CHECK-LABEL: test_mask_vdivpd_256:
; CHECK:       ## BB#0:
; CHECK-NEXT:    vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4]
; CHECK-NEXT:    vpcmpneqq %ymm4, %ymm3, %k1 ## encoding: [0x62,0xf3,0xe5,0x28,0x1f,0xcc,0x04]
; CHECK-NEXT:    vdivpd %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf1,0xf5,0x29,0x5e,0xc2]
; CHECK-NEXT:    retq ## encoding: [0xc3]
  %mask = icmp ne <4 x i64> %mask1, zeroinitializer
  %x = fdiv <4 x double> %i, %j
  %r = select <4 x i1> %mask, <4 x double> %x, <4 x double> %dst
  ret <4 x double> %r
}

define <4 x double> @test_mask_vaddpd_256(<4 x double> %dst, <4 x double> %i, <4 x double> %j, <4 x i64> %mask1) nounwind readnone {
; CHECK-LABEL: test_mask_vaddpd_256:
; CHECK:       ## BB#0:
; CHECK-NEXT:    vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4]
; CHECK-NEXT:    vpcmpneqq %ymm4, %ymm3, %k1 ## encoding: [0x62,0xf3,0xe5,0x28,0x1f,0xcc,0x04]
; CHECK-NEXT:    vaddpd %ymm2, %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf1,0xf5,0x29,0x58,0xc2]
; CHECK-NEXT:    retq ## encoding: [0xc3]
  %mask = icmp ne <4 x i64> %mask1, zeroinitializer
  %x = fadd <4 x double> %i, %j
  %r = select <4 x i1> %mask, <4 x double> %x, <4 x double> %dst
  ret <4 x double> %r
}

define <4 x double> @test_maskz_vaddpd_256(<4 x double> %i, <4 x double> %j, <4 x i64> %mask1) nounwind readnone {
; CHECK-LABEL: test_maskz_vaddpd_256:
; CHECK:       ## BB#0:
; CHECK-NEXT:    vpxor %xmm3, %xmm3, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xe1,0xef,0xdb]
; CHECK-NEXT:    vpcmpneqq %ymm3, %ymm2, %k1 ## encoding: [0x62,0xf3,0xed,0x28,0x1f,0xcb,0x04]
; CHECK-NEXT:    vaddpd %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0xa9,0x58,0xc1]
; CHECK-NEXT:    retq ## encoding: [0xc3]
  %mask = icmp ne <4 x i64> %mask1, zeroinitializer
  %x = fadd <4 x double> %i, %j
  %r = select <4 x i1> %mask, <4 x double> %x, <4 x double> zeroinitializer
  ret <4 x double> %r
}

define <4 x double> @test_mask_fold_vaddpd_256(<4 x double> %dst, <4 x double> %i, <4 x double>* %j, <4 x i64> %mask1) nounwind {
; CHECK-LABEL: test_mask_fold_vaddpd_256:
; CHECK:       ## BB#0:
; CHECK-NEXT:    vpxor %xmm3, %xmm3, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xe1,0xef,0xdb]
; CHECK-NEXT:    vpcmpneqq %ymm3, %ymm2, %k1 ## encoding: [0x62,0xf3,0xed,0x28,0x1f,0xcb,0x04]
; CHECK-NEXT:    vaddpd (%rdi), %ymm1, %ymm0 {%k1} ## encoding: [0x62,0xf1,0xf5,0x29,0x58,0x07]
; CHECK-NEXT:    retq ## encoding: [0xc3]
  %mask = icmp ne <4 x i64> %mask1, zeroinitializer
  %tmp = load <4 x double>, <4 x double>* %j
  %x = fadd <4 x double> %i, %tmp
  %r = select <4 x i1> %mask, <4 x double> %x, <4 x double> %dst
  ret <4 x double> %r
}

define <4 x double> @test_maskz_fold_vaddpd_256(<4 x double> %i, <4 x double>* %j, <4 x i64> %mask1) nounwind {
; CHECK-LABEL: test_maskz_fold_vaddpd_256:
; CHECK:       ## BB#0:
; CHECK-NEXT:    vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
; CHECK-NEXT:    vpcmpneqq %ymm2, %ymm1, %k1 ## encoding: [0x62,0xf3,0xf5,0x28,0x1f,0xca,0x04]
; CHECK-NEXT:    vaddpd (%rdi), %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0xa9,0x58,0x07]
; CHECK-NEXT:    retq ## encoding: [0xc3]
  %mask = icmp ne <4 x i64> %mask1, zeroinitializer
  %tmp = load <4 x double>, <4 x double>* %j
  %x = fadd <4 x double> %i, %tmp
  %r = select <4 x i1> %mask, <4 x double> %x, <4 x double> zeroinitializer
  ret <4 x double> %r
}

define <4 x double> @test_broadcast2_vaddpd_256(<4 x double> %i, double* %j) nounwind {
; CHECK-LABEL: test_broadcast2_vaddpd_256:
; CHECK:       ## BB#0:
; CHECK-NEXT:    vaddpd (%rdi){1to4}, %ymm0, %ymm0 ## encoding: [0x62,0xf1,0xfd,0x38,0x58,0x07]
; CHECK-NEXT:    retq ## encoding: [0xc3]
  %tmp = load double, double* %j
  %b = insertelement <4 x double> undef, double %tmp, i32 0
  %c = shufflevector <4 x double> %b, <4 x double> undef, <4 x i32> zeroinitializer
  %x = fadd <4 x double> %c, %i
  ret <4 x double> %x
}

define <4 x double> @test_mask_broadcast_vaddpd_256(<4 x double> %dst, <4 x double> %i, double* %j, <4 x i64> %mask1) nounwind {
; CHECK-LABEL: test_mask_broadcast_vaddpd_256:
; CHECK:       ## BB#0:
; CHECK-NEXT:    vpxor %xmm0, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xef,0xc0]
; CHECK-NEXT:    vpcmpneqq %ymm0, %ymm2, %k1 ## encoding: [0x62,0xf3,0xed,0x28,0x1f,0xc8,0x04]
; CHECK-NEXT:    vaddpd (%rdi){1to4}, %ymm1, %ymm1 {%k1} ## encoding: [0x62,0xf1,0xf5,0x39,0x58,0x0f]
; CHECK-NEXT:    vmovapd %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x28,0xc1]
; CHECK-NEXT:    retq ## encoding: [0xc3]
  %mask = icmp ne <4 x i64> %mask1, zeroinitializer
  %tmp = load double, double* %j
  %b = insertelement <4 x double> undef, double %tmp, i32 0
  %c = shufflevector <4 x double> %b, <4 x double> undef, <4 x i32> zeroinitializer
  %x = fadd <4 x double> %c, %i
  %r = select <4 x i1> %mask, <4 x double> %x, <4 x double> %i
  ret <4 x double> %r
}

define <4 x double> @test_maskz_broadcast_vaddpd_256(<4 x double> %i, double* %j, <4 x i64> %mask1) nounwind {
; CHECK-LABEL: test_maskz_broadcast_vaddpd_256:
; CHECK:       ## BB#0:
; CHECK-NEXT:    vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
; CHECK-NEXT:    vpcmpneqq %ymm2, %ymm1, %k1 ## encoding: [0x62,0xf3,0xf5,0x28,0x1f,0xca,0x04]
; CHECK-NEXT:    vaddpd (%rdi){1to4}, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0xb9,0x58,0x07]
; CHECK-NEXT:    retq ## encoding: [0xc3]
  %mask = icmp ne <4 x i64> %mask1, zeroinitializer
  %tmp = load double, double* %j
  %b = insertelement <4 x double> undef, double %tmp, i32 0
  %c = shufflevector <4 x double> %b, <4 x double> undef, <4 x i32> zeroinitializer
  %x = fadd <4 x double> %c, %i
  %r = select <4 x i1> %mask, <4 x double> %x, <4 x double> zeroinitializer
  ret <4 x double> %r
}

; 128-bit

define <2 x i64> @vpaddq128_test(<2 x i64> %i, <2 x i64> %j) nounwind readnone {
; CHECK-LABEL: vpaddq128_test:
; CHECK:       ## BB#0:
; CHECK-NEXT:    vpaddq %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd4,0xc1]
; CHECK-NEXT:    retq ## encoding: [0xc3]
  %x = add <2 x i64> %i, %j
  ret <2 x i64> %x
}

define <2 x i64> @vpaddq128_fold_test(<2 x i64> %i, <2 x i64>* %j) nounwind {
; CHECK-LABEL: vpaddq128_fold_test:
; CHECK:       ## BB#0:
; CHECK-NEXT:    vpaddq (%rdi), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xd4,0x07]
; CHECK-NEXT:    retq ## encoding: [0xc3]
  %tmp = load <2 x i64>, <2 x i64>* %j, align 4
  %x = add <2 x i64> %i, %tmp
  ret <2 x i64> %x
}

define <2 x i64> @vpaddq128_broadcast2_test(<2 x i64> %i, i64* %j) nounwind {
; CHECK-LABEL: vpaddq128_broadcast2_test:
; CHECK:       ## BB#0:
; CHECK-NEXT:    vpaddq (%rdi){1to2}, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0xfd,0x18,0xd4,0x07]
; CHECK-NEXT:    retq ## encoding: [0xc3]
  %tmp = load i64, i64* %j
  %j.0 = insertelement <2 x i64> undef, i64 %tmp, i32 0
  %j.1 = insertelement <2 x i64> %j.0, i64 %tmp, i32 1
  %x = add <2 x i64> %i, %j.1
  ret <2 x i64> %x
}

define <4 x i32> @vpaddd128_test(<4 x i32> %i, <4 x i32> %j) nounwind readnone {
; CHECK-LABEL: vpaddd128_test:
; CHECK:       ## BB#0:
; CHECK-NEXT:    vpaddd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0xc1]
; CHECK-NEXT:    retq ## encoding: [0xc3]
  %x = add <4 x i32> %i, %j
  ret <4 x i32> %x
}

define <4 x i32> @vpaddd128_fold_test(<4 x i32> %i, <4 x i32>* %j) nounwind {
; CHECK-LABEL: vpaddd128_fold_test:
; CHECK:       ## BB#0:
; CHECK-NEXT:    vpaddd (%rdi), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfe,0x07]
; CHECK-NEXT:    retq ## encoding: [0xc3]
  %tmp = load <4 x i32>, <4 x i32>* %j, align 4
  %x = add <4 x i32> %i, %tmp
  ret <4 x i32> %x
}

define <4 x i32> @vpaddd128_broadcast_test(<4 x i32> %i) nounwind {
; CHECK-LABEL: vpaddd128_broadcast_test:
; CHECK:       ## BB#0:
; CHECK-NEXT:    vpaddd {{.*}}(%rip){1to4}, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0x7d,0x18,0xfe,0x05,A,A,A,A]
; CHECK-NEXT:    ## fixup A - offset: 6, value: LCPI42_0-4, kind: reloc_riprel_4byte
; CHECK-NEXT:    retq ## encoding: [0xc3]
  %x = add <4 x i32> %i, <i32 6, i32 6, i32 6, i32 6>
  ret <4 x i32> %x
}

define <4 x i32> @vpaddd128_mask_test(<4 x i32> %i, <4 x i32> %j, <4 x i32> %mask1) nounwind readnone {
; CHECK-LABEL: vpaddd128_mask_test:
; CHECK:       ## BB#0:
; CHECK-NEXT:    vpxor %xmm3, %xmm3, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xe1,0xef,0xdb]
; CHECK-NEXT:    vpcmpneqd %xmm3, %xmm2, %k1 ## encoding: [0x62,0xf3,0x6d,0x08,0x1f,0xcb,0x04]
; CHECK-NEXT:    vpaddd %xmm1, %xmm0, %xmm0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xfe,0xc1]
; CHECK-NEXT:    retq ## encoding: [0xc3]
  %mask = icmp ne <4 x i32> %mask1, zeroinitializer
  %x = add <4 x i32> %i, %j
  %r = select <4 x i1> %mask, <4 x i32> %x, <4 x i32> %i
  ret <4 x i32> %r
}

define <4 x i32> @vpaddd128_maskz_test(<4 x i32> %i, <4 x i32> %j, <4 x i32> %mask1) nounwind readnone {
; CHECK-LABEL: vpaddd128_maskz_test:
; CHECK:       ## BB#0:
; CHECK-NEXT:    vpxor %xmm3, %xmm3, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xe1,0xef,0xdb]
; CHECK-NEXT:    vpcmpneqd %xmm3, %xmm2, %k1 ## encoding: [0x62,0xf3,0x6d,0x08,0x1f,0xcb,0x04]
; CHECK-NEXT:    vpaddd %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xfe,0xc1]
; CHECK-NEXT:    retq ## encoding: [0xc3]
  %mask = icmp ne <4 x i32> %mask1, zeroinitializer
  %x = add <4 x i32> %i, %j
  %r = select <4 x i1> %mask, <4 x i32> %x, <4 x i32> zeroinitializer
  ret <4 x i32> %r
}

define <4 x i32> @vpaddd128_mask_fold_test(<4 x i32> %i, <4 x i32>* %j.ptr, <4 x i32> %mask1) nounwind readnone {
; CHECK-LABEL: vpaddd128_mask_fold_test:
; CHECK:       ## BB#0:
; CHECK-NEXT:    vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
; CHECK-NEXT:    vpcmpneqd %xmm2, %xmm1, %k1 ## encoding: [0x62,0xf3,0x75,0x08,0x1f,0xca,0x04]
; CHECK-NEXT:    vpaddd (%rdi), %xmm0, %xmm0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xfe,0x07]
; CHECK-NEXT:    retq ## encoding: [0xc3]
  %mask = icmp ne <4 x i32> %mask1, zeroinitializer
  %j = load <4 x i32>, <4 x i32>* %j.ptr
  %x = add <4 x i32> %i, %j
  %r = select <4 x i1> %mask, <4 x i32> %x, <4 x i32> %i
  ret <4 x i32> %r
}

define <4 x i32> @vpaddd128_mask_broadcast_test(<4 x i32> %i, <4 x i32> %mask1) nounwind readnone {
; CHECK-LABEL: vpaddd128_mask_broadcast_test:
; CHECK:       ## BB#0:
; CHECK-NEXT:    vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
; CHECK-NEXT:    vpcmpneqd %xmm2, %xmm1, %k1 ## encoding: [0x62,0xf3,0x75,0x08,0x1f,0xca,0x04]
; CHECK-NEXT:    vpaddd {{.*}}(%rip){1to4}, %xmm0, %xmm0 {%k1} ## encoding: [0x62,0xf1,0x7d,0x19,0xfe,0x05,A,A,A,A]
; CHECK-NEXT:    ## fixup A - offset: 6, value: LCPI46_0-4, kind: reloc_riprel_4byte
; CHECK-NEXT:    retq ## encoding: [0xc3]
  %mask = icmp ne <4 x i32> %mask1, zeroinitializer
  %x = add <4 x i32> %i, <i32 7, i32 7, i32 7, i32 7>
  %r = select <4 x i1> %mask, <4 x i32> %x, <4 x i32> %i
  ret <4 x i32> %r
}

define <4 x i32> @vpaddd128_maskz_fold_test(<4 x i32> %i, <4 x i32>* %j.ptr, <4 x i32> %mask1) nounwind readnone {
; CHECK-LABEL: vpaddd128_maskz_fold_test:
; CHECK:       ## BB#0:
; CHECK-NEXT:    vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
; CHECK-NEXT:    vpcmpneqd %xmm2, %xmm1, %k1 ## encoding: [0x62,0xf3,0x75,0x08,0x1f,0xca,0x04]
; CHECK-NEXT:    vpaddd (%rdi), %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xfe,0x07]
; CHECK-NEXT:    retq ## encoding: [0xc3]
  %mask = icmp ne <4 x i32> %mask1, zeroinitializer
  %j = load <4 x i32>, <4 x i32>* %j.ptr
  %x = add <4 x i32> %i, %j
  %r = select <4 x i1> %mask, <4 x i32> %x, <4 x i32> zeroinitializer
  ret <4 x i32> %r
}

define <4 x i32> @vpaddd128_maskz_broadcast_test(<4 x i32> %i, <4 x i32> %mask1) nounwind readnone {
; CHECK-LABEL: vpaddd128_maskz_broadcast_test:
; CHECK:       ## BB#0:
; CHECK-NEXT:    vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2]
; CHECK-NEXT:    vpcmpneqd %xmm2, %xmm1, %k1 ## encoding: [0x62,0xf3,0x75,0x08,0x1f,0xca,0x04]
; CHECK-NEXT:    vpaddd {{.*}}(%rip){1to4}, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x99,0xfe,0x05,A,A,A,A]
; CHECK-NEXT:    ## fixup A - offset: 6, value: LCPI48_0-4, kind: reloc_riprel_4byte
; CHECK-NEXT:    retq ## encoding: [0xc3]
  %mask = icmp ne <4 x i32> %mask1, zeroinitializer
  %x = add <4 x i32> %i, <i32 8, i32 8, i32 8, i32 8>
  %r = select <4 x i1> %mask, <4 x i32> %x, <4 x i32> zeroinitializer
  ret <4 x i32> %r
}

define <2 x i64> @vpsubq128_test(<2 x i64> %i, <2 x i64> %j) nounwind readnone {
; CHECK-LABEL: vpsubq128_test:
; CHECK:       ## BB#0:
; CHECK-NEXT:    vpsubq %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfb,0xc1]
; CHECK-NEXT:    retq ## encoding: [0xc3]
  %x = sub <2 x i64> %i, %j
  ret <2 x i64> %x
}

define <4 x i32> @vpsubd128_test(<4 x i32> %i, <4 x i32> %j) nounwind readnone {
; CHECK-LABEL: vpsubd128_test:
; CHECK:       ## BB#0:
; CHECK-NEXT:    vpsubd %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xfa,0xc1]
; CHECK-NEXT:    retq ## encoding: [0xc3]
  %x = sub <4 x i32> %i, %j
  ret <4 x i32> %x
}

define <4 x i32> @vpmulld128_test(<4 x i32> %i, <4 x i32> %j) {
; CHECK-LABEL: vpmulld128_test:
; CHECK:       ## BB#0:
; CHECK-NEXT:    vpmulld %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc4,0xe2,0x79,0x40,0xc1]
; CHECK-NEXT:    retq ## encoding: [0xc3]
  %x = mul <4 x i32> %i, %j
  ret <4 x i32> %x
}

define <2 x double> @test_vaddpd_128(<2 x double> %y, <2 x double> %x) {
; CHECK-LABEL: test_vaddpd_128:
; CHECK:       ## BB#0: ## %entry
; CHECK-NEXT:    vaddpd %xmm0, %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf1,0x58,0xc0]
; CHECK-NEXT:    retq ## encoding: [0xc3]
entry:
  %add.i = fadd <2 x double> %x, %y
  ret <2 x double> %add.i
}

define <2 x double> @test_fold_vaddpd_128(<2 x double> %y) {
; CHECK-LABEL: test_fold_vaddpd_128:
; CHECK:       ## BB#0: ## %entry
; CHECK-NEXT:    vaddpd {{.*}}(%rip), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x58,0x05,A,A,A,A]
; CHECK-NEXT:    ## fixup A - offset: 4, value: LCPI53_0-4, kind: reloc_riprel_4byte
; CHECK-NEXT:    retq ## encoding: [0xc3]
entry:
  %add.i = fadd <2 x double> %y, <double 4.500000e+00, double 3.400000e+00>
  ret <2 x double> %add.i
}

define <4 x float> @test_broadcast_vaddpd_128(<4 x float> %a) nounwind {
; CHECK-LABEL: test_broadcast_vaddpd_128:
; CHECK:       ## BB#0:
; CHECK-NEXT:    vaddps {{.*}}(%rip){1to4}, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0x7c,0x18,0x58,0x05,A,A,A,A]
; CHECK-NEXT:    ## fixup A - offset: 6, value: LCPI54_0-4, kind: reloc_riprel_4byte
; CHECK-NEXT:    retq ## encoding: [0xc3]
  %b = fadd <4 x float> %a, <float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000, float 0x3FB99999A0000000>
  ret <4 x float> %b
}

define <4 x float> @test_mask_vaddps_128(<4 x float> %dst, <4 x float> %i, <4 x float> %j, <4 x i32> %mask1) nounwind readnone {
; CHECK-LABEL: test_mask_vaddps_128:
; CHECK:       ## BB#0:
; CHECK-NEXT:    vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4]
; CHECK-NEXT:    vpcmpneqd %xmm4, %xmm3, %k1 ## encoding: [0x62,0xf3,0x65,0x08,0x1f,0xcc,0x04]
; CHECK-NEXT:    vaddps %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0x74,0x09,0x58,0xc2]
; CHECK-NEXT:    retq ## encoding: [0xc3]
  %mask = icmp ne <4 x i32> %mask1, zeroinitializer
  %x = fadd <4 x float> %i, %j
  %r = select <4 x i1> %mask, <4 x float> %x, <4 x float> %dst
  ret <4 x float> %r
}

define <4 x float> @test_mask_vmulps_128(<4 x float> %dst, <4 x float> %i, <4 x float> %j, <4 x i32> %mask1) nounwind readnone {
; CHECK-LABEL: test_mask_vmulps_128:
; CHECK:       ## BB#0:
; CHECK-NEXT:    vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4]
; CHECK-NEXT:    vpcmpneqd %xmm4, %xmm3, %k1 ## encoding: [0x62,0xf3,0x65,0x08,0x1f,0xcc,0x04]
; CHECK-NEXT:    vmulps %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0x74,0x09,0x59,0xc2]
; CHECK-NEXT:    retq ## encoding: [0xc3]
  %mask = icmp ne <4 x i32> %mask1, zeroinitializer
  %x = fmul <4 x float> %i, %j
  %r = select <4 x i1> %mask, <4 x float> %x, <4 x float> %dst
  ret <4 x float> %r
}

define <4 x float> @test_mask_vminps_128(<4 x float> %dst, <4 x float> %i, <4 x float> %j, <4 x i32> %mask1) nounwind readnone {
; CHECK-LABEL: test_mask_vminps_128:
; CHECK:       ## BB#0:
; CHECK-NEXT:    vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4]
; CHECK-NEXT:    vpcmpneqd %xmm4, %xmm3, %k1 ## encoding: [0x62,0xf3,0x65,0x08,0x1f,0xcc,0x04]
; CHECK-NEXT:    vminps %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0x74,0x09,0x5d,0xc2]
; CHECK-NEXT:    retq ## encoding: [0xc3]
  %mask = icmp ne <4 x i32> %mask1, zeroinitializer
  %cmp_res = fcmp olt <4 x float> %i, %j
  %min = select <4 x i1> %cmp_res, <4 x float> %i, <4 x float> %j
  %r = select <4 x i1> %mask, <4 x float> %min, <4 x float> %dst
  ret <4 x float> %r
}

define <4 x float> @test_mask_vmaxps_128(<4 x float> %dst, <4 x float> %i, <4 x float> %j, <4 x i32> %mask1) nounwind readnone {
; CHECK-LABEL: test_mask_vmaxps_128:
; CHECK:       ## BB#0:
; CHECK-NEXT:    vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4]
; CHECK-NEXT:    vpcmpneqd %xmm4, %xmm3, %k1 ## encoding: [0x62,0xf3,0x65,0x08,0x1f,0xcc,0x04]
; CHECK-NEXT:    vmaxps %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0x74,0x09,0x5f,0xc2]
; CHECK-NEXT:    retq ## encoding: [0xc3]
  %mask = icmp ne <4 x i32> %mask1, zeroinitializer
  %cmp_res = fcmp ogt <4 x float> %i, %j
  %max = select <4 x i1> %cmp_res, <4 x float> %i, <4 x float> %j
  %r = select <4 x i1> %mask, <4 x float> %max, <4 x float> %dst
  ret <4 x float> %r
}

define <4 x float> @test_mask_vsubps_128(<4 x float> %dst, <4 x float> %i, <4 x float> %j, <4 x i32> %mask1) nounwind readnone {
; CHECK-LABEL: test_mask_vsubps_128:
; CHECK:       ## BB#0:
; CHECK-NEXT:    vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4]
; CHECK-NEXT:    vpcmpneqd %xmm4, %xmm3, %k1 ## encoding: [0x62,0xf3,0x65,0x08,0x1f,0xcc,0x04]
; CHECK-NEXT:    vsubps %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0x74,0x09,0x5c,0xc2]
; CHECK-NEXT:    retq ## encoding: [0xc3]
  %mask = icmp ne <4 x i32> %mask1, zeroinitializer
  %x = fsub <4 x float> %i, %j
  %r = select <4 x i1> %mask, <4 x float> %x, <4 x float> %dst
  ret <4 x float> %r
}

define <4 x float> @test_mask_vdivps_128(<4 x float> %dst, <4 x float> %i, <4 x float> %j, <4 x i32> %mask1) nounwind readnone {
; CHECK-LABEL: test_mask_vdivps_128:
; CHECK:       ## BB#0:
; CHECK-NEXT:    vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4]
; CHECK-NEXT:    vpcmpneqd %xmm4, %xmm3, %k1 ## encoding: [0x62,0xf3,0x65,0x08,0x1f,0xcc,0x04]
; CHECK-NEXT:    vdivps %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0x74,0x09,0x5e,0xc2]
; CHECK-NEXT:    retq ## encoding: [0xc3]
  %mask = icmp ne <4 x i32> %mask1, zeroinitializer
  %x = fdiv <4 x float> %i, %j
  %r = select <4 x i1> %mask, <4 x float> %x, <4 x float> %dst
  ret <4 x float> %r
}

define <2 x double> @test_mask_vmulpd_128(<2 x double> %dst, <2 x double> %i, <2 x double> %j, <2 x i64> %mask1) nounwind readnone {
; CHECK-LABEL: test_mask_vmulpd_128:
; CHECK:       ## BB#0:
; CHECK-NEXT:    vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4]
; CHECK-NEXT:    vpcmpneqq %xmm4, %xmm3, %k1 ## encoding: [0x62,0xf3,0xe5,0x08,0x1f,0xcc,0x04]
; CHECK-NEXT:    vmulpd %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0xf5,0x09,0x59,0xc2]
; CHECK-NEXT:    retq ## encoding: [0xc3]
  %mask = icmp ne <2 x i64> %mask1, zeroinitializer
  %x = fmul <2 x double> %i, %j
  %r = select <2 x i1> %mask, <2 x double> %x, <2 x double> %dst
ret <2 x double> %r } define <2 x double> @test_mask_vminpd_128(<2 x double> %dst, <2 x double> %i, <2 x double> %j, <2 x i64> %mask1) nounwind readnone { ; CHECK-LABEL: test_mask_vminpd_128: ; CHECK: ## BB#0: ; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4] ; CHECK-NEXT: vpcmpneqq %xmm4, %xmm3, %k1 ## encoding: [0x62,0xf3,0xe5,0x08,0x1f,0xcc,0x04] ; CHECK-NEXT: vminpd %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0xf5,0x09,0x5d,0xc2] ; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <2 x i64> %mask1, zeroinitializer %cmp_res = fcmp olt <2 x double> %i, %j %min = select <2 x i1> %cmp_res, <2 x double> %i, <2 x double> %j %r = select <2 x i1> %mask, <2 x double> %min, <2 x double> %dst ret <2 x double> %r } define <2 x double> @test_mask_vmaxpd_128(<2 x double> %dst, <2 x double> %i, <2 x double> %j, <2 x i64> %mask1) nounwind readnone { ; CHECK-LABEL: test_mask_vmaxpd_128: ; CHECK: ## BB#0: ; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4] ; CHECK-NEXT: vpcmpneqq %xmm4, %xmm3, %k1 ## encoding: [0x62,0xf3,0xe5,0x08,0x1f,0xcc,0x04] ; CHECK-NEXT: vmaxpd %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0xf5,0x09,0x5f,0xc2] ; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <2 x i64> %mask1, zeroinitializer %cmp_res = fcmp ogt <2 x double> %i, %j %max = select <2 x i1> %cmp_res, <2 x double> %i, <2 x double> %j %r = select <2 x i1> %mask, <2 x double> %max, <2 x double> %dst ret <2 x double> %r } define <2 x double> @test_mask_vsubpd_128(<2 x double> %dst, <2 x double> %i, <2 x double> %j, <2 x i64> %mask1) nounwind readnone { ; CHECK-LABEL: test_mask_vsubpd_128: ; CHECK: ## BB#0: ; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4] ; CHECK-NEXT: vpcmpneqq %xmm4, %xmm3, %k1 ## encoding: [0x62,0xf3,0xe5,0x08,0x1f,0xcc,0x04] ; CHECK-NEXT: vsubpd %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0xf5,0x09,0x5c,0xc2] 
; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <2 x i64> %mask1, zeroinitializer %x = fsub <2 x double> %i, %j %r = select <2 x i1> %mask, <2 x double> %x, <2 x double> %dst ret <2 x double> %r } define <2 x double> @test_mask_vdivpd_128(<2 x double> %dst, <2 x double> %i, <2 x double> %j, <2 x i64> %mask1) nounwind readnone { ; CHECK-LABEL: test_mask_vdivpd_128: ; CHECK: ## BB#0: ; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4] ; CHECK-NEXT: vpcmpneqq %xmm4, %xmm3, %k1 ## encoding: [0x62,0xf3,0xe5,0x08,0x1f,0xcc,0x04] ; CHECK-NEXT: vdivpd %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0xf5,0x09,0x5e,0xc2] ; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <2 x i64> %mask1, zeroinitializer %x = fdiv <2 x double> %i, %j %r = select <2 x i1> %mask, <2 x double> %x, <2 x double> %dst ret <2 x double> %r } define <2 x double> @test_mask_vaddpd_128(<2 x double> %dst, <2 x double> %i, <2 x double> %j, <2 x i64> %mask1) nounwind readnone { ; CHECK-LABEL: test_mask_vaddpd_128: ; CHECK: ## BB#0: ; CHECK-NEXT: vpxor %xmm4, %xmm4, %xmm4 ## EVEX TO VEX Compression encoding: [0xc5,0xd9,0xef,0xe4] ; CHECK-NEXT: vpcmpneqq %xmm4, %xmm3, %k1 ## encoding: [0x62,0xf3,0xe5,0x08,0x1f,0xcc,0x04] ; CHECK-NEXT: vaddpd %xmm2, %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0xf5,0x09,0x58,0xc2] ; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <2 x i64> %mask1, zeroinitializer %x = fadd <2 x double> %i, %j %r = select <2 x i1> %mask, <2 x double> %x, <2 x double> %dst ret <2 x double> %r } define <2 x double> @test_maskz_vaddpd_128(<2 x double> %i, <2 x double> %j, ; CHECK-LABEL: test_maskz_vaddpd_128: ; CHECK: ## BB#0: ; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xe1,0xef,0xdb] ; CHECK-NEXT: vpcmpneqq %xmm3, %xmm2, %k1 ## encoding: [0x62,0xf3,0xed,0x08,0x1f,0xcb,0x04] ; CHECK-NEXT: vaddpd %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0x89,0x58,0xc1] ; CHECK-NEXT: retq ## 
encoding: [0xc3] <2 x i64> %mask1) nounwind readnone { %mask = icmp ne <2 x i64> %mask1, zeroinitializer %x = fadd <2 x double> %i, %j %r = select <2 x i1> %mask, <2 x double> %x, <2 x double> zeroinitializer ret <2 x double> %r } define <2 x double> @test_mask_fold_vaddpd_128(<2 x double> %dst, <2 x double> %i, <2 x double>* %j, <2 x i64> %mask1) nounwind { ; CHECK-LABEL: test_mask_fold_vaddpd_128: ; CHECK: ## BB#0: ; CHECK-NEXT: vpxor %xmm3, %xmm3, %xmm3 ## EVEX TO VEX Compression encoding: [0xc5,0xe1,0xef,0xdb] ; CHECK-NEXT: vpcmpneqq %xmm3, %xmm2, %k1 ## encoding: [0x62,0xf3,0xed,0x08,0x1f,0xcb,0x04] ; CHECK-NEXT: vaddpd (%rdi), %xmm1, %xmm0 {%k1} ## encoding: [0x62,0xf1,0xf5,0x09,0x58,0x07] ; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <2 x i64> %mask1, zeroinitializer %tmp = load <2 x double>, <2 x double>* %j %x = fadd <2 x double> %i, %tmp %r = select <2 x i1> %mask, <2 x double> %x, <2 x double> %dst ret <2 x double> %r } define <2 x double> @test_maskz_fold_vaddpd_128(<2 x double> %i, <2 x double>* %j, <2 x i64> %mask1) nounwind { ; CHECK-LABEL: test_maskz_fold_vaddpd_128: ; CHECK: ## BB#0: ; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2] ; CHECK-NEXT: vpcmpneqq %xmm2, %xmm1, %k1 ## encoding: [0x62,0xf3,0xf5,0x08,0x1f,0xca,0x04] ; CHECK-NEXT: vaddpd (%rdi), %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0x89,0x58,0x07] ; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <2 x i64> %mask1, zeroinitializer %tmp = load <2 x double>, <2 x double>* %j %x = fadd <2 x double> %i, %tmp %r = select <2 x i1> %mask, <2 x double> %x, <2 x double> zeroinitializer ret <2 x double> %r } define <2 x double> @test_broadcast2_vaddpd_128(<2 x double> %i, double* %j) nounwind { ; CHECK-LABEL: test_broadcast2_vaddpd_128: ; CHECK: ## BB#0: ; CHECK-NEXT: vaddpd (%rdi){1to2}, %xmm0, %xmm0 ## encoding: [0x62,0xf1,0xfd,0x18,0x58,0x07] ; CHECK-NEXT: retq ## encoding: [0xc3] %tmp = load double, double* %j %j.0 = 
insertelement <2 x double> undef, double %tmp, i64 0 %j.1 = insertelement <2 x double> %j.0, double %tmp, i64 1 %x = fadd <2 x double> %j.1, %i ret <2 x double> %x } define <2 x double> @test_mask_broadcast_vaddpd_128(<2 x double> %dst, <2 x double> %i, double* %j, <2 x i64> %mask1) nounwind { ; CHECK-LABEL: test_mask_broadcast_vaddpd_128: ; CHECK: ## BB#0: ; CHECK-NEXT: vpxor %xmm0, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xef,0xc0] ; CHECK-NEXT: vpcmpneqq %xmm0, %xmm2, %k1 ## encoding: [0x62,0xf3,0xed,0x08,0x1f,0xc8,0x04] ; CHECK-NEXT: vaddpd (%rdi){1to2}, %xmm1, %xmm1 {%k1} ## encoding: [0x62,0xf1,0xf5,0x19,0x58,0x0f] ; CHECK-NEXT: vmovapd %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x28,0xc1] ; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <2 x i64> %mask1, zeroinitializer %tmp = load double, double* %j %j.0 = insertelement <2 x double> undef, double %tmp, i64 0 %j.1 = insertelement <2 x double> %j.0, double %tmp, i64 1 %x = fadd <2 x double> %j.1, %i %r = select <2 x i1> %mask, <2 x double> %x, <2 x double> %i ret <2 x double> %r } define <2 x double> @test_maskz_broadcast_vaddpd_128(<2 x double> %i, double* %j, <2 x i64> %mask1) nounwind { ; CHECK-LABEL: test_maskz_broadcast_vaddpd_128: ; CHECK: ## BB#0: ; CHECK-NEXT: vpxor %xmm2, %xmm2, %xmm2 ## EVEX TO VEX Compression encoding: [0xc5,0xe9,0xef,0xd2] ; CHECK-NEXT: vpcmpneqq %xmm2, %xmm1, %k1 ## encoding: [0x62,0xf3,0xf5,0x08,0x1f,0xca,0x04] ; CHECK-NEXT: vaddpd (%rdi){1to2}, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0xfd,0x99,0x58,0x07] ; CHECK-NEXT: retq ## encoding: [0xc3] %mask = icmp ne <2 x i64> %mask1, zeroinitializer %tmp = load double, double* %j %j.0 = insertelement <2 x double> undef, double %tmp, i64 0 %j.1 = insertelement <2 x double> %j.0, double %tmp, i64 1 %x = fadd <2 x double> %j.1, %i %r = select <2 x i1> %mask, <2 x double> %x, <2 x double> zeroinitializer ret <2 x double> %r }