comparison test/CodeGen/X86/avx.ll @ 100:7d135dc70f03 LLVM 3.9

author Miyagi Mitsuki <e135756@ie.u-ryukyu.ac.jp>
date Tue, 26 Jan 2016 22:53:40 +0900
parents afa8332a0e37
children 803732b1fca8
comparison of 96:6418606d0ead with 100:7d135dc70f03
@@ -30,11 +30,11 @@
 define <4 x float> @insertps_from_vector_load(<4 x float> %a, <4 x float>* nocapture readonly %pb) {
 ; CHECK-LABEL: insertps_from_vector_load:
 ; On X32, account for the argument's move to registers
 ; X32: movl 4(%esp), %eax
 ; CHECK-NOT: mov
-; CHECK: insertps $48
+; CHECK: vinsertps $48, (%{{...}}), {{.*#+}} xmm0 = xmm0[0,1,2],mem[0]
 ; CHECK-NEXT: ret
 %1 = load <4 x float>, <4 x float>* %pb, align 16
 %2 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> %1, i32 48)
 ret <4 x float> %2
 }
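
The $48 immediate on the CHECK line above is the SSE4.1 insertps control byte. As a side note (not part of the test file), here is a minimal C sketch of the documented field layout: bits [7:6] select the source element, bits [5:4] select the destination lane, and bits [3:0] zero out result lanes. Decoding $48 (0x30) gives destination lane 3 with no zeroing, which matches the expected xmm0 = xmm0[0,1,2],mem[0] pattern.

    /* Illustrative only -- decode an SSE4.1 insertps immediate into its fields. */
    #include <stdio.h>

    static void decode_insertps_imm(unsigned imm)
    {
        unsigned count_s = (imm >> 6) & 0x3; /* source element (register source only) */
        unsigned count_d = (imm >> 4) & 0x3; /* destination lane in the result */
        unsigned zmask   = imm & 0xF;        /* result lanes forced to zero */
        printf("imm=0x%02X: count_s=%u count_d=%u zmask=0x%X\n",
               imm, count_s, count_d, zmask);
    }

    int main(void)
    {
        decode_insertps_imm(48);  /* 0x30 -> dst lane 3, as in the first test */
        decode_insertps_imm(96);  /* 0x60 -> src elt 1, dst lane 2 (second test's intrinsic) */
        decode_insertps_imm(192); /* 0xC0 -> src elt 3, dst lane 0 (third test's intrinsic) */
        return 0;
    }
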
@@ -44,11 +44,11 @@
 ; CHECK-LABEL: insertps_from_vector_load_offset:
 ; On X32, account for the argument's move to registers
 ; X32: movl 4(%esp), %eax
 ; CHECK-NOT: mov
 ;; Try to match a bit more of the instr, since we need the load's offset.
-; CHECK: insertps $96, 4(%{{...}}), %
+; CHECK: vinsertps $32, 4(%{{...}}), {{.*#+}} xmm0 = xmm0[0,1],mem[0],xmm0[3]
 ; CHECK-NEXT: ret
 %1 = load <4 x float>, <4 x float>* %pb, align 16
 %2 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> %1, i32 96)
 ret <4 x float> %2
 }
@@ -58,11 +58,11 @@
 ; On X32, account for the argument's move to registers
 ; X32: movl 4(%esp), %eax
 ; X32: movl 8(%esp), %ecx
 ; CHECK-NOT: mov
 ;; Try to match a bit more of the instr, since we need the load's offset.
-; CHECK: vinsertps $192, 12(%{{...}},%{{...}}), %
+; CHECK: vinsertps $0, 12(%{{...}},%{{...}}), {{.*#+}} xmm0 = mem[0],xmm0[1,2,3]
 ; CHECK-NEXT: ret
 %1 = getelementptr inbounds <4 x float>, <4 x float>* %pb, i64 %index
 %2 = load <4 x float>, <4 x float>* %1, align 16
 %3 = tail call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> %2, i32 192)
 ret <4 x float> %3
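
A note on the last two hunks: the IR still calls the intrinsic with i32 96 and i32 192, but the regenerated CHECK lines expect vinsertps $32 and $0. Reading this against the encoding sketched above (an inference, not something the diff states), once the load is folded the memory operand always supplies element 0, so the source-select bits [7:6] are dropped from the printed immediate and only the destination lane and zero mask remain: 0x60 becomes 0x20 and 0xC0 becomes 0x00.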