comparison test/CodeGen/X86/dag-merge-fast-accesses.ll @ 95:afa8332a0e37 LLVM3.8

LLVM 3.8
author Kaito Tokumori <e105711@ie.u-ryukyu.ac.jp>
date Tue, 13 Oct 2015 17:48:58 +0900
parents
children 7d135dc70f03
comparison
equal deleted inserted replaced
84:f3e34b893a5f 95:afa8332a0e37
1 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=-slow-unaligned-mem-16 | FileCheck %s --check-prefix=FAST
2 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+slow-unaligned-mem-16 | FileCheck %s --check-prefix=SLOW
3
4 ; Verify that the DAGCombiner is creating unaligned 16-byte loads and stores
5 ; if and only if those are fast.
6
7 define void @merge_const_vec_store(i64* %ptr) {
8 ; FAST-LABEL: merge_const_vec_store:
9 ; FAST: # BB#0:
10 ; FAST-NEXT: xorps %xmm0, %xmm0
11 ; FAST-NEXT: movups %xmm0, (%rdi)
12 ; FAST-NEXT: retq
13 ;
14 ; SLOW-LABEL: merge_const_vec_store:
15 ; SLOW: # BB#0:
16 ; SLOW-NEXT: movq $0, (%rdi)
17 ; SLOW-NEXT: movq $0, 8(%rdi)
18 ; SLOW-NEXT: retq
19
; Two adjacent i64 zero-stores covering 16 contiguous bytes (ptr[0], ptr[1]).
; When unaligned 16-byte ops are fast, DAGCombiner should merge them into one
; xorps + movups pair; when slow, they must remain two separate 8-byte movq's.
20 %idx0 = getelementptr i64, i64* %ptr, i64 0
21 %idx1 = getelementptr i64, i64* %ptr, i64 1
22
23 store i64 0, i64* %idx0, align 8
24 store i64 0, i64* %idx1, align 8
25 ret void
26 }
27
28
29 define void @merge_vec_element_store(<4 x double> %v, double* %ptr) {
30 ; FAST-LABEL: merge_vec_element_store:
31 ; FAST: # BB#0:
32 ; FAST-NEXT: movups %xmm0, (%rdi)
33 ; FAST-NEXT: retq
34 ;
35 ; SLOW-LABEL: merge_vec_element_store:
36 ; SLOW: # BB#0:
37 ; SLOW-NEXT: movlpd %xmm0, (%rdi)
38 ; SLOW-NEXT: movhpd %xmm0, 8(%rdi)
39 ; SLOW-NEXT: retq
40
; Stores of two extracted vector lanes (elements 0 and 1 of %v) to adjacent
; slots. Merging should produce one unaligned 16-byte movups of the whole
; low xmm when that is fast; otherwise the low/high halves are stored
; separately via movlpd/movhpd.
41 %vecext0 = extractelement <4 x double> %v, i32 0
42 %vecext1 = extractelement <4 x double> %v, i32 1
43
44 %idx0 = getelementptr double, double* %ptr, i64 0
45 %idx1 = getelementptr double, double* %ptr, i64 1
46
47 store double %vecext0, double* %idx0, align 8
48 store double %vecext1, double* %idx1, align 8
49 ret void
50 }
51
52
53 define void @merge_vec_load_and_stores(i64 *%ptr) {
54 ; FAST-LABEL: merge_vec_load_and_stores:
55 ; FAST: # BB#0:
56 ; FAST-NEXT: movups (%rdi), %xmm0
57 ; FAST-NEXT: movups %xmm0, 40(%rdi)
58 ; FAST-NEXT: retq
59 ;
60 ; SLOW-LABEL: merge_vec_load_and_stores:
61 ; SLOW: # BB#0:
62 ; SLOW-NEXT: movq (%rdi), %rax
63 ; SLOW-NEXT: movq 8(%rdi), %rcx
64 ; SLOW-NEXT: movq %rax, 40(%rdi)
65 ; SLOW-NEXT: movq %rcx, 48(%rdi)
66 ; SLOW-NEXT: retq
67
; Copies ptr[0..1] to ptr[5..6] (byte offset 40). Both the pair of loads and
; the pair of stores are merge candidates; with fast unaligned 16-byte access
; both collapse to a single movups load + movups store, otherwise to scalar
; movq pairs. Alignment is deliberately 4 (under-aligned for 8-byte i64) to
; exercise the unaligned path.
; NOTE(review): the names %idx4/%idx5 label the slots at GEP indices 5 and 6;
; the names are off-by-one relative to the indices but purely cosmetic.
68 %idx0 = getelementptr i64, i64* %ptr, i64 0
69 %idx1 = getelementptr i64, i64* %ptr, i64 1
70
71 %ld0 = load i64, i64* %idx0, align 4
72 %ld1 = load i64, i64* %idx1, align 4
73
74 %idx4 = getelementptr i64, i64* %ptr, i64 5
75 %idx5 = getelementptr i64, i64* %ptr, i64 6
76
77 store i64 %ld0, i64* %idx4, align 4
78 store i64 %ld1, i64* %idx5, align 4
79 ret void
80 }
81