Mercurial > hg > CbC > CbC_llvm
comparison test/CodeGen/X86/atomic6432.ll @ 77:54457678186b LLVM3.6
LLVM 3.6
author | Kaito Tokumori <e105711@ie.u-ryukyu.ac.jp> |
---|---|
date | Mon, 08 Sep 2014 22:06:00 +0900 |
parents | 95c75e76d11b |
children | afa8332a0e37 |
comparison
equal
deleted
inserted
replaced
34:e874dbf0ad9d | 77:54457678186b |
---|---|
1 ; RUN: llc < %s -O0 -march=x86 -mcpu=corei7 -verify-machineinstrs | FileCheck %s --check-prefix X32 | 1 ; RUN: llc < %s -O0 -march=x86 -mcpu=corei7 -verify-machineinstrs | FileCheck %s --check-prefix X32 |
2 | 2 |
3 @sc64 = external global i64 | 3 @sc64 = external global i64 |
4 | 4 |
5 define void @atomic_fetch_add64() nounwind { | 5 define void @atomic_fetch_add64() nounwind { |
6 ; X32: atomic_fetch_add64 | 6 ; X64-LABEL: atomic_fetch_add64: |
7 ; X32-LABEL: atomic_fetch_add64: | |
7 entry: | 8 entry: |
8 %t1 = atomicrmw add i64* @sc64, i64 1 acquire | 9 %t1 = atomicrmw add i64* @sc64, i64 1 acquire |
9 ; X32: addl | 10 ; X32: addl |
10 ; X32: adcl | 11 ; X32: adcl |
11 ; X32: lock | 12 ; X32: lock |
28 ret void | 29 ret void |
29 ; X32: ret | 30 ; X32: ret |
30 } | 31 } |
31 | 32 |
32 define void @atomic_fetch_sub64() nounwind { | 33 define void @atomic_fetch_sub64() nounwind { |
33 ; X32: atomic_fetch_sub64 | 34 ; X64-LABEL: atomic_fetch_sub64: |
35 ; X32-LABEL: atomic_fetch_sub64: | |
34 %t1 = atomicrmw sub i64* @sc64, i64 1 acquire | 36 %t1 = atomicrmw sub i64* @sc64, i64 1 acquire |
37 ; X32: addl $-1 | |
38 ; X32: adcl $-1 | |
39 ; X32: lock | |
40 ; X32: cmpxchg8b | |
41 %t2 = atomicrmw sub i64* @sc64, i64 3 acquire | |
42 ; X32: addl $-3 | |
43 ; X32: adcl $-1 | |
44 ; X32: lock | |
45 ; X32: cmpxchg8b | |
46 %t3 = atomicrmw sub i64* @sc64, i64 5 acquire | |
47 ; X32: addl $-5 | |
48 ; X32: adcl $-1 | |
49 ; X32: lock | |
50 ; X32: cmpxchg8b | |
51 %t4 = atomicrmw sub i64* @sc64, i64 %t3 acquire | |
35 ; X32: subl | 52 ; X32: subl |
36 ; X32: sbbl | 53 ; X32: sbbl |
37 ; X32: lock | 54 ; X32: lock |
38 ; X32: cmpxchg8b | 55 ; X32: cmpxchg8b |
39 %t2 = atomicrmw sub i64* @sc64, i64 3 acquire | |
40 ; X32: subl | |
41 ; X32: sbbl | |
42 ; X32: lock | |
43 ; X32: cmpxchg8b | |
44 %t3 = atomicrmw sub i64* @sc64, i64 5 acquire | |
45 ; X32: subl | |
46 ; X32: sbbl | |
47 ; X32: lock | |
48 ; X32: cmpxchg8b | |
49 %t4 = atomicrmw sub i64* @sc64, i64 %t3 acquire | |
50 ; X32: subl | |
51 ; X32: sbbl | |
52 ; X32: lock | |
53 ; X32: cmpxchg8b | |
54 ret void | 56 ret void |
55 ; X32: ret | 57 ; X32: ret |
56 } | 58 } |
57 | 59 |
58 define void @atomic_fetch_and64() nounwind { | 60 define void @atomic_fetch_and64() nounwind { |
59 ; X32: atomic_fetch_and64 | 61 ; X64-LABEL: atomic_fetch_and64: |
62 ; X32-LABEL: atomic_fetch_and64: | |
60 %t1 = atomicrmw and i64* @sc64, i64 3 acquire | 63 %t1 = atomicrmw and i64* @sc64, i64 3 acquire |
61 ; X32: andl | 64 ; X32: andl $3 |
62 ; X32: andl | 65 ; X32-NOT: andl |
63 ; X32: lock | 66 ; X32: lock |
64 ; X32: cmpxchg8b | 67 ; X32: cmpxchg8b |
65 %t2 = atomicrmw and i64* @sc64, i64 5 acquire | 68 %t2 = atomicrmw and i64* @sc64, i64 4294967297 acquire |
66 ; X32: andl | 69 ; X32: andl $1 |
67 ; X32: andl | 70 ; X32: andl $1 |
68 ; X32: lock | 71 ; X32: lock |
69 ; X32: cmpxchg8b | 72 ; X32: cmpxchg8b |
70 %t3 = atomicrmw and i64* @sc64, i64 %t2 acquire | 73 %t3 = atomicrmw and i64* @sc64, i64 %t2 acquire |
71 ; X32: andl | 74 ; X32: andl |
72 ; X32: andl | 75 ; X32: andl |
75 ret void | 78 ret void |
76 ; X32: ret | 79 ; X32: ret |
77 } | 80 } |
78 | 81 |
79 define void @atomic_fetch_or64() nounwind { | 82 define void @atomic_fetch_or64() nounwind { |
80 ; X32: atomic_fetch_or64 | 83 ; X64-LABEL: atomic_fetch_or64: |
84 ; X32-LABEL: atomic_fetch_or64: | |
81 %t1 = atomicrmw or i64* @sc64, i64 3 acquire | 85 %t1 = atomicrmw or i64* @sc64, i64 3 acquire |
82 ; X32: orl | 86 ; X32: orl $3 |
83 ; X32: orl | 87 ; X32-NOT: orl |
84 ; X32: lock | 88 ; X32: lock |
85 ; X32: cmpxchg8b | 89 ; X32: cmpxchg8b |
86 %t2 = atomicrmw or i64* @sc64, i64 5 acquire | 90 %t2 = atomicrmw or i64* @sc64, i64 4294967297 acquire |
87 ; X32: orl | 91 ; X32: orl $1 |
88 ; X32: orl | 92 ; X32: orl $1 |
89 ; X32: lock | 93 ; X32: lock |
90 ; X32: cmpxchg8b | 94 ; X32: cmpxchg8b |
91 %t3 = atomicrmw or i64* @sc64, i64 %t2 acquire | 95 %t3 = atomicrmw or i64* @sc64, i64 %t2 acquire |
92 ; X32: orl | 96 ; X32: orl |
93 ; X32: orl | 97 ; X32: orl |
96 ret void | 100 ret void |
97 ; X32: ret | 101 ; X32: ret |
98 } | 102 } |
99 | 103 |
100 define void @atomic_fetch_xor64() nounwind { | 104 define void @atomic_fetch_xor64() nounwind { |
101 ; X32: atomic_fetch_xor64 | 105 ; X64-LABEL: atomic_fetch_xor64: |
106 ; X32-LABEL: atomic_fetch_xor64: | |
102 %t1 = atomicrmw xor i64* @sc64, i64 3 acquire | 107 %t1 = atomicrmw xor i64* @sc64, i64 3 acquire |
103 ; X32: xorl | 108 ; X32: xorl |
104 ; X32: xorl | 109 ; X32-NOT: xorl |
105 ; X32: lock | 110 ; X32: lock |
106 ; X32: cmpxchg8b | 111 ; X32: cmpxchg8b |
107 %t2 = atomicrmw xor i64* @sc64, i64 5 acquire | 112 %t2 = atomicrmw xor i64* @sc64, i64 4294967297 acquire |
108 ; X32: xorl | 113 ; X32: xorl $1 |
109 ; X32: xorl | 114 ; X32: xorl $1 |
110 ; X32: lock | 115 ; X32: lock |
111 ; X32: cmpxchg8b | 116 ; X32: cmpxchg8b |
112 %t3 = atomicrmw xor i64* @sc64, i64 %t2 acquire | 117 %t3 = atomicrmw xor i64* @sc64, i64 %t2 acquire |
113 ; X32: xorl | 118 ; X32: xorl |
114 ; X32: xorl | 119 ; X32: xorl |
117 ret void | 122 ret void |
118 ; X32: ret | 123 ; X32: ret |
119 } | 124 } |
120 | 125 |
121 define void @atomic_fetch_nand64(i64 %x) nounwind { | 126 define void @atomic_fetch_nand64(i64 %x) nounwind { |
122 ; X32: atomic_fetch_nand64 | 127 ; X64-LABEL: atomic_fetch_nand64: |
128 ; X32-LABEL: atomic_fetch_nand64: | |
123 %t1 = atomicrmw nand i64* @sc64, i64 %x acquire | 129 %t1 = atomicrmw nand i64* @sc64, i64 %x acquire |
124 ; X32: andl | 130 ; X32: andl |
125 ; X32: andl | 131 ; X32: andl |
126 ; X32: notl | 132 ; X32: notl |
127 ; X32: notl | 133 ; X32: notl |
130 ret void | 136 ret void |
131 ; X32: ret | 137 ; X32: ret |
132 } | 138 } |
133 | 139 |
134 define void @atomic_fetch_max64(i64 %x) nounwind { | 140 define void @atomic_fetch_max64(i64 %x) nounwind { |
141 ; X64-LABEL: atomic_fetch_max64: | |
142 ; X32-LABEL: atomic_fetch_max64: | |
135 %t1 = atomicrmw max i64* @sc64, i64 %x acquire | 143 %t1 = atomicrmw max i64* @sc64, i64 %x acquire |
136 ; X32: cmpl | 144 ; X32: subl |
137 ; X32: cmpl | 145 ; X32: subl |
138 ; X32: cmov | |
139 ; X32: cmov | 146 ; X32: cmov |
140 ; X32: cmov | 147 ; X32: cmov |
141 ; X32: lock | 148 ; X32: lock |
142 ; X32: cmpxchg8b | 149 ; X32: cmpxchg8b |
143 ret void | 150 ret void |
144 ; X32: ret | 151 ; X32: ret |
145 } | 152 } |
146 | 153 |
147 define void @atomic_fetch_min64(i64 %x) nounwind { | 154 define void @atomic_fetch_min64(i64 %x) nounwind { |
155 ; X64-LABEL: atomic_fetch_min64: | |
156 ; X32-LABEL: atomic_fetch_min64: | |
148 %t1 = atomicrmw min i64* @sc64, i64 %x acquire | 157 %t1 = atomicrmw min i64* @sc64, i64 %x acquire |
149 ; X32: cmpl | 158 ; X32: subl |
150 ; X32: cmpl | 159 ; X32: subl |
151 ; X32: cmov | |
152 ; X32: cmov | 160 ; X32: cmov |
153 ; X32: cmov | 161 ; X32: cmov |
154 ; X32: lock | 162 ; X32: lock |
155 ; X32: cmpxchg8b | 163 ; X32: cmpxchg8b |
156 ret void | 164 ret void |
157 ; X32: ret | 165 ; X32: ret |
158 } | 166 } |
159 | 167 |
160 define void @atomic_fetch_umax64(i64 %x) nounwind { | 168 define void @atomic_fetch_umax64(i64 %x) nounwind { |
169 ; X64-LABEL: atomic_fetch_umax64: | |
170 ; X32-LABEL: atomic_fetch_umax64: | |
161 %t1 = atomicrmw umax i64* @sc64, i64 %x acquire | 171 %t1 = atomicrmw umax i64* @sc64, i64 %x acquire |
162 ; X32: cmpl | 172 ; X32: subl |
163 ; X32: cmpl | 173 ; X32: subl |
164 ; X32: cmov | |
165 ; X32: cmov | 174 ; X32: cmov |
166 ; X32: cmov | 175 ; X32: cmov |
167 ; X32: lock | 176 ; X32: lock |
168 ; X32: cmpxchg8b | 177 ; X32: cmpxchg8b |
169 ret void | 178 ret void |
170 ; X32: ret | 179 ; X32: ret |
171 } | 180 } |
172 | 181 |
173 define void @atomic_fetch_umin64(i64 %x) nounwind { | 182 define void @atomic_fetch_umin64(i64 %x) nounwind { |
183 ; X64-LABEL: atomic_fetch_umin64: | |
184 ; X32-LABEL: atomic_fetch_umin64: | |
174 %t1 = atomicrmw umin i64* @sc64, i64 %x acquire | 185 %t1 = atomicrmw umin i64* @sc64, i64 %x acquire |
175 ; X32: cmpl | 186 ; X32: subl |
176 ; X32: cmpl | 187 ; X32: subl |
177 ; X32: cmov | |
178 ; X32: cmov | 188 ; X32: cmov |
179 ; X32: cmov | 189 ; X32: cmov |
180 ; X32: lock | 190 ; X32: lock |
181 ; X32: cmpxchg8b | 191 ; X32: cmpxchg8b |
182 ret void | 192 ret void |
183 ; X32: ret | 193 ; X32: ret |
184 } | 194 } |
185 | 195 |
186 define void @atomic_fetch_cmpxchg64() nounwind { | 196 define void @atomic_fetch_cmpxchg64() nounwind { |
187 %t1 = cmpxchg i64* @sc64, i64 0, i64 1 acquire | 197 ; X64-LABEL: atomic_fetch_cmpxchg64: |
198 ; X32-LABEL: atomic_fetch_cmpxchg64: | |
199 %t1 = cmpxchg i64* @sc64, i64 0, i64 1 acquire acquire | |
188 ; X32: lock | 200 ; X32: lock |
189 ; X32: cmpxchg8b | 201 ; X32: cmpxchg8b |
190 ret void | 202 ret void |
191 ; X32: ret | 203 ; X32: ret |
192 } | 204 } |
193 | 205 |
194 define void @atomic_fetch_store64(i64 %x) nounwind { | 206 define void @atomic_fetch_store64(i64 %x) nounwind { |
207 ; X64-LABEL: atomic_fetch_store64: | |
208 ; X32-LABEL: atomic_fetch_store64: | |
195 store atomic i64 %x, i64* @sc64 release, align 8 | 209 store atomic i64 %x, i64* @sc64 release, align 8 |
196 ; X32: lock | 210 ; X32: lock |
197 ; X32: cmpxchg8b | 211 ; X32: cmpxchg8b |
198 ret void | 212 ret void |
199 ; X32: ret | 213 ; X32: ret |
200 } | 214 } |
201 | 215 |
202 define void @atomic_fetch_swap64(i64 %x) nounwind { | 216 define void @atomic_fetch_swap64(i64 %x) nounwind { |
217 ; X64-LABEL: atomic_fetch_swap64: | |
218 ; X32-LABEL: atomic_fetch_swap64: | |
203 %t1 = atomicrmw xchg i64* @sc64, i64 %x acquire | 219 %t1 = atomicrmw xchg i64* @sc64, i64 %x acquire |
204 ; X32: lock | 220 ; X32: lock |
205 ; X32: xchg8b | 221 ; X32: xchg8b |
206 ret void | 222 ret void |
207 ; X32: ret | 223 ; X32: ret |