comparison: test/CodeGen/AArch64/arm64-atomic.ll @ 95:afa8332a0e37 (LLVM 3.8)

author    Kaito Tokumori <e105711@ie.u-ryukyu.ac.jp>
date      Tue, 13 Oct 2015 17:48:58 +0900
parents   60c9769439b8
children  1172e4bd9c6f
compares  84:f3e34b893a5f with 95:afa8332a0e37

File contents at 95:afa8332a0e37:
; RUN: llc < %s -march=arm64 -asm-verbose=false -verify-machineinstrs -mcpu=cyclone | FileCheck %s

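; Acquire cmpxchg: expands to a ldaxr/stxr retry loop, with a clrex on the
; compare-failure path before falling through to the exit block.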
define i32 @val_compare_and_swap(i32* %p, i32 %cmp, i32 %new) #0 {
; CHECK-LABEL: val_compare_and_swap:
; CHECK-NEXT: mov x[[ADDR:[0-9]+]], x0
; CHECK-NEXT: [[TRYBB:.?LBB[0-9_]+]]:
; CHECK-NEXT: ldaxr [[RESULT:w[0-9]+]], [x[[ADDR]]]
; CHECK-NEXT: cmp [[RESULT]], w1
; CHECK-NEXT: b.ne [[FAILBB:.?LBB[0-9_]+]]
; CHECK-NEXT: stxr [[SCRATCH_REG:w[0-9]+]], w2, [x[[ADDR]]]
; CHECK-NEXT: cbnz [[SCRATCH_REG]], [[TRYBB]]
; CHECK-NEXT: b [[EXITBB:.?LBB[0-9_]+]]
; CHECK-NEXT: [[FAILBB]]:
; CHECK-NEXT: clrex
; CHECK-NEXT: [[EXITBB]]:
  %pair = cmpxchg i32* %p, i32 %cmp, i32 %new acquire acquire
  %val = extractvalue { i32, i1 } %pair, 0
  ret i32 %val
}

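; Same expansion when the new value comes from memory: the ldr of the new
; value stays outside the retry loop.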
define i32 @val_compare_and_swap_from_load(i32* %p, i32 %cmp, i32* %pnew) #0 {
; CHECK-LABEL: val_compare_and_swap_from_load:
; CHECK-NEXT: ldr [[NEW:w[0-9]+]], [x2]
; CHECK-NEXT: [[TRYBB:.?LBB[0-9_]+]]:
; CHECK-NEXT: ldaxr [[RESULT:w[0-9]+]], [x0]
; CHECK-NEXT: cmp [[RESULT]], w1
; CHECK-NEXT: b.ne [[FAILBB:.?LBB[0-9_]+]]
; CHECK-NEXT: stxr [[SCRATCH_REG:w[0-9]+]], [[NEW]], [x0]
; CHECK-NEXT: cbnz [[SCRATCH_REG]], [[TRYBB]]
; CHECK-NEXT: b [[EXITBB:.?LBB[0-9_]+]]
; CHECK-NEXT: [[FAILBB]]:
; CHECK-NEXT: clrex
; CHECK-NEXT: [[EXITBB]]:
  %new = load i32, i32* %pnew
  %pair = cmpxchg i32* %p, i32 %cmp, i32 %new acquire acquire
  %val = extractvalue { i32, i1 } %pair, 0
  ret i32 %val
}

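; acq_rel success ordering: the exclusive store becomes stlxr (release) while
; the load stays ldaxr (acquire).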
define i32 @val_compare_and_swap_rel(i32* %p, i32 %cmp, i32 %new) #0 {
; CHECK-LABEL: val_compare_and_swap_rel:
; CHECK-NEXT: mov x[[ADDR:[0-9]+]], x0
; CHECK-NEXT: [[TRYBB:.?LBB[0-9_]+]]:
; CHECK-NEXT: ldaxr [[RESULT:w[0-9]+]], [x[[ADDR]]
; CHECK-NEXT: cmp [[RESULT]], w1
; CHECK-NEXT: b.ne [[FAILBB:.?LBB[0-9_]+]]
; CHECK-NEXT: stlxr [[SCRATCH_REG:w[0-9]+]], w2, [x[[ADDR]]
; CHECK-NEXT: cbnz [[SCRATCH_REG]], [[TRYBB]]
; CHECK-NEXT: b [[EXITBB:.?LBB[0-9_]+]]
; CHECK-NEXT: [[FAILBB]]:
; CHECK-NEXT: clrex
; CHECK-NEXT: [[EXITBB]]:
  %pair = cmpxchg i32* %p, i32 %cmp, i32 %new acq_rel monotonic
  %val = extractvalue { i32, i1 } %pair, 0
  ret i32 %val
}

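; Monotonic 64-bit cmpxchg: plain ldxr/stxr, no acquire or release semantics
; on the exclusive pair.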
define i64 @val_compare_and_swap_64(i64* %p, i64 %cmp, i64 %new) #0 {
; CHECK-LABEL: val_compare_and_swap_64:
; CHECK-NEXT: mov x[[ADDR:[0-9]+]], x0
; CHECK-NEXT: [[TRYBB:.?LBB[0-9_]+]]:
; CHECK-NEXT: ldxr [[RESULT:x[0-9]+]], [x[[ADDR]]]
; CHECK-NEXT: cmp [[RESULT]], x1
; CHECK-NEXT: b.ne [[FAILBB:.?LBB[0-9_]+]]
; CHECK-NEXT: stxr [[SCRATCH_REG:w[0-9]+]], x2, [x[[ADDR]]]
; CHECK-NEXT: cbnz [[SCRATCH_REG]], [[TRYBB]]
; CHECK-NEXT: b [[EXITBB:.?LBB[0-9_]+]]
; CHECK-NEXT: [[FAILBB]]:
; CHECK-NEXT: clrex
; CHECK-NEXT: [[EXITBB]]:
  %pair = cmpxchg i64* %p, i64 %cmp, i64 %new monotonic monotonic
  %val = extractvalue { i64, i1 } %pair, 0
  ret i64 %val
}

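; atomicrmw nand: the new value is computed with mvn plus orr against the
; inverted immediate, and the stlxr status register must not alias it.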
define i32 @fetch_and_nand(i32* %p) #0 {
; CHECK-LABEL: fetch_and_nand:
; CHECK: [[TRYBB:.?LBB[0-9_]+]]:
; CHECK: ldxr w[[DEST_REG:[0-9]+]], [x0]
; CHECK: mvn [[TMP_REG:w[0-9]+]], w[[DEST_REG]]
; CHECK: orr [[SCRATCH2_REG:w[0-9]+]], [[TMP_REG]], #0xfffffff8
; CHECK-NOT: stlxr [[SCRATCH2_REG]], [[SCRATCH2_REG]]
; CHECK: stlxr [[SCRATCH_REG:w[0-9]+]], [[SCRATCH2_REG]], [x0]
; CHECK: cbnz [[SCRATCH_REG]], [[TRYBB]]
; CHECK: mov x0, x[[DEST_REG]]
  %val = atomicrmw nand i32* %p, i32 7 release
  ret i32 %val
}

define i64 @fetch_and_nand_64(i64* %p) #0 {
; CHECK-LABEL: fetch_and_nand_64:
; CHECK: mov x[[ADDR:[0-9]+]], x0
; CHECK: [[TRYBB:.?LBB[0-9_]+]]:
; CHECK: ldaxr x[[DEST_REG:[0-9]+]], [x[[ADDR]]]
; CHECK: mvn w[[TMP_REG:[0-9]+]], w[[DEST_REG]]
; CHECK: orr [[SCRATCH2_REG:x[0-9]+]], x[[TMP_REG]], #0xfffffffffffffff8
; CHECK: stlxr [[SCRATCH_REG:w[0-9]+]], [[SCRATCH2_REG]], [x[[ADDR]]]
; CHECK: cbnz [[SCRATCH_REG]], [[TRYBB]]

  %val = atomicrmw nand i64* %p, i64 7 acq_rel
  ret i64 %val
}

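; atomicrmw or at seq_cst: ldaxr/stlxr loop; the or operand is materialised
; with movz before the loop.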
define i32 @fetch_and_or(i32* %p) #0 {
; CHECK-LABEL: fetch_and_or:
; CHECK: movz [[OLDVAL_REG:w[0-9]+]], #0x5
; CHECK: [[TRYBB:.?LBB[0-9_]+]]:
; CHECK: ldaxr w[[DEST_REG:[0-9]+]], [x0]
; CHECK: orr [[SCRATCH2_REG:w[0-9]+]], w[[DEST_REG]], [[OLDVAL_REG]]
; CHECK-NOT: stlxr [[SCRATCH2_REG]], [[SCRATCH2_REG]]
; CHECK: stlxr [[SCRATCH_REG:w[0-9]+]], [[SCRATCH2_REG]], [x0]
; CHECK: cbnz [[SCRATCH_REG]], [[TRYBB]]
; CHECK: mov x0, x[[DEST_REG]]
  %val = atomicrmw or i32* %p, i32 5 seq_cst
  ret i32 %val
}

define i64 @fetch_and_or_64(i64* %p) #0 {
; CHECK: fetch_and_or_64:
; CHECK: mov x[[ADDR:[0-9]+]], x0
; CHECK: [[TRYBB:.?LBB[0-9_]+]]:
; CHECK: ldxr [[DEST_REG:x[0-9]+]], [x[[ADDR]]]
; CHECK: orr [[SCRATCH2_REG:x[0-9]+]], [[DEST_REG]], #0x7
; CHECK: stxr [[SCRATCH_REG:w[0-9]+]], [[SCRATCH2_REG]], [x[[ADDR]]]
; CHECK: cbnz [[SCRATCH_REG]], [[TRYBB]]
  %val = atomicrmw or i64* %p, i64 7 monotonic
  ret i64 %val
}

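; Fences: acquire lowers to "dmb ishld", release and seq_cst to a full
; "dmb ish".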
define void @acquire_fence() #0 {
  fence acquire
  ret void
; CHECK-LABEL: acquire_fence:
; CHECK: dmb ishld
}

define void @release_fence() #0 {
  fence release
  ret void
; CHECK-LABEL: release_fence:
; CHECK: dmb ish{{$}}
}

define void @seq_cst_fence() #0 {
  fence seq_cst
  ret void
; CHECK-LABEL: seq_cst_fence:
; CHECK: dmb ish{{$}}
}

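; A seq_cst atomic load uses the load-acquire instruction (ldar).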
define i32 @atomic_load(i32* %p) #0 {
  %r = load atomic i32, i32* %p seq_cst, align 4
  ret i32 %r
; CHECK-LABEL: atomic_load:
; CHECK: ldar
}

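; Monotonic/unordered loads are plain loads and can use the full set of
; addressing modes: scaled unsigned offset, register offset, unscaled
; (ldur*) offset, and a separate add for out-of-range immediates.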
define i8 @atomic_load_relaxed_8(i8* %p, i32 %off32) #0 {
; CHECK-LABEL: atomic_load_relaxed_8:
  %ptr_unsigned = getelementptr i8, i8* %p, i32 4095
  %val_unsigned = load atomic i8, i8* %ptr_unsigned monotonic, align 1
; CHECK: ldrb {{w[0-9]+}}, [x0, #4095]

  %ptr_regoff = getelementptr i8, i8* %p, i32 %off32
  %val_regoff = load atomic i8, i8* %ptr_regoff unordered, align 1
  %tot1 = add i8 %val_unsigned, %val_regoff
; CHECK: ldrb {{w[0-9]+}}, [x0, w1, sxtw]

  %ptr_unscaled = getelementptr i8, i8* %p, i32 -256
  %val_unscaled = load atomic i8, i8* %ptr_unscaled monotonic, align 1
  %tot2 = add i8 %tot1, %val_unscaled
; CHECK: ldurb {{w[0-9]+}}, [x0, #-256]

  %ptr_random = getelementptr i8, i8* %p, i32 1191936 ; 0x123000 (i.e. ADD imm)
  %val_random = load atomic i8, i8* %ptr_random unordered, align 1
  %tot3 = add i8 %tot2, %val_random
; CHECK: add x[[ADDR:[0-9]+]], x0, #291, lsl #12
; CHECK: ldrb {{w[0-9]+}}, [x[[ADDR]]]

  ret i8 %tot3
}

define i16 @atomic_load_relaxed_16(i16* %p, i32 %off32) #0 {
; CHECK-LABEL: atomic_load_relaxed_16:
  %ptr_unsigned = getelementptr i16, i16* %p, i32 4095
  %val_unsigned = load atomic i16, i16* %ptr_unsigned monotonic, align 2
; CHECK: ldrh {{w[0-9]+}}, [x0, #8190]

  %ptr_regoff = getelementptr i16, i16* %p, i32 %off32
  %val_regoff = load atomic i16, i16* %ptr_regoff unordered, align 2
  %tot1 = add i16 %val_unsigned, %val_regoff
; CHECK: ldrh {{w[0-9]+}}, [x0, w1, sxtw #1]

  %ptr_unscaled = getelementptr i16, i16* %p, i32 -128
  %val_unscaled = load atomic i16, i16* %ptr_unscaled monotonic, align 2
  %tot2 = add i16 %tot1, %val_unscaled
; CHECK: ldurh {{w[0-9]+}}, [x0, #-256]

  %ptr_random = getelementptr i16, i16* %p, i32 595968 ; 0x123000/2 (i.e. ADD imm)
  %val_random = load atomic i16, i16* %ptr_random unordered, align 2
  %tot3 = add i16 %tot2, %val_random
; CHECK: add x[[ADDR:[0-9]+]], x0, #291, lsl #12
; CHECK: ldrh {{w[0-9]+}}, [x[[ADDR]]]

  ret i16 %tot3
}

define i32 @atomic_load_relaxed_32(i32* %p, i32 %off32) #0 {
; CHECK-LABEL: atomic_load_relaxed_32:
  %ptr_unsigned = getelementptr i32, i32* %p, i32 4095
  %val_unsigned = load atomic i32, i32* %ptr_unsigned monotonic, align 4
; CHECK: ldr {{w[0-9]+}}, [x0, #16380]

  %ptr_regoff = getelementptr i32, i32* %p, i32 %off32
  %val_regoff = load atomic i32, i32* %ptr_regoff unordered, align 4
  %tot1 = add i32 %val_unsigned, %val_regoff
; CHECK: ldr {{w[0-9]+}}, [x0, w1, sxtw #2]

  %ptr_unscaled = getelementptr i32, i32* %p, i32 -64
  %val_unscaled = load atomic i32, i32* %ptr_unscaled monotonic, align 4
  %tot2 = add i32 %tot1, %val_unscaled
; CHECK: ldur {{w[0-9]+}}, [x0, #-256]

  %ptr_random = getelementptr i32, i32* %p, i32 297984 ; 0x123000/4 (i.e. ADD imm)
  %val_random = load atomic i32, i32* %ptr_random unordered, align 4
  %tot3 = add i32 %tot2, %val_random
; CHECK: add x[[ADDR:[0-9]+]], x0, #291, lsl #12
; CHECK: ldr {{w[0-9]+}}, [x[[ADDR]]]

  ret i32 %tot3
}

define i64 @atomic_load_relaxed_64(i64* %p, i32 %off32) #0 {
; CHECK-LABEL: atomic_load_relaxed_64:
  %ptr_unsigned = getelementptr i64, i64* %p, i32 4095
  %val_unsigned = load atomic i64, i64* %ptr_unsigned monotonic, align 8
; CHECK: ldr {{x[0-9]+}}, [x0, #32760]

  %ptr_regoff = getelementptr i64, i64* %p, i32 %off32
  %val_regoff = load atomic i64, i64* %ptr_regoff unordered, align 8
  %tot1 = add i64 %val_unsigned, %val_regoff
; CHECK: ldr {{x[0-9]+}}, [x0, w1, sxtw #3]

  %ptr_unscaled = getelementptr i64, i64* %p, i32 -32
  %val_unscaled = load atomic i64, i64* %ptr_unscaled monotonic, align 8
  %tot2 = add i64 %tot1, %val_unscaled
; CHECK: ldur {{x[0-9]+}}, [x0, #-256]

  %ptr_random = getelementptr i64, i64* %p, i32 148992 ; 0x123000/8 (i.e. ADD imm)
  %val_random = load atomic i64, i64* %ptr_random unordered, align 8
  %tot3 = add i64 %tot2, %val_random
; CHECK: add x[[ADDR:[0-9]+]], x0, #291, lsl #12
; CHECK: ldr {{x[0-9]+}}, [x[[ADDR]]]

  ret i64 %tot3
}

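; A seq_cst atomic store uses the store-release instruction (stlr).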
define void @atomc_store(i32* %p) #0 {
  store atomic i32 4, i32* %p seq_cst, align 4
  ret void
; CHECK-LABEL: atomc_store:
; CHECK: stlr
}

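; As with the loads above, monotonic/unordered stores are plain stores and
; exercise the same addressing modes.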
define void @atomic_store_relaxed_8(i8* %p, i32 %off32, i8 %val) #0 {
; CHECK-LABEL: atomic_store_relaxed_8:
  %ptr_unsigned = getelementptr i8, i8* %p, i32 4095
  store atomic i8 %val, i8* %ptr_unsigned monotonic, align 1
; CHECK: strb {{w[0-9]+}}, [x0, #4095]

  %ptr_regoff = getelementptr i8, i8* %p, i32 %off32
  store atomic i8 %val, i8* %ptr_regoff unordered, align 1
; CHECK: strb {{w[0-9]+}}, [x0, w1, sxtw]

  %ptr_unscaled = getelementptr i8, i8* %p, i32 -256
  store atomic i8 %val, i8* %ptr_unscaled monotonic, align 1
; CHECK: sturb {{w[0-9]+}}, [x0, #-256]

  %ptr_random = getelementptr i8, i8* %p, i32 1191936 ; 0x123000 (i.e. ADD imm)
  store atomic i8 %val, i8* %ptr_random unordered, align 1
; CHECK: add x[[ADDR:[0-9]+]], x0, #291, lsl #12
; CHECK: strb {{w[0-9]+}}, [x[[ADDR]]]

  ret void
}

define void @atomic_store_relaxed_16(i16* %p, i32 %off32, i16 %val) #0 {
; CHECK-LABEL: atomic_store_relaxed_16:
  %ptr_unsigned = getelementptr i16, i16* %p, i32 4095
  store atomic i16 %val, i16* %ptr_unsigned monotonic, align 2
; CHECK: strh {{w[0-9]+}}, [x0, #8190]

  %ptr_regoff = getelementptr i16, i16* %p, i32 %off32
  store atomic i16 %val, i16* %ptr_regoff unordered, align 2
; CHECK: strh {{w[0-9]+}}, [x0, w1, sxtw #1]

  %ptr_unscaled = getelementptr i16, i16* %p, i32 -128
  store atomic i16 %val, i16* %ptr_unscaled monotonic, align 2
; CHECK: sturh {{w[0-9]+}}, [x0, #-256]

  %ptr_random = getelementptr i16, i16* %p, i32 595968 ; 0x123000/2 (i.e. ADD imm)
  store atomic i16 %val, i16* %ptr_random unordered, align 2
; CHECK: add x[[ADDR:[0-9]+]], x0, #291, lsl #12
; CHECK: strh {{w[0-9]+}}, [x[[ADDR]]]

  ret void
}

define void @atomic_store_relaxed_32(i32* %p, i32 %off32, i32 %val) #0 {
; CHECK-LABEL: atomic_store_relaxed_32:
  %ptr_unsigned = getelementptr i32, i32* %p, i32 4095
  store atomic i32 %val, i32* %ptr_unsigned monotonic, align 4
; CHECK: str {{w[0-9]+}}, [x0, #16380]

  %ptr_regoff = getelementptr i32, i32* %p, i32 %off32
  store atomic i32 %val, i32* %ptr_regoff unordered, align 4
; CHECK: str {{w[0-9]+}}, [x0, w1, sxtw #2]

  %ptr_unscaled = getelementptr i32, i32* %p, i32 -64
  store atomic i32 %val, i32* %ptr_unscaled monotonic, align 4
; CHECK: stur {{w[0-9]+}}, [x0, #-256]

  %ptr_random = getelementptr i32, i32* %p, i32 297984 ; 0x123000/4 (i.e. ADD imm)
  store atomic i32 %val, i32* %ptr_random unordered, align 4
; CHECK: add x[[ADDR:[0-9]+]], x0, #291, lsl #12
; CHECK: str {{w[0-9]+}}, [x[[ADDR]]]

  ret void
}

define void @atomic_store_relaxed_64(i64* %p, i32 %off32, i64 %val) #0 {
; CHECK-LABEL: atomic_store_relaxed_64:
  %ptr_unsigned = getelementptr i64, i64* %p, i32 4095
  store atomic i64 %val, i64* %ptr_unsigned monotonic, align 8
; CHECK: str {{x[0-9]+}}, [x0, #32760]

  %ptr_regoff = getelementptr i64, i64* %p, i32 %off32
  store atomic i64 %val, i64* %ptr_regoff unordered, align 8
; CHECK: str {{x[0-9]+}}, [x0, w1, sxtw #3]

  %ptr_unscaled = getelementptr i64, i64* %p, i32 -32
  store atomic i64 %val, i64* %ptr_unscaled monotonic, align 8
; CHECK: stur {{x[0-9]+}}, [x0, #-256]

  %ptr_random = getelementptr i64, i64* %p, i32 148992 ; 0x123000/8 (i.e. ADD imm)
  store atomic i64 %val, i64* %ptr_random unordered, align 8
; CHECK: add x[[ADDR:[0-9]+]], x0, #291, lsl #12
; CHECK: str {{x[0-9]+}}, [x[[ADDR]]]

  ret void
}

; (unchanged lines elided in the comparison view)

@counter = external hidden global %"class.X::Atomic", align 4

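; atomicrmw add on a global accessed through a constant getelementptr
; expression, incremented on both sides of a branch.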
define i32 @next_id() nounwind optsize ssp align 2 {
entry:
  %0 = atomicrmw add i32* getelementptr inbounds (%"class.X::Atomic", %"class.X::Atomic"* @counter, i64 0, i32 0, i32 0), i32 1 seq_cst
  %add.i = add i32 %0, 1
  %tobool = icmp eq i32 %add.i, 0
  br i1 %tobool, label %if.else, label %return

if.else:                                          ; preds = %entry
  %1 = atomicrmw add i32* getelementptr inbounds (%"class.X::Atomic", %"class.X::Atomic"* @counter, i64 0, i32 0, i32 0), i32 1 seq_cst
  %add.i2 = add i32 %1, 1
  br label %return

return:                                           ; preds = %if.else, %entry
  %retval.0 = phi i32 [ %add.i2, %if.else ], [ %add.i, %entry ]
  ret i32 %retval.0
}

attributes #0 = { nounwind }