// RUN: %clang_cc1 < %s -triple armv5e-none-linux-gnueabi -emit-llvm -O1 | FileCheck %s

// FIXME: This file should not be checking -O1 output.
// I.e., it is testing many IR optimizer passes as part of front-end verification.

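// ARMv5E has no exclusive load/store instructions (they arrived in ARMv6), so
// clang cannot inline these atomics and instead emits __atomic_*_N libcalls,
// where the _N suffix encodes the access size in bytes.
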
enum memory_order {
  memory_order_relaxed, memory_order_consume, memory_order_acquire,
  memory_order_release, memory_order_acq_rel, memory_order_seq_cst
};

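// In the atomic libcall ABI, the memory order is passed as the final i32
// argument; these enumerators mirror the __ATOMIC_* macro values, so
// memory_order_relaxed lowers to i32 0 and memory_order_seq_cst to i32 5.
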
int *test_c11_atomic_fetch_add_int_ptr(_Atomic(int *) *p) {
  // CHECK: test_c11_atomic_fetch_add_int_ptr
  // CHECK: {{%[^ ]*}} = call i32 @__atomic_fetch_add_4(i8* {{%[0-9]+}}, i32 12, i32 5)
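  // The C11 builtins use pointer-arithmetic semantics: the operand is scaled by
  // sizeof(*p), so adding 3 to an _Atomic(int *) becomes i32 12 (3 * 4) here,
  // and 5 becomes i32 20 in the sub test below.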
  return __c11_atomic_fetch_add(p, 3, memory_order_seq_cst);
}

int *test_c11_atomic_fetch_sub_int_ptr(_Atomic(int *) *p) {
  // CHECK: test_c11_atomic_fetch_sub_int_ptr
  // CHECK: {{%[^ ]*}} = call i32 @__atomic_fetch_sub_4(i8* {{%[0-9]+}}, i32 20, i32 5)
  return __c11_atomic_fetch_sub(p, 5, memory_order_seq_cst);
}

int test_c11_atomic_fetch_add_int(_Atomic(int) *p) {
  // CHECK: test_c11_atomic_fetch_add_int
  // CHECK: {{%[^ ]*}} = call i32 @__atomic_fetch_add_4(i8* {{%[0-9]+}}, i32 3, i32 5)
  return __c11_atomic_fetch_add(p, 3, memory_order_seq_cst);
}

int test_c11_atomic_fetch_sub_int(_Atomic(int) *p) {
  // CHECK: test_c11_atomic_fetch_sub_int
  // CHECK: {{%[^ ]*}} = call i32 @__atomic_fetch_sub_4(i8* {{%[0-9]+}}, i32 5, i32 5)
  return __c11_atomic_fetch_sub(p, 5, memory_order_seq_cst);
}

int *fp2a(int **p) {
  // CHECK: @fp2a
  // CHECK: {{%[^ ]*}} = call i32 @__atomic_fetch_sub_4(i8* {{%[0-9]+}}, i32 4, i32 0)
  // Note: the GNU builtins do not multiply by sizeof(T)!
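  // The operand 4 is therefore passed through unscaled, and the final i32 0 is
  // memory_order_relaxed.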
  return __atomic_fetch_sub(p, 4, memory_order_relaxed);
}

int test_atomic_fetch_add(int *p) {
  // CHECK: test_atomic_fetch_add
  // CHECK: {{%[^ ]*}} = call i32 @__atomic_fetch_add_4(i8* {{%[0-9]+}}, i32 55, i32 5)
  return __atomic_fetch_add(p, 55, memory_order_seq_cst);
}

int test_atomic_fetch_sub(int *p) {
  // CHECK: test_atomic_fetch_sub
  // CHECK: {{%[^ ]*}} = call i32 @__atomic_fetch_sub_4(i8* {{%[0-9]+}}, i32 55, i32 5)
  return __atomic_fetch_sub(p, 55, memory_order_seq_cst);
}

int test_atomic_fetch_and(int *p) {
  // CHECK: test_atomic_fetch_and
  // CHECK: {{%[^ ]*}} = call i32 @__atomic_fetch_and_4(i8* {{%[0-9]+}}, i32 55, i32 5)
  return __atomic_fetch_and(p, 55, memory_order_seq_cst);
}

int test_atomic_fetch_or(int *p) {
  // CHECK: test_atomic_fetch_or
  // CHECK: {{%[^ ]*}} = call i32 @__atomic_fetch_or_4(i8* {{%[0-9]+}}, i32 55, i32 5)
  return __atomic_fetch_or(p, 55, memory_order_seq_cst);
}

int test_atomic_fetch_xor(int *p) {
  // CHECK: test_atomic_fetch_xor
  // CHECK: {{%[^ ]*}} = call i32 @__atomic_fetch_xor_4(i8* {{%[0-9]+}}, i32 55, i32 5)
  return __atomic_fetch_xor(p, 55, memory_order_seq_cst);
}

int test_atomic_fetch_nand(int *p) {
  // CHECK: test_atomic_fetch_nand
  // CHECK: {{%[^ ]*}} = call i32 @__atomic_fetch_nand_4(i8* {{%[0-9]+}}, i32 55, i32 5)
  return __atomic_fetch_nand(p, 55, memory_order_seq_cst);
}

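// Clang lowers the GNU op_fetch builtins to the matching fetch_op libcall and
// then re-applies the operation to the returned value; note that at -O1 the
// subtraction in sub_fetch is canonicalized to an add of -55.
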
int test_atomic_add_fetch(int *p) {
  // CHECK: test_atomic_add_fetch
  // CHECK: [[CALL:%[^ ]*]] = call i32 @__atomic_fetch_add_4(i8* {{%[0-9]+}}, i32 55, i32 5)
  // CHECK: {{%[^ ]*}} = add i32 [[CALL]], 55
  return __atomic_add_fetch(p, 55, memory_order_seq_cst);
}

int test_atomic_sub_fetch(int *p) {
  // CHECK: test_atomic_sub_fetch
  // CHECK: [[CALL:%[^ ]*]] = call i32 @__atomic_fetch_sub_4(i8* {{%[0-9]+}}, i32 55, i32 5)
  // CHECK: {{%[^ ]*}} = add i32 [[CALL]], -55
  return __atomic_sub_fetch(p, 55, memory_order_seq_cst);
}

int test_atomic_and_fetch(int *p) {
  // CHECK: test_atomic_and_fetch
  // CHECK: [[CALL:%[^ ]*]] = call i32 @__atomic_fetch_and_4(i8* {{%[0-9]+}}, i32 55, i32 5)
  // CHECK: {{%[^ ]*}} = and i32 [[CALL]], 55
  return __atomic_and_fetch(p, 55, memory_order_seq_cst);
}

int test_atomic_or_fetch(int *p) {
  // CHECK: test_atomic_or_fetch
  // CHECK: [[CALL:%[^ ]*]] = call i32 @__atomic_fetch_or_4(i8* {{%[0-9]+}}, i32 55, i32 5)
  // CHECK: {{%[^ ]*}} = or i32 [[CALL]], 55
  return __atomic_or_fetch(p, 55, memory_order_seq_cst);
}

int test_atomic_xor_fetch(int *p) {
  // CHECK: test_atomic_xor_fetch
  // CHECK: [[CALL:%[^ ]*]] = call i32 @__atomic_fetch_xor_4(i8* {{%[0-9]+}}, i32 55, i32 5)
  // CHECK: {{%[^ ]*}} = xor i32 [[CALL]], 55
  return __atomic_xor_fetch(p, 55, memory_order_seq_cst);
}

int test_atomic_nand_fetch(int *p) {
  // CHECK: test_atomic_nand_fetch
  // CHECK: [[CALL:%[^ ]*]] = call i32 @__atomic_fetch_nand_4(i8* {{%[0-9]+}}, i32 55, i32 5)
  // FIXME: We should not be checking optimized IR. It changes independently of clang.
  // FIXME-CHECK: [[AND:%[^ ]*]] = and i32 [[CALL]], 55
  // FIXME-CHECK: {{%[^ ]*}} = xor i32 [[AND]], -1
  return __atomic_nand_fetch(p, 55, memory_order_seq_cst);
}