// RUN: %clang_cc1 -triple "i686-unknown-unknown" -emit-llvm -x c %s -o - -O3 | FileCheck %s
// RUN: %clang_cc1 -triple "x86_64-unknown-unknown" -emit-llvm -x c %s -o - -O3 | FileCheck %s
// RUN: %clang_cc1 -triple "x86_64-mingw32" -emit-llvm -x c %s -o - -O3 | FileCheck %s

// __builtin_addcb: add-with-carry on unsigned char. The CHECK lines below
// pin the expected lowering: two chained @llvm.uadd.with.overflow.i8 calls
// (x+y, then sum+carryin), the two carry bits OR'd together, zero-extended
// back to i8, and the sum stored through z.
unsigned char test_addcb(unsigned char x, unsigned char y,
                         unsigned char carryin, unsigned char *z) {
  // CHECK: @test_addcb
  // CHECK: %{{.+}} = {{.*}} call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 %x, i8 %y)
  // CHECK: %{{.+}} = extractvalue { i8, i1 } %{{.+}}, 1
  // CHECK: %{{.+}} = extractvalue { i8, i1 } %{{.+}}, 0
  // CHECK: %{{.+}} = {{.*}} call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 %{{.+}}, i8 %carryin)
  // CHECK: %{{.+}} = extractvalue { i8, i1 } %{{.+}}, 1
  // CHECK: %{{.+}} = extractvalue { i8, i1 } %{{.+}}, 0
  // CHECK: %{{.+}} = or i1 %{{.+}}, %{{.+}}
  // CHECK: %{{.+}} = zext i1 %{{.+}} to i8
  // CHECK: store i8 %{{.+}}, i8* %z, align 1

  unsigned char carryout;
  *z = __builtin_addcb(x, y, carryin, &carryout);

  return carryout;
}
|
|
23
|
|
// __builtin_addcs: same add-with-carry lowering as test_addcb, at i16 width.
unsigned short test_addcs(unsigned short x, unsigned short y,
                          unsigned short carryin, unsigned short *z) {
  // CHECK: @test_addcs
  // CHECK: %{{.+}} = {{.*}} call { i16, i1 } @llvm.uadd.with.overflow.i16(i16 %x, i16 %y)
  // CHECK: %{{.+}} = extractvalue { i16, i1 } %{{.+}}, 1
  // CHECK: %{{.+}} = extractvalue { i16, i1 } %{{.+}}, 0
  // CHECK: %{{.+}} = {{.*}} call { i16, i1 } @llvm.uadd.with.overflow.i16(i16 %{{.+}}, i16 %carryin)
  // CHECK: %{{.+}} = extractvalue { i16, i1 } %{{.+}}, 1
  // CHECK: %{{.+}} = extractvalue { i16, i1 } %{{.+}}, 0
  // CHECK: %{{.+}} = or i1 %{{.+}}, %{{.+}}
  // CHECK: %{{.+}} = zext i1 %{{.+}} to i16
  // CHECK: store i16 %{{.+}}, i16* %z, align 2

  unsigned short carryout;
  *z = __builtin_addcs(x, y, carryin, &carryout);

  return carryout;
}
|
|
42
|
|
// __builtin_addc: add-with-carry lowering at i32 (unsigned int) width.
unsigned test_addc(unsigned x, unsigned y, unsigned carryin, unsigned *z) {
  // CHECK: @test_addc
  // CHECK: %{{.+}} = {{.*}} call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %x, i32 %y)
  // CHECK: %{{.+}} = extractvalue { i32, i1 } %{{.+}}, 1
  // CHECK: %{{.+}} = extractvalue { i32, i1 } %{{.+}}, 0
  // CHECK: %{{.+}} = {{.*}} call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %{{.+}}, i32 %carryin)
  // CHECK: %{{.+}} = extractvalue { i32, i1 } %{{.+}}, 1
  // CHECK: %{{.+}} = extractvalue { i32, i1 } %{{.+}}, 0
  // CHECK: %{{.+}} = or i1 %{{.+}}, %{{.+}}
  // CHECK: %{{.+}} = zext i1 %{{.+}} to i32
  // CHECK: store i32 %{{.+}}, i32* %z, align 4
  unsigned carryout;
  *z = __builtin_addc(x, y, carryin, &carryout);

  return carryout;
}
|
|
59
|
|
// __builtin_addcl: add-with-carry on unsigned long. The width of long is
// target-dependent, so the first CHECK captures it into [[UL]] and every
// later line is checked against that captured type.
unsigned long test_addcl(unsigned long x, unsigned long y,
                         unsigned long carryin, unsigned long *z) {
  // long is i32 on i686, i64 on x86_64.
  // CHECK: @test_addcl([[UL:i32|i64]] %x
  // CHECK: %{{.+}} = {{.*}} call { [[UL]], i1 } @llvm.uadd.with.overflow.[[UL]]([[UL]] %x, [[UL]] %y)
  // CHECK: %{{.+}} = extractvalue { [[UL]], i1 } %{{.+}}, 1
  // CHECK: %{{.+}} = extractvalue { [[UL]], i1 } %{{.+}}, 0
  // CHECK: %{{.+}} = {{.*}} call { [[UL]], i1 } @llvm.uadd.with.overflow.[[UL]]([[UL]] %{{.+}}, [[UL]] %carryin)
  // CHECK: %{{.+}} = extractvalue { [[UL]], i1 } %{{.+}}, 1
  // CHECK: %{{.+}} = extractvalue { [[UL]], i1 } %{{.+}}, 0
  // CHECK: %{{.+}} = or i1 %{{.+}}, %{{.+}}
  // CHECK: %{{.+}} = zext i1 %{{.+}} to [[UL]]
  // CHECK: store [[UL]] %{{.+}}, [[UL]]* %z
  unsigned long carryout;
  *z = __builtin_addcl(x, y, carryin, &carryout);

  return carryout;
}
|
|
78
|
|
// __builtin_addcll: add-with-carry lowering at i64 (unsigned long long) width.
unsigned long long test_addcll(unsigned long long x, unsigned long long y,
                               unsigned long long carryin,
                               unsigned long long *z) {
  // CHECK: @test_addcll
  // CHECK: %{{.+}} = {{.*}} call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 %x, i64 %y)
  // CHECK: %{{.+}} = extractvalue { i64, i1 } %{{.+}}, 1
  // CHECK: %{{.+}} = extractvalue { i64, i1 } %{{.+}}, 0
  // CHECK: %{{.+}} = {{.*}} call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 %{{.+}}, i64 %carryin)
  // CHECK: %{{.+}} = extractvalue { i64, i1 } %{{.+}}, 1
  // CHECK: %{{.+}} = extractvalue { i64, i1 } %{{.+}}, 0
  // CHECK: %{{.+}} = or i1 %{{.+}}, %{{.+}}
  // CHECK: %{{.+}} = zext i1 %{{.+}} to i64
  // CHECK: store i64 %{{.+}}, i64* %z
  unsigned long long carryout;
  *z = __builtin_addcll(x, y, carryin, &carryout);

  return carryout;
}
|
|
97
|
|
// __builtin_subcb: subtract-with-borrow on unsigned char. Mirrors the addc
// tests but lowers to @llvm.usub.with.overflow.i8; the two borrow bits are
// OR'd, zero-extended to i8, and the difference stored through z.
unsigned char test_subcb(unsigned char x, unsigned char y,
                         unsigned char carryin, unsigned char *z) {
  // CHECK: @test_subcb
  // CHECK: %{{.+}} = {{.*}} call { i8, i1 } @llvm.usub.with.overflow.i8(i8 %x, i8 %y)
  // CHECK: %{{.+}} = extractvalue { i8, i1 } %{{.+}}, 1
  // CHECK: %{{.+}} = extractvalue { i8, i1 } %{{.+}}, 0
  // CHECK: %{{.+}} = {{.*}} call { i8, i1 } @llvm.usub.with.overflow.i8(i8 %{{.+}}, i8 %carryin)
  // CHECK: %{{.+}} = extractvalue { i8, i1 } %{{.+}}, 1
  // CHECK: %{{.+}} = extractvalue { i8, i1 } %{{.+}}, 0
  // CHECK: %{{.+}} = or i1 %{{.+}}, %{{.+}}
  // CHECK: %{{.+}} = zext i1 %{{.+}} to i8
  // CHECK: store i8 %{{.+}}, i8* %z, align 1

  unsigned char carryout;
  *z = __builtin_subcb(x, y, carryin, &carryout);

  return carryout;
}
|
|
116
|
|
// __builtin_subcs: same subtract-with-borrow lowering as test_subcb, at i16 width.
unsigned short test_subcs(unsigned short x, unsigned short y,
                          unsigned short carryin, unsigned short *z) {
  // CHECK: @test_subcs
  // CHECK: %{{.+}} = {{.*}} call { i16, i1 } @llvm.usub.with.overflow.i16(i16 %x, i16 %y)
  // CHECK: %{{.+}} = extractvalue { i16, i1 } %{{.+}}, 1
  // CHECK: %{{.+}} = extractvalue { i16, i1 } %{{.+}}, 0
  // CHECK: %{{.+}} = {{.*}} call { i16, i1 } @llvm.usub.with.overflow.i16(i16 %{{.+}}, i16 %carryin)
  // CHECK: %{{.+}} = extractvalue { i16, i1 } %{{.+}}, 1
  // CHECK: %{{.+}} = extractvalue { i16, i1 } %{{.+}}, 0
  // CHECK: %{{.+}} = or i1 %{{.+}}, %{{.+}}
  // CHECK: %{{.+}} = zext i1 %{{.+}} to i16
  // CHECK: store i16 %{{.+}}, i16* %z, align 2

  unsigned short carryout;
  *z = __builtin_subcs(x, y, carryin, &carryout);

  return carryout;
}
|
|
135
|
|
// __builtin_subc: subtract-with-borrow lowering at i32 (unsigned int) width.
unsigned test_subc(unsigned x, unsigned y, unsigned carryin, unsigned *z) {
  // CHECK: @test_subc
  // CHECK: %{{.+}} = {{.*}} call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %x, i32 %y)
  // CHECK: %{{.+}} = extractvalue { i32, i1 } %{{.+}}, 1
  // CHECK: %{{.+}} = extractvalue { i32, i1 } %{{.+}}, 0
  // CHECK: %{{.+}} = {{.*}} call { i32, i1 } @llvm.usub.with.overflow.i32(i32 %{{.+}}, i32 %carryin)
  // CHECK: %{{.+}} = extractvalue { i32, i1 } %{{.+}}, 1
  // CHECK: %{{.+}} = extractvalue { i32, i1 } %{{.+}}, 0
  // CHECK: %{{.+}} = or i1 %{{.+}}, %{{.+}}
  // CHECK: %{{.+}} = zext i1 %{{.+}} to i32
  // CHECK: store i32 %{{.+}}, i32* %z, align 4
  unsigned carryout;
  *z = __builtin_subc(x, y, carryin, &carryout);

  return carryout;
}
|
|
152
|
|
// __builtin_subcl: subtract-with-borrow on unsigned long. As in test_addcl,
// long is i32 on i686 and i64 on x86_64, so the first CHECK captures the
// actual width into [[UL]] for the remaining lines.
unsigned long test_subcl(unsigned long x, unsigned long y,
                         unsigned long carryin, unsigned long *z) {
  // CHECK: @test_subcl([[UL:i32|i64]] %x
  // CHECK: %{{.+}} = {{.*}} call { [[UL]], i1 } @llvm.usub.with.overflow.[[UL]]([[UL]] %x, [[UL]] %y)
  // CHECK: %{{.+}} = extractvalue { [[UL]], i1 } %{{.+}}, 1
  // CHECK: %{{.+}} = extractvalue { [[UL]], i1 } %{{.+}}, 0
  // CHECK: %{{.+}} = {{.*}} call { [[UL]], i1 } @llvm.usub.with.overflow.[[UL]]([[UL]] %{{.+}}, [[UL]] %carryin)
  // CHECK: %{{.+}} = extractvalue { [[UL]], i1 } %{{.+}}, 1
  // CHECK: %{{.+}} = extractvalue { [[UL]], i1 } %{{.+}}, 0
  // CHECK: %{{.+}} = or i1 %{{.+}}, %{{.+}}
  // CHECK: %{{.+}} = zext i1 %{{.+}} to [[UL]]
  // CHECK: store [[UL]] %{{.+}}, [[UL]]* %z
  unsigned long carryout;
  *z = __builtin_subcl(x, y, carryin, &carryout);

  return carryout;
}
|
|
170
|
|
// __builtin_subcll: subtract-with-borrow lowering at i64 (unsigned long long) width.
unsigned long long test_subcll(unsigned long long x, unsigned long long y,
                               unsigned long long carryin,
                               unsigned long long *z) {
  // CHECK: @test_subcll
  // CHECK: %{{.+}} = {{.*}} call { i64, i1 } @llvm.usub.with.overflow.i64(i64 %x, i64 %y)
  // CHECK: %{{.+}} = extractvalue { i64, i1 } %{{.+}}, 1
  // CHECK: %{{.+}} = extractvalue { i64, i1 } %{{.+}}, 0
  // CHECK: %{{.+}} = {{.*}} call { i64, i1 } @llvm.usub.with.overflow.i64(i64 %{{.+}}, i64 %carryin)
  // CHECK: %{{.+}} = extractvalue { i64, i1 } %{{.+}}, 1
  // CHECK: %{{.+}} = extractvalue { i64, i1 } %{{.+}}, 0
  // CHECK: %{{.+}} = or i1 %{{.+}}, %{{.+}}
  // CHECK: %{{.+}} = zext i1 %{{.+}} to i64
  // CHECK: store i64 %{{.+}}, i64* %z
  unsigned long long carryout;
  *z = __builtin_subcll(x, y, carryin, &carryout);

  return carryout;
}
|