/* Code for range operators.
   Copyright (C) 2017-2020 Free Software Foundation, Inc.
   Contributed by Andrew MacLeod <amacleod@redhat.com>
   and Aldy Hernandez <aldyh@redhat.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "insn-codes.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "cfghooks.h"
#include "tree-pass.h"
#include "ssa.h"
#include "optabs-tree.h"
#include "gimple-pretty-print.h"
#include "diagnostic-core.h"
#include "flags.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "calls.h"
#include "cfganal.h"
#include "gimple-fold.h"
#include "tree-eh.h"
#include "gimple-iterator.h"
#include "gimple-walk.h"
#include "tree-cfg.h"
#include "wide-int.h"
#include "range-op.h"

// Return the upper limit for a type.

static inline wide_int
max_limit (const_tree type)
{
  return wi::max_value (TYPE_PRECISION (type), TYPE_SIGN (type));
}

// Return the lower limit for a type.

static inline wide_int
min_limit (const_tree type)
{
  return wi::min_value (TYPE_PRECISION (type), TYPE_SIGN (type));
}

// If the range of either op1 or op2 is undefined, set the result to
// undefined and return TRUE.

inline bool
empty_range_check (value_range &r,
                   const value_range &op1, const value_range &op2)
{
  if (op1.undefined_p () || op2.undefined_p ())
    {
      r.set_undefined ();
      return true;
    }
  else
    return false;
}

// Return TRUE if shifting by OP is undefined behavior, and set R to
// the appropriate range.

static inline bool
undefined_shift_range_check (value_range &r, tree type, const value_range op)
{
  if (op.undefined_p ())
    {
      r = value_range ();
      return true;
    }

  // Shifting by any values outside [0..prec-1] gets undefined
  // behavior from the shift operation.  We cannot even trust
  // SHIFT_COUNT_TRUNCATED at this stage, because that applies to rtl
  // shifts, and the operation at the tree level may be widened.
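  // Example (editorial illustration, not from the original sources):
  // for a 32-bit type, a shift count range of [0, 40] includes counts
  // of 32 and above, so the check below gives up and returns the full
  // range of TYPE.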
  if (wi::lt_p (op.lower_bound (), 0, TYPE_SIGN (op.type ()))
      || wi::ge_p (op.upper_bound (),
                   TYPE_PRECISION (type), TYPE_SIGN (op.type ())))
    {
      r = value_range (type);
      return true;
    }
  return false;
}

// Return TRUE if 0 is within [WMIN, WMAX].

static inline bool
wi_includes_zero_p (tree type, const wide_int &wmin, const wide_int &wmax)
{
  signop sign = TYPE_SIGN (type);
  return wi::le_p (wmin, 0, sign) && wi::ge_p (wmax, 0, sign);
}

// Return TRUE if [WMIN, WMAX] is the singleton 0.

static inline bool
wi_zero_p (tree type, const wide_int &wmin, const wide_int &wmax)
{
  unsigned prec = TYPE_PRECISION (type);
  return wmin == wmax && wi::eq_p (wmin, wi::zero (prec));
}

// Default wide_int fold operation returns [MIN, MAX].

void
range_operator::wi_fold (value_range &r, tree type,
                         const wide_int &lh_lb ATTRIBUTE_UNUSED,
                         const wide_int &lh_ub ATTRIBUTE_UNUSED,
                         const wide_int &rh_lb ATTRIBUTE_UNUSED,
                         const wide_int &rh_ub ATTRIBUTE_UNUSED) const
{
  gcc_checking_assert (value_range::supports_type_p (type));
  r = value_range (type);
}

// The default for fold is to break all ranges into sub-ranges and
// invoke the wi_fold method on each sub-range pair.
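// For instance (editorial illustration), folding lh = [1,2] U [8,9]
// with rh = [5,5] calls wi_fold on the pairs ([1,2],[5,5]) and
// ([8,9],[5,5]) and unions the two partial results into R.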

bool
range_operator::fold_range (value_range &r, tree type,
                            const value_range &lh,
                            const value_range &rh) const
{
  gcc_checking_assert (value_range::supports_type_p (type));
  if (empty_range_check (r, lh, rh))
    return true;

  value_range tmp;
  r.set_undefined ();
  for (unsigned x = 0; x < lh.num_pairs (); ++x)
    for (unsigned y = 0; y < rh.num_pairs (); ++y)
      {
        wide_int lh_lb = lh.lower_bound (x);
        wide_int lh_ub = lh.upper_bound (x);
        wide_int rh_lb = rh.lower_bound (y);
        wide_int rh_ub = rh.upper_bound (y);
        wi_fold (tmp, type, lh_lb, lh_ub, rh_lb, rh_ub);
        r.union_ (tmp);
        if (r.varying_p ())
          return true;
      }
  return true;
}

// The default for op1_range is to return false.

bool
range_operator::op1_range (value_range &r ATTRIBUTE_UNUSED,
                           tree type ATTRIBUTE_UNUSED,
                           const value_range &lhs ATTRIBUTE_UNUSED,
                           const value_range &op2 ATTRIBUTE_UNUSED) const
{
  return false;
}

// The default for op2_range is to return false.

bool
range_operator::op2_range (value_range &r ATTRIBUTE_UNUSED,
                           tree type ATTRIBUTE_UNUSED,
                           const value_range &lhs ATTRIBUTE_UNUSED,
                           const value_range &op1 ATTRIBUTE_UNUSED) const
{
  return false;
}


// Create and return a range from a pair of wide-ints that are known
// to have overflowed (or underflowed).
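// Example (editorial illustration): for an 8-bit unsigned type, if a
// computation produced the wrapped bounds [250, 2], the code below
// turns them into the anti-range ~[3, 249], i.e. [250,255] U [0,2].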

static void
value_range_from_overflowed_bounds (value_range &r, tree type,
                                    const wide_int &wmin,
                                    const wide_int &wmax)
{
  const signop sgn = TYPE_SIGN (type);
  const unsigned int prec = TYPE_PRECISION (type);

  wide_int tmin = wide_int::from (wmin, prec, sgn);
  wide_int tmax = wide_int::from (wmax, prec, sgn);

  bool covers = false;
  wide_int tem = tmin;
  tmin = tmax + 1;
  if (wi::cmp (tmin, tmax, sgn) < 0)
    covers = true;
  tmax = tem - 1;
  if (wi::cmp (tmax, tem, sgn) > 0)
    covers = true;

  // If the anti-range would cover nothing, drop to varying.
  // Likewise if the anti-range bounds are outside of the type's
  // values.
  if (covers || wi::cmp (tmin, tmax, sgn) > 0)
    r = value_range (type);
  else
    r = value_range (type, tmin, tmax, VR_ANTI_RANGE);
}

// Create and return a range from a pair of wide-ints.  MIN_OVF and
// MAX_OVF describe any overflow that might have occurred while
// calculating WMIN and WMAX respectively.
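// Example (editorial illustration): for an 8-bit unsigned type whose
// overflow wraps, WMIN = 10 with MIN_OVF = OVF_NONE and WMAX = 260
// with MAX_OVF = OVF_OVERFLOW yield the anti-range ~[5, 9]; for a
// type whose overflow does not wrap, the same bounds saturate to
// [10, 255] instead.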

static void
value_range_with_overflow (value_range &r, tree type,
                           const wide_int &wmin, const wide_int &wmax,
                           wi::overflow_type min_ovf = wi::OVF_NONE,
                           wi::overflow_type max_ovf = wi::OVF_NONE)
{
  const signop sgn = TYPE_SIGN (type);
  const unsigned int prec = TYPE_PRECISION (type);
  const bool overflow_wraps = TYPE_OVERFLOW_WRAPS (type);

  // For one bit precision, if max != min, then the range covers all
  // values.
  if (prec == 1 && wi::ne_p (wmax, wmin))
    {
      r = value_range (type);
      return;
    }

  if (overflow_wraps)
    {
      // If overflow wraps, truncate the values and adjust the range,
      // kind, and bounds appropriately.
      if ((min_ovf != wi::OVF_NONE) == (max_ovf != wi::OVF_NONE))
        {
          wide_int tmin = wide_int::from (wmin, prec, sgn);
          wide_int tmax = wide_int::from (wmax, prec, sgn);
          // If the limits are swapped, we wrapped around and cover
          // the entire range.
          if (wi::gt_p (tmin, tmax, sgn))
            r = value_range (type);
          else
            // No overflow or both overflow or underflow.  The range
            // kind stays normal.
            r = value_range (type, tmin, tmax);
          return;
        }

      if ((min_ovf == wi::OVF_UNDERFLOW && max_ovf == wi::OVF_NONE)
          || (max_ovf == wi::OVF_OVERFLOW && min_ovf == wi::OVF_NONE))
        value_range_from_overflowed_bounds (r, type, wmin, wmax);
      else
        // Other underflow and/or overflow, drop to VR_VARYING.
        r = value_range (type);
    }
  else
    {
      // If overflow does not wrap, saturate to [MIN, MAX].
      wide_int new_lb, new_ub;
      if (min_ovf == wi::OVF_UNDERFLOW)
        new_lb = wi::min_value (prec, sgn);
      else if (min_ovf == wi::OVF_OVERFLOW)
        new_lb = wi::max_value (prec, sgn);
      else
        new_lb = wmin;

      if (max_ovf == wi::OVF_UNDERFLOW)
        new_ub = wi::min_value (prec, sgn);
      else if (max_ovf == wi::OVF_OVERFLOW)
        new_ub = wi::max_value (prec, sgn);
      else
        new_ub = wmax;

      r = value_range (type, new_lb, new_ub);
    }
}

// Create and return a range from a pair of wide-ints.  Canonicalize
// the case where the bounds are swapped.  In which case, we transform
// [10,5] into [MIN,5][10,MAX].

static inline void
create_possibly_reversed_range (value_range &r, tree type,
                                const wide_int &new_lb, const wide_int &new_ub)
{
  signop s = TYPE_SIGN (type);
  // If the bounds are swapped, treat the result as if an overflow occurred.
  if (wi::gt_p (new_lb, new_ub, s))
    value_range_from_overflowed_bounds (r, type, new_lb, new_ub);
  else
    // Otherwise it's just a normal range.
    r = value_range (type, new_lb, new_ub);
}

// Return a value_range instance that is a boolean TRUE.

static inline value_range
range_true (tree type)
{
  unsigned prec = TYPE_PRECISION (type);
  return value_range (type, wi::one (prec), wi::one (prec));
}

// Return a value_range instance that is a boolean FALSE.

static inline value_range
range_false (tree type)
{
  unsigned prec = TYPE_PRECISION (type);
  return value_range (type, wi::zero (prec), wi::zero (prec));
}

// Return a value_range that covers both true and false.

static inline value_range
range_true_and_false (tree type)
{
  unsigned prec = TYPE_PRECISION (type);
  return value_range (type, wi::zero (prec), wi::one (prec));
}

enum bool_range_state { BRS_FALSE, BRS_TRUE, BRS_EMPTY, BRS_FULL };

// Return the summary information about boolean range LHS.  Return an
// "interesting" range in R.  For EMPTY or FULL, return the equivalent
// range for TYPE, for BRS_TRUE and BRS_FALSE, return the negation of
// the bool range.
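// Example (editorial illustration): an LHS of [0,0] yields BRS_FALSE,
// [1,1] yields BRS_TRUE, an undefined LHS yields BRS_EMPTY with R set
// to undefined, and anything else (e.g. [0,1]) yields BRS_FULL with R
// set to varying for VAL_TYPE.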

static bool_range_state
get_bool_state (value_range &r, const value_range &lhs, tree val_type)
{
  // If there is no result, then this is unexecutable.
  if (lhs.undefined_p ())
    {
      r.set_undefined ();
      return BRS_EMPTY;
    }

  // If the bounds aren't the same, then it's not a constant.
  if (!wi::eq_p (lhs.upper_bound (), lhs.lower_bound ()))
    {
      r.set_varying (val_type);
      return BRS_FULL;
    }

  if (lhs.zero_p ())
    return BRS_FALSE;

  return BRS_TRUE;
}


class operator_equal : public range_operator
{
public:
  virtual bool fold_range (value_range &r, tree type,
                           const value_range &op1,
                           const value_range &op2) const;
  virtual bool op1_range (value_range &r, tree type,
                          const value_range &lhs,
                          const value_range &val) const;
  virtual bool op2_range (value_range &r, tree type,
                          const value_range &lhs,
                          const value_range &val) const;
} op_equal;

bool
operator_equal::fold_range (value_range &r, tree type,
                            const value_range &op1,
                            const value_range &op2) const
{
  if (empty_range_check (r, op1, op2))
    return true;

  // We can be sure the values are always equal or not if both ranges
  // consist of a single value, and then compare them.
  if (wi::eq_p (op1.lower_bound (), op1.upper_bound ())
      && wi::eq_p (op2.lower_bound (), op2.upper_bound ()))
    {
      if (wi::eq_p (op1.lower_bound (), op2.upper_bound ()))
        r = range_true (type);
      else
        r = range_false (type);
    }
  else
    {
      // If ranges do not intersect, we know the range is not equal,
      // otherwise we don't know anything for sure.
      r = op1;
      r.intersect (op2);
      if (r.undefined_p ())
        r = range_false (type);
      else
        r = range_true_and_false (type);
    }
  return true;
}

bool
operator_equal::op1_range (value_range &r, tree type,
                           const value_range &lhs,
                           const value_range &op2) const
{
  switch (get_bool_state (r, lhs, type))
    {
    case BRS_FALSE:
      // If the result is false, the only time we know anything is
      // if OP2 is a constant.
      if (wi::eq_p (op2.lower_bound (), op2.upper_bound ()))
        {
          r = op2;
          r.invert ();
        }
      else
        r.set_varying (type);
      break;

    case BRS_TRUE:
      // If it's true, the result is the same as OP2.
      r = op2;
      break;

    default:
      break;
    }
  return true;
}

bool
operator_equal::op2_range (value_range &r, tree type,
                           const value_range &lhs,
                           const value_range &op1) const
{
  return operator_equal::op1_range (r, type, lhs, op1);
}


class operator_not_equal : public range_operator
{
public:
  virtual bool fold_range (value_range &r, tree type,
                           const value_range &op1,
                           const value_range &op2) const;
  virtual bool op1_range (value_range &r, tree type,
                          const value_range &lhs,
                          const value_range &op2) const;
  virtual bool op2_range (value_range &r, tree type,
                          const value_range &lhs,
                          const value_range &op1) const;
} op_not_equal;

bool
operator_not_equal::fold_range (value_range &r, tree type,
                                const value_range &op1,
                                const value_range &op2) const
{
  if (empty_range_check (r, op1, op2))
    return true;

  // We can be sure the values are always equal or not if both ranges
  // consist of a single value, and then compare them.
  if (wi::eq_p (op1.lower_bound (), op1.upper_bound ())
      && wi::eq_p (op2.lower_bound (), op2.upper_bound ()))
    {
      if (wi::ne_p (op1.lower_bound (), op2.upper_bound ()))
        r = range_true (type);
      else
        r = range_false (type);
    }
  else
    {
      // If ranges do not intersect, we know the range is not equal,
      // otherwise we don't know anything for sure.
      r = op1;
      r.intersect (op2);
      if (r.undefined_p ())
        r = range_true (type);
      else
        r = range_true_and_false (type);
    }
  return true;
}

bool
operator_not_equal::op1_range (value_range &r, tree type,
                               const value_range &lhs,
                               const value_range &op2) const
{
  switch (get_bool_state (r, lhs, type))
    {
    case BRS_TRUE:
      // If the result is true, the only time we know anything is if
      // OP2 is a constant.
      if (wi::eq_p (op2.lower_bound (), op2.upper_bound ()))
        {
          r = op2;
          r.invert ();
        }
      else
        r.set_varying (type);
      break;

    case BRS_FALSE:
      // If it's false, the result is the same as OP2.
      r = op2;
      break;

    default:
      break;
    }
  return true;
}


bool
operator_not_equal::op2_range (value_range &r, tree type,
                               const value_range &lhs,
                               const value_range &op1) const
{
  return operator_not_equal::op1_range (r, type, lhs, op1);
}

// (X < VAL) produces the range of [MIN, VAL - 1].
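// Example (editorial illustration): for an unsigned char X, building
// X < 5 gives [0, 4]; building X < 0 would need VAL - 1 to underflow,
// so the helper below returns an undefined (empty) range instead.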

static void
build_lt (value_range &r, tree type, const wide_int &val)
{
  wi::overflow_type ov;
  wide_int lim = wi::sub (val, 1, TYPE_SIGN (type), &ov);

  // If val - 1 underflows, check if X < MIN, which is an empty range.
  if (ov)
    r.set_undefined ();
  else
    r = value_range (type, min_limit (type), lim);
}

// (X <= VAL) produces the range of [MIN, VAL].

static void
build_le (value_range &r, tree type, const wide_int &val)
{
  r = value_range (type, min_limit (type), val);
}

// (X > VAL) produces the range of [VAL + 1, MAX].

static void
build_gt (value_range &r, tree type, const wide_int &val)
{
  wi::overflow_type ov;
  wide_int lim = wi::add (val, 1, TYPE_SIGN (type), &ov);
  // If val + 1 overflows, the check is for X > MAX, which is an empty range.
  if (ov)
    r.set_undefined ();
  else
    r = value_range (type, lim, max_limit (type));
}

// (X >= VAL) produces the range of [VAL, MAX].

static void
build_ge (value_range &r, tree type, const wide_int &val)
{
  r = value_range (type, val, max_limit (type));
}


class operator_lt : public range_operator
{
public:
  virtual bool fold_range (value_range &r, tree type,
                           const value_range &op1,
                           const value_range &op2) const;
  virtual bool op1_range (value_range &r, tree type,
                          const value_range &lhs,
                          const value_range &op2) const;
  virtual bool op2_range (value_range &r, tree type,
                          const value_range &lhs,
                          const value_range &op1) const;
} op_lt;

bool
operator_lt::fold_range (value_range &r, tree type,
                         const value_range &op1,
                         const value_range &op2) const
{
  if (empty_range_check (r, op1, op2))
    return true;

  signop sign = TYPE_SIGN (op1.type ());
  gcc_checking_assert (sign == TYPE_SIGN (op2.type ()));

  if (wi::lt_p (op1.upper_bound (), op2.lower_bound (), sign))
    r = range_true (type);
  else if (!wi::lt_p (op1.lower_bound (), op2.upper_bound (), sign))
    r = range_false (type);
  else
    r = range_true_and_false (type);
  return true;
}

bool
operator_lt::op1_range (value_range &r, tree type,
                        const value_range &lhs,
                        const value_range &op2) const
{
  switch (get_bool_state (r, lhs, type))
    {
    case BRS_TRUE:
      build_lt (r, type, op2.upper_bound ());
      break;

    case BRS_FALSE:
      build_ge (r, type, op2.lower_bound ());
      break;

    default:
      break;
    }
  return true;
}

bool
operator_lt::op2_range (value_range &r, tree type,
                        const value_range &lhs,
                        const value_range &op1) const
{
  switch (get_bool_state (r, lhs, type))
    {
    case BRS_FALSE:
      build_le (r, type, op1.upper_bound ());
      break;

    case BRS_TRUE:
      build_gt (r, type, op1.lower_bound ());
      break;

    default:
      break;
    }
  return true;
}


class operator_le : public range_operator
{
public:
  virtual bool fold_range (value_range &r, tree type,
                           const value_range &op1,
                           const value_range &op2) const;
  virtual bool op1_range (value_range &r, tree type,
                          const value_range &lhs,
                          const value_range &op2) const;
  virtual bool op2_range (value_range &r, tree type,
                          const value_range &lhs,
                          const value_range &op1) const;
} op_le;

bool
operator_le::fold_range (value_range &r, tree type,
                         const value_range &op1,
                         const value_range &op2) const
{
  if (empty_range_check (r, op1, op2))
    return true;

  signop sign = TYPE_SIGN (op1.type ());
  gcc_checking_assert (sign == TYPE_SIGN (op2.type ()));

  if (wi::le_p (op1.upper_bound (), op2.lower_bound (), sign))
    r = range_true (type);
  else if (!wi::le_p (op1.lower_bound (), op2.upper_bound (), sign))
    r = range_false (type);
  else
    r = range_true_and_false (type);
  return true;
}

bool
operator_le::op1_range (value_range &r, tree type,
                        const value_range &lhs,
                        const value_range &op2) const
{
  switch (get_bool_state (r, lhs, type))
    {
    case BRS_TRUE:
      build_le (r, type, op2.upper_bound ());
      break;

    case BRS_FALSE:
      build_gt (r, type, op2.lower_bound ());
      break;

    default:
      break;
    }
  return true;
}

bool
operator_le::op2_range (value_range &r, tree type,
                        const value_range &lhs,
                        const value_range &op1) const
{
  switch (get_bool_state (r, lhs, type))
    {
    case BRS_FALSE:
      build_lt (r, type, op1.upper_bound ());
      break;

    case BRS_TRUE:
      build_ge (r, type, op1.lower_bound ());
      break;

    default:
      break;
    }
  return true;
}


class operator_gt : public range_operator
{
public:
  virtual bool fold_range (value_range &r, tree type,
                           const value_range &op1,
                           const value_range &op2) const;
  virtual bool op1_range (value_range &r, tree type,
                          const value_range &lhs,
                          const value_range &op2) const;
  virtual bool op2_range (value_range &r, tree type,
                          const value_range &lhs,
                          const value_range &op1) const;
} op_gt;

bool
operator_gt::fold_range (value_range &r, tree type,
                         const value_range &op1, const value_range &op2) const
{
  if (empty_range_check (r, op1, op2))
    return true;

  signop sign = TYPE_SIGN (op1.type ());
  gcc_checking_assert (sign == TYPE_SIGN (op2.type ()));

  if (wi::gt_p (op1.lower_bound (), op2.upper_bound (), sign))
    r = range_true (type);
  else if (!wi::gt_p (op1.upper_bound (), op2.lower_bound (), sign))
    r = range_false (type);
  else
    r = range_true_and_false (type);
  return true;
}

bool
operator_gt::op1_range (value_range &r, tree type,
                        const value_range &lhs, const value_range &op2) const
{
  switch (get_bool_state (r, lhs, type))
    {
    case BRS_TRUE:
      build_gt (r, type, op2.lower_bound ());
      break;

    case BRS_FALSE:
      build_le (r, type, op2.upper_bound ());
      break;

    default:
      break;
    }
  return true;
}

bool
operator_gt::op2_range (value_range &r, tree type,
                        const value_range &lhs,
                        const value_range &op1) const
{
  switch (get_bool_state (r, lhs, type))
    {
    case BRS_FALSE:
      build_ge (r, type, op1.lower_bound ());
      break;

    case BRS_TRUE:
      build_lt (r, type, op1.upper_bound ());
      break;

    default:
      break;
    }
  return true;
}


class operator_ge : public range_operator
{
public:
  virtual bool fold_range (value_range &r, tree type,
                           const value_range &op1,
                           const value_range &op2) const;
  virtual bool op1_range (value_range &r, tree type,
                          const value_range &lhs,
                          const value_range &op2) const;
  virtual bool op2_range (value_range &r, tree type,
                          const value_range &lhs,
                          const value_range &op1) const;
} op_ge;

bool
operator_ge::fold_range (value_range &r, tree type,
                         const value_range &op1,
                         const value_range &op2) const
{
  if (empty_range_check (r, op1, op2))
    return true;

  signop sign = TYPE_SIGN (op1.type ());
  gcc_checking_assert (sign == TYPE_SIGN (op2.type ()));

  if (wi::ge_p (op1.lower_bound (), op2.upper_bound (), sign))
    r = range_true (type);
  else if (!wi::ge_p (op1.upper_bound (), op2.lower_bound (), sign))
    r = range_false (type);
  else
    r = range_true_and_false (type);
  return true;
}

bool
operator_ge::op1_range (value_range &r, tree type,
                        const value_range &lhs,
                        const value_range &op2) const
{
  switch (get_bool_state (r, lhs, type))
    {
    case BRS_TRUE:
      build_ge (r, type, op2.lower_bound ());
      break;

    case BRS_FALSE:
      build_lt (r, type, op2.upper_bound ());
      break;

    default:
      break;
    }
  return true;
}

bool
operator_ge::op2_range (value_range &r, tree type,
                        const value_range &lhs,
                        const value_range &op1) const
{
  switch (get_bool_state (r, lhs, type))
    {
    case BRS_FALSE:
      build_gt (r, type, op1.lower_bound ());
      break;

    case BRS_TRUE:
      build_le (r, type, op1.upper_bound ());
      break;

    default:
      break;
    }
  return true;
}


class operator_plus : public range_operator
{
public:
  virtual bool op1_range (value_range &r, tree type,
                          const value_range &lhs,
                          const value_range &op2) const;
  virtual bool op2_range (value_range &r, tree type,
                          const value_range &lhs,
                          const value_range &op1) const;
  virtual void wi_fold (value_range &r, tree type,
                        const wide_int &lh_lb,
                        const wide_int &lh_ub,
                        const wide_int &rh_lb,
                        const wide_int &rh_ub) const;
} op_plus;

void
operator_plus::wi_fold (value_range &r, tree type,
                        const wide_int &lh_lb, const wide_int &lh_ub,
                        const wide_int &rh_lb, const wide_int &rh_ub) const
{
  wi::overflow_type ov_lb, ov_ub;
  signop s = TYPE_SIGN (type);
  wide_int new_lb = wi::add (lh_lb, rh_lb, s, &ov_lb);
  wide_int new_ub = wi::add (lh_ub, rh_ub, s, &ov_ub);
  value_range_with_overflow (r, type, new_lb, new_ub, ov_lb, ov_ub);
}

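// Editorial note: since LHS = OP1 + OP2, OP1 can be recovered as
// LHS - OP2, which is what op1_range below does by reusing the
// MINUS_EXPR handler.  For example, LHS = [10, 20] and OP2 = [3, 5]
// restrict OP1 to [5, 17].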
bool
operator_plus::op1_range (value_range &r, tree type,
                          const value_range &lhs,
                          const value_range &op2) const
{
  return range_op_handler (MINUS_EXPR, type)->fold_range (r, type, lhs, op2);
}

bool
operator_plus::op2_range (value_range &r, tree type,
                          const value_range &lhs,
                          const value_range &op1) const
{
  return range_op_handler (MINUS_EXPR, type)->fold_range (r, type, lhs, op1);
}


class operator_minus : public range_operator
{
public:
  virtual bool op1_range (value_range &r, tree type,
                          const value_range &lhs,
                          const value_range &op2) const;
  virtual bool op2_range (value_range &r, tree type,
                          const value_range &lhs,
                          const value_range &op1) const;
  virtual void wi_fold (value_range &r, tree type,
                        const wide_int &lh_lb,
                        const wide_int &lh_ub,
                        const wide_int &rh_lb,
                        const wide_int &rh_ub) const;
} op_minus;

void
operator_minus::wi_fold (value_range &r, tree type,
                         const wide_int &lh_lb, const wide_int &lh_ub,
                         const wide_int &rh_lb, const wide_int &rh_ub) const
{
  wi::overflow_type ov_lb, ov_ub;
  signop s = TYPE_SIGN (type);
  wide_int new_lb = wi::sub (lh_lb, rh_ub, s, &ov_lb);
  wide_int new_ub = wi::sub (lh_ub, rh_lb, s, &ov_ub);
  value_range_with_overflow (r, type, new_lb, new_ub, ov_lb, ov_ub);
}

bool
operator_minus::op1_range (value_range &r, tree type,
                           const value_range &lhs,
                           const value_range &op2) const
{
  return range_op_handler (PLUS_EXPR, type)->fold_range (r, type, lhs, op2);
}

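// Editorial note: from LHS = OP1 - OP2 it follows that
// OP2 = OP1 - LHS, so op2_range below simply folds OP1 - LHS.  For
// example, OP1 = [10, 20] and LHS = [3, 5] restrict OP2 to [5, 17].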
bool
operator_minus::op2_range (value_range &r, tree type,
                           const value_range &lhs,
                           const value_range &op1) const
{
  return fold_range (r, type, op1, lhs);
}


class operator_min : public range_operator
{
public:
  virtual void wi_fold (value_range &r, tree type,
                        const wide_int &lh_lb,
                        const wide_int &lh_ub,
                        const wide_int &rh_lb,
                        const wide_int &rh_ub) const;
} op_min;

void
operator_min::wi_fold (value_range &r, tree type,
                       const wide_int &lh_lb, const wide_int &lh_ub,
                       const wide_int &rh_lb, const wide_int &rh_ub) const
{
  signop s = TYPE_SIGN (type);
  wide_int new_lb = wi::min (lh_lb, rh_lb, s);
  wide_int new_ub = wi::min (lh_ub, rh_ub, s);
  value_range_with_overflow (r, type, new_lb, new_ub);
}


class operator_max : public range_operator
{
public:
  virtual void wi_fold (value_range &r, tree type,
                        const wide_int &lh_lb,
                        const wide_int &lh_ub,
                        const wide_int &rh_lb,
                        const wide_int &rh_ub) const;
} op_max;

void
operator_max::wi_fold (value_range &r, tree type,
                       const wide_int &lh_lb, const wide_int &lh_ub,
                       const wide_int &rh_lb, const wide_int &rh_ub) const
{
  signop s = TYPE_SIGN (type);
  wide_int new_lb = wi::max (lh_lb, rh_lb, s);
  wide_int new_ub = wi::max (lh_ub, rh_ub, s);
  value_range_with_overflow (r, type, new_lb, new_ub);
}


class cross_product_operator : public range_operator
{
public:
  // Perform an operation between two wide-ints and place the result
  // in R.  Return true if the operation overflowed.
  virtual bool wi_op_overflows (wide_int &r,
                                tree type,
                                const wide_int &,
                                const wide_int &) const = 0;

  // Calculate the cross product of two sets of sub-ranges and return it.
  void wi_cross_product (value_range &r, tree type,
                         const wide_int &lh_lb,
                         const wide_int &lh_ub,
                         const wide_int &rh_lb,
                         const wide_int &rh_ub) const;
};

// Calculate the cross product of two sets of ranges and return it.
//
// Multiplications, divisions and shifts are a bit tricky to handle;
// depending on the mix of signs we have in the two ranges, we need to
// operate on different values to get the minimum and maximum values
// for the new range.  One approach is to figure out all the
// variations of range combinations and do the operations.
//
// However, this involves several calls to compare_values and it is
// pretty convoluted.  It's simpler to do the 4 operations (MIN0 OP
// MIN1, MIN0 OP MAX1, MAX0 OP MIN1 and MAX0 OP MAX1) and then
// figure the smallest and largest values to form the new range.
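//
// Example (editorial illustration): for signed [-2, 3] * [5, 7] the
// four products are -10, -14, 15 and 21, so the folded range is
// [-14, 21].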

void
cross_product_operator::wi_cross_product (value_range &r, tree type,
                                          const wide_int &lh_lb,
                                          const wide_int &lh_ub,
                                          const wide_int &rh_lb,
                                          const wide_int &rh_ub) const
{
  wide_int cp1, cp2, cp3, cp4;
  // Default to varying.
  r = value_range (type);

  // Compute the 4 cross operations, bailing if we get an overflow we
  // can't handle.
  if (wi_op_overflows (cp1, type, lh_lb, rh_lb))
    return;
  if (wi::eq_p (lh_lb, lh_ub))
    cp3 = cp1;
  else if (wi_op_overflows (cp3, type, lh_ub, rh_lb))
    return;
  if (wi::eq_p (rh_lb, rh_ub))
    cp2 = cp1;
  else if (wi_op_overflows (cp2, type, lh_lb, rh_ub))
    return;
  if (wi::eq_p (lh_lb, lh_ub))
    cp4 = cp2;
  else if (wi_op_overflows (cp4, type, lh_ub, rh_ub))
    return;

  // Order pairs.
  signop sign = TYPE_SIGN (type);
  if (wi::gt_p (cp1, cp2, sign))
    std::swap (cp1, cp2);
  if (wi::gt_p (cp3, cp4, sign))
    std::swap (cp3, cp4);

  // Choose min and max from the ordered pairs.
  wide_int res_lb = wi::min (cp1, cp3, sign);
  wide_int res_ub = wi::max (cp2, cp4, sign);
  value_range_with_overflow (r, type, res_lb, res_ub);
}


class operator_mult : public cross_product_operator
{
public:
  virtual void wi_fold (value_range &r, tree type,
                        const wide_int &lh_lb,
                        const wide_int &lh_ub,
                        const wide_int &rh_lb,
                        const wide_int &rh_ub) const;
  virtual bool wi_op_overflows (wide_int &res, tree type,
                                const wide_int &w0, const wide_int &w1) const;
} op_mult;

bool
operator_mult::wi_op_overflows (wide_int &res, tree type,
                                const wide_int &w0, const wide_int &w1) const
{
  wi::overflow_type overflow = wi::OVF_NONE;
  signop sign = TYPE_SIGN (type);
  res = wi::mul (w0, w1, sign, &overflow);
  if (overflow && TYPE_OVERFLOW_UNDEFINED (type))
    {
      // For multiplication, the sign of the overflow is given
      // by the comparison of the signs of the operands.
      if (sign == UNSIGNED || w0.sign_mask () == w1.sign_mask ())
        res = wi::max_value (w0.get_precision (), sign);
      else
        res = wi::min_value (w0.get_precision (), sign);
      return false;
    }
  return overflow;
}

void
operator_mult::wi_fold (value_range &r, tree type,
                        const wide_int &lh_lb, const wide_int &lh_ub,
                        const wide_int &rh_lb, const wide_int &rh_ub) const
{
  if (TYPE_OVERFLOW_UNDEFINED (type))
    {
      wi_cross_product (r, type, lh_lb, lh_ub, rh_lb, rh_ub);
      return;
    }

  // Multiply the ranges when overflow wraps.  This is basically fancy
  // code so we don't drop to varying with an unsigned
  // [-3,-1]*[-3,-1].
  //
  // This test requires 2*prec bits if both operands are signed and
  // 2*prec + 2 bits if either is not.  Therefore, extend the values
  // using the sign of the result to PREC2.  From here on out,
  // everything is just signed math no matter what the input types
  // were.
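  //
  // Example (editorial illustration): 8-bit unsigned [253, 255] times
  // [253, 255] is canonicalized to [-3, -1] * [-3, -1]; the products
  // span [1, 9], which is narrower than prec, so the result is [1, 9]
  // rather than varying.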

  signop sign = TYPE_SIGN (type);
  unsigned prec = TYPE_PRECISION (type);
  widest2_int min0 = widest2_int::from (lh_lb, sign);
  widest2_int max0 = widest2_int::from (lh_ub, sign);
  widest2_int min1 = widest2_int::from (rh_lb, sign);
  widest2_int max1 = widest2_int::from (rh_ub, sign);
  widest2_int sizem1 = wi::mask <widest2_int> (prec, false);
  widest2_int size = sizem1 + 1;

  // Canonicalize the intervals.
  if (sign == UNSIGNED)
    {
      if (wi::ltu_p (size, min0 + max0))
        {
          min0 -= size;
          max0 -= size;
        }
      if (wi::ltu_p (size, min1 + max1))
        {
          min1 -= size;
          max1 -= size;
        }
    }

  // Sort the 4 products so that min is in prod0 and max is in
  // prod3.
  widest2_int prod0 = min0 * min1;
  widest2_int prod1 = min0 * max1;
  widest2_int prod2 = max0 * min1;
  widest2_int prod3 = max0 * max1;

  // min0min1 > max0max1
  if (prod0 > prod3)
    std::swap (prod0, prod3);

  // min0max1 > max0min1
  if (prod1 > prod2)
    std::swap (prod1, prod2);

  if (prod0 > prod1)
    std::swap (prod0, prod1);

  if (prod2 > prod3)
    std::swap (prod2, prod3);

  // diff = max - min
  prod2 = prod3 - prod0;
  if (wi::geu_p (prod2, sizem1))
    // The range covers all values.
    r = value_range (type);
  else
    {
      wide_int new_lb = wide_int::from (prod0, prec, sign);
      wide_int new_ub = wide_int::from (prod3, prec, sign);
      create_possibly_reversed_range (r, type, new_lb, new_ub);
    }
}


class operator_div : public cross_product_operator
{
public:
  operator_div (enum tree_code c)  { code = c; }
  virtual void wi_fold (value_range &r, tree type,
                        const wide_int &lh_lb,
                        const wide_int &lh_ub,
                        const wide_int &rh_lb,
                        const wide_int &rh_ub) const;
  virtual bool wi_op_overflows (wide_int &res, tree type,
                                const wide_int &, const wide_int &) const;
private:
  enum tree_code code;
};

bool
operator_div::wi_op_overflows (wide_int &res, tree type,
                               const wide_int &w0, const wide_int &w1) const
{
  if (w1 == 0)
    return true;

  wi::overflow_type overflow = wi::OVF_NONE;
  signop sign = TYPE_SIGN (type);

  switch (code)
    {
    case EXACT_DIV_EXPR:
      // EXACT_DIV_EXPR is implemented as TRUNC_DIV_EXPR in
      // operator_exact_divide.  No need to handle it here.
      gcc_unreachable ();
      break;
    case TRUNC_DIV_EXPR:
      res = wi::div_trunc (w0, w1, sign, &overflow);
      break;
    case FLOOR_DIV_EXPR:
      res = wi::div_floor (w0, w1, sign, &overflow);
      break;
    case ROUND_DIV_EXPR:
      res = wi::div_round (w0, w1, sign, &overflow);
      break;
    case CEIL_DIV_EXPR:
      res = wi::div_ceil (w0, w1, sign, &overflow);
      break;
    default:
      gcc_unreachable ();
    }

  if (overflow && TYPE_OVERFLOW_UNDEFINED (type))
    {
      // For division, the only case is -INF / -1 = +INF.
      res = wi::max_value (w0.get_precision (), sign);
      return false;
    }
  return overflow;
}

void
operator_div::wi_fold (value_range &r, tree type,
                       const wide_int &lh_lb, const wide_int &lh_ub,
                       const wide_int &rh_lb, const wide_int &rh_ub) const
{
  // If we know we will divide by zero, return undefined.
  if (rh_lb == 0 && rh_ub == 0)
    {
      r = value_range ();
      return;
    }

  const wide_int dividend_min = lh_lb;
  const wide_int dividend_max = lh_ub;
  const wide_int divisor_min = rh_lb;
  const wide_int divisor_max = rh_ub;
  signop sign = TYPE_SIGN (type);
  unsigned prec = TYPE_PRECISION (type);
  wide_int extra_min, extra_max;

  // If we know we won't divide by zero, just do the division.
  if (!wi_includes_zero_p (type, divisor_min, divisor_max))
    {
      wi_cross_product (r, type, dividend_min, dividend_max,
                        divisor_min, divisor_max);
      return;
    }

  // If flag_non_call_exceptions, we must not eliminate a division by zero.
  if (cfun->can_throw_non_call_exceptions)
    {
      r = value_range (type);
      return;
    }

  // If we're definitely dividing by zero, there's nothing to do.
  if (wi_zero_p (type, divisor_min, divisor_max))
    {
      r = value_range ();
      return;
    }

  // Perform the division in 2 parts, [LB, -1] and [1, UB], which will
  // skip any division by zero.
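  //
  // Example (editorial illustration): signed [8, 16] / [-2, 4] is
  // computed as [8, 16] / [-2, -1] union [8, 16] / [1, 4], giving
  // [-16, -4] union [2, 16], and the divisor value 0 is simply
  // skipped.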

  // First divide by the negative numbers, if any.
  if (wi::neg_p (divisor_min, sign))
    wi_cross_product (r, type, dividend_min, dividend_max,
                      divisor_min, wi::minus_one (prec));
  else
    r = value_range ();

  // Then divide by the non-zero positive numbers, if any.
  if (wi::gt_p (divisor_max, wi::zero (prec), sign))
    {
      value_range tmp;
      wi_cross_product (tmp, type, dividend_min, dividend_max,
                        wi::one (prec), divisor_max);
      r.union_ (tmp);
    }
  // We shouldn't still have undefined here.
  gcc_checking_assert (!r.undefined_p ());
}

operator_div op_trunc_div (TRUNC_DIV_EXPR);
operator_div op_floor_div (FLOOR_DIV_EXPR);
operator_div op_round_div (ROUND_DIV_EXPR);
operator_div op_ceil_div (CEIL_DIV_EXPR);


class operator_exact_divide : public operator_div
{
public:
  operator_exact_divide () : operator_div (TRUNC_DIV_EXPR) { }
  virtual bool op1_range (value_range &r, tree type,
                          const value_range &lhs,
                          const value_range &op2) const;

} op_exact_div;

bool
operator_exact_divide::op1_range (value_range &r, tree type,
                                  const value_range &lhs,
                                  const value_range &op2) const
{
  tree offset;
  // [2, 4] = op1 / [3,3]   since it's an exact divide, no need to worry
  // about remainders in the endpoints, so op1 = [2,4] * [3,3] = [6,12].
  // We won't bother trying to enumerate all the in-between stuff :-P
  // True accuracy is [6,6][9,9][12,12].  This is unlikely to matter most of
  // the time however.
  // If op2 is a multiple of 2, we would be able to set some non-zero bits.
  if (op2.singleton_p (&offset)
      && !integer_zerop (offset))
    return range_op_handler (MULT_EXPR, type)->fold_range (r, type, lhs, op2);
  return false;
}


class operator_lshift : public cross_product_operator
{
public:
  virtual bool fold_range (value_range &r, tree type,
                           const value_range &op1,
                           const value_range &op2) const;

  virtual void wi_fold (value_range &r, tree type,
                        const wide_int &lh_lb, const wide_int &lh_ub,
                        const wide_int &rh_lb, const wide_int &rh_ub) const;
  virtual bool wi_op_overflows (wide_int &res,
                                tree type,
                                const wide_int &,
                                const wide_int &) const;
} op_lshift;

bool
operator_lshift::fold_range (value_range &r, tree type,
                             const value_range &op1,
                             const value_range &op2) const
{
  if (undefined_shift_range_check (r, type, op2))
    return true;

  // Transform left shifts by constants into multiplies.
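  // For example (editorial illustration), x << 3 is handled as x * 8
  // with wrapping semantics forced below, so an unsigned 8-bit op1 of
  // [0, 10] shifted by [3, 3] folds to [0, 80].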
  if (op2.singleton_p ())
    {
      unsigned shift = op2.lower_bound ().to_uhwi ();
      wide_int tmp = wi::set_bit_in_zero (shift, TYPE_PRECISION (type));
      value_range mult (type, tmp, tmp);

      // Force wrapping multiplication.
      bool saved_flag_wrapv = flag_wrapv;
      bool saved_flag_wrapv_pointer = flag_wrapv_pointer;
      flag_wrapv = 1;
      flag_wrapv_pointer = 1;
      bool b = range_op_handler (MULT_EXPR, type)->fold_range (r, type, op1,
                                                               mult);
      flag_wrapv = saved_flag_wrapv;
      flag_wrapv_pointer = saved_flag_wrapv_pointer;
      return b;
    }
  else
    // Otherwise, invoke the generic fold routine.
    return range_operator::fold_range (r, type, op1, op2);
}

void
operator_lshift::wi_fold (value_range &r, tree type,
                          const wide_int &lh_lb, const wide_int &lh_ub,
                          const wide_int &rh_lb, const wide_int &rh_ub) const
{
  signop sign = TYPE_SIGN (type);
  unsigned prec = TYPE_PRECISION (type);
  int overflow_pos = sign == SIGNED ? prec - 1 : prec;
  int bound_shift = overflow_pos - rh_ub.to_shwi ();
  // If bound_shift == HOST_BITS_PER_WIDE_INT, the lshift can
  // overflow.  However, for that to happen, rh.max needs to be zero,
  // which means rh is a singleton range of zero, which means it
  // should be handled by the lshift fold_range above.
  wide_int bound = wi::set_bit_in_zero (bound_shift, prec);
  wide_int complement = ~(bound - 1);
  wide_int low_bound, high_bound;
  bool in_bounds = false;

  if (sign == UNSIGNED)
    {
      low_bound = bound;
      high_bound = complement;
      if (wi::ltu_p (lh_ub, low_bound))
        {
          // [5, 6] << [1, 2] == [10, 24].
          // We're shifting out only zeroes, the value increases
          // monotonically.
          in_bounds = true;
        }
      else if (wi::ltu_p (high_bound, lh_lb))
        {
          // [0xffffff00, 0xffffffff] << [1, 2]
          // == [0xfffffc00, 0xfffffffe].
          // We're shifting out only ones, the value decreases
          // monotonically.
          in_bounds = true;
        }
    }
  else
    {
      // [-1, 1] << [1, 2] == [-4, 4]
      low_bound = complement;
      high_bound = bound;
      if (wi::lts_p (lh_ub, high_bound)
          && wi::lts_p (low_bound, lh_lb))
        {
          // For non-negative numbers, we're shifting out only zeroes,
          // the value increases monotonically.  For negative numbers,
          // we're shifting out only ones, the value decreases
          // monotonically.
          in_bounds = true;
        }
    }

  if (in_bounds)
    wi_cross_product (r, type, lh_lb, lh_ub, rh_lb, rh_ub);
  else
    r = value_range (type);
}

bool
operator_lshift::wi_op_overflows (wide_int &res, tree type,
                                  const wide_int &w0, const wide_int &w1) const
{
  signop sign = TYPE_SIGN (type);
  if (wi::neg_p (w1))
    {
      // It's unclear from the C standard whether shifts can overflow.
      // The following code ignores overflow; perhaps a C standard
      // interpretation ruling is needed.
      res = wi::rshift (w0, -w1, sign);
    }
  else
    res = wi::lshift (w0, w1);
  return false;
}


class operator_rshift : public cross_product_operator
{
public:
  virtual bool fold_range (value_range &r, tree type,
                           const value_range &op1,
                           const value_range &op2) const;
  virtual void wi_fold (value_range &r, tree type,
                        const wide_int &lh_lb,
                        const wide_int &lh_ub,
                        const wide_int &rh_lb,
                        const wide_int &rh_ub) const;
  virtual bool wi_op_overflows (wide_int &res,
                                tree type,
                                const wide_int &w0,
                                const wide_int &w1) const;
} op_rshift;

bool
operator_rshift::wi_op_overflows (wide_int &res,
                                  tree type,
                                  const wide_int &w0,
                                  const wide_int &w1) const
{
  signop sign = TYPE_SIGN (type);
  if (wi::neg_p (w1))
    res = wi::lshift (w0, -w1);
  else
    {
      // It's unclear from the C standard whether shifts can overflow.
      // The following code ignores overflow; perhaps a C standard
      // interpretation ruling is needed.
      res = wi::rshift (w0, w1, sign);
    }
  return false;
}

bool
operator_rshift::fold_range (value_range &r, tree type,
                             const value_range &op1,
                             const value_range &op2) const
{
  // Invoke the generic fold routine if not undefined.
  if (undefined_shift_range_check (r, type, op2))
    return true;

  return range_operator::fold_range (r, type, op1, op2);
}

void
operator_rshift::wi_fold (value_range &r, tree type,
                          const wide_int &lh_lb, const wide_int &lh_ub,
                          const wide_int &rh_lb, const wide_int &rh_ub) const
{
  wi_cross_product (r, type, lh_lb, lh_ub, rh_lb, rh_ub);
}


class operator_cast: public range_operator
{
public:
  virtual bool fold_range (value_range &r, tree type,
                           const value_range &op1,
                           const value_range &op2) const;
  virtual bool op1_range (value_range &r, tree type,
                          const value_range &lhs,
                          const value_range &op2) const;

} op_convert;

bool
operator_cast::fold_range (value_range &r, tree type ATTRIBUTE_UNUSED,
                           const value_range &lh,
                           const value_range &rh) const
{
  if (empty_range_check (r, lh, rh))
    return true;

  tree inner = lh.type ();
  tree outer = rh.type ();
  gcc_checking_assert (rh.varying_p ());
  gcc_checking_assert (types_compatible_p (outer, type));
  signop inner_sign = TYPE_SIGN (inner);
  signop outer_sign = TYPE_SIGN (outer);
  unsigned inner_prec = TYPE_PRECISION (inner);
  unsigned outer_prec = TYPE_PRECISION (outer);

  // Start with an empty range and add subranges.
  r = value_range ();
  for (unsigned x = 0; x < lh.num_pairs (); ++x)
    {
      wide_int lh_lb = lh.lower_bound (x);
      wide_int lh_ub = lh.upper_bound (x);

      // If the conversion is not truncating we can convert the min
      // and max values and canonicalize the resulting range.
      // Otherwise, we can do the conversion if the size of the range
      // is less than what the precision of the target type can
      // represent.
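      // Example (editorial illustration): casting a 32-bit [65535,
      // 65537] to a 16-bit unsigned type has a range size of 2, so it
      // qualifies; the truncated bounds 65535 and 1 are swapped, and
      // create_possibly_reversed_range yields [0, 1] union
      // [65535, 65535].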
|
|
1587 if (outer_prec >= inner_prec
|
|
1588 || wi::rshift (wi::sub (lh_ub, lh_lb),
|
|
1589 wi::uhwi (outer_prec, inner_prec),
|
|
1590 inner_sign) == 0)
|
|
1591 {
|
|
1592 wide_int min = wide_int::from (lh_lb, outer_prec, inner_sign);
|
|
1593 wide_int max = wide_int::from (lh_ub, outer_prec, inner_sign);
|
|
1594 if (!wi::eq_p (min, wi::min_value (outer_prec, outer_sign))
|
|
1595 || !wi::eq_p (max, wi::max_value (outer_prec, outer_sign)))
|
|
1596 {
|
|
1597 value_range tmp;
|
|
1598 create_possibly_reversed_range (tmp, type, min, max);
|
|
1599 r.union_ (tmp);
|
|
1600 continue;
|
|
1601 }
|
|
1602 }
|
|
1603 r = value_range (type);
|
|
1604 break;
|
|
1605 }
|
|
1606 return true;
|
|
1607 }
|
|
1608
|
|
1609 bool
|
|
1610 operator_cast::op1_range (value_range &r, tree type,
|
|
1611 const value_range &lhs,
|
|
1612 const value_range &op2) const
|
|
1613 {
|
|
1614 tree lhs_type = lhs.type ();
|
|
1615 value_range tmp;
|
|
1616 gcc_checking_assert (types_compatible_p (op2.type(), type));
|
|
1617
|
|
1618 // If the precision of the LHS is smaller than the precision of the
|
|
1619 // RHS, then there would be truncation of the value on the RHS, and
|
|
1620 // so we can tell nothing about it.
|
|
1621 if (TYPE_PRECISION (lhs_type) < TYPE_PRECISION (type))
|
|
1622 {
|
|
1623 // If we've been passed an actual value for the RHS rather than
|
|
1624 // the type, see if it fits the LHS, and if so, then we can allow
|
|
1625 // it.
|
|
1626 fold_range (r, lhs_type, op2, value_range (lhs_type));
|
|
1627 fold_range (tmp, type, r, value_range (type));
|
|
1628 if (tmp == op2)
|
|
1629 {
|
|
1630 // We know the value of the RHS fits in the LHS type, so
|
|
1631 // convert the LHS and remove any values that arent in OP2.
|
|
1632 fold_range (r, type, lhs, value_range (type));
|
|
1633 r.intersect (op2);
|
|
1634 return true;
|
|
1635 }
|
|
1636 // Special case if the LHS is a boolean. A 0 means the RHS is
|
|
1637 // zero, and a 1 means the RHS is non-zero.
|
|
1638 if (TREE_CODE (lhs_type) == BOOLEAN_TYPE)
|
|
1639 {
|
|
1640 // If the LHS is unknown, the result is whatever op2 already is.
|
|
1641 if (!lhs.singleton_p ())
|
|
1642 {
|
|
1643 r = op2;
|
|
1644 return true;
|
|
1645 }
|
|
1646 // Boolean casts are weird in GCC. It's actually an implied
|
|
1647 // mask with 0x01, so all that is known is whether the
|
|
1648 // rightmost bit is 0 or 1, which implies the only value
|
|
1649 // *not* in the RHS is 0 or -1.
|
|
1650 unsigned prec = TYPE_PRECISION (type);
|
|
1651 if (lhs.zero_p ())
|
|
1652 r = value_range (type, wi::minus_one (prec), wi::minus_one (prec),
|
|
1653 VR_ANTI_RANGE);
|
|
1654 else
|
|
1655 r = value_range (type, wi::zero (prec), wi::zero (prec),
|
|
1656 VR_ANTI_RANGE);
|
|
1657 // And intersect it with what we know about op2.
|
|
1658 r.intersect (op2);
|
|
1659 }
|
|
1660 else
|
|
1661 // Otherwise we'll have to assume it's whatever we know about op2.
|
|
1662 r = op2;
|
|
1663 return true;
|
|
1664 }
|
|
1665
|
|
1666 // If the LHS precision is greater than the rhs precision, the LHS
|
|
1667 // range is restricted to the range of the RHS by this
|
|
1668 // assignment.
|
|
1669 if (TYPE_PRECISION (lhs_type) > TYPE_PRECISION (type))
|
|
1670 {
|
|
1671 // Cast the range of the RHS to the type of the LHS.
|
|
1672 fold_range (tmp, lhs_type, value_range (type), value_range (lhs_type));
|
|
1673 // Intersect this with the LHS range will produce the range, which
|
|
1674 // will be cast to the RHS type before returning.
|
|
1675 tmp.intersect (lhs);
|
|
1676 }
|
|
1677 else
|
|
1678 tmp = lhs;
|
|
1679
|
|
1680 // Cast the calculated range to the type of the RHS.
|
|
1681 fold_range (r, type, tmp, value_range (type));
|
|
1682 return true;
|
|
1683 }
|
|
1684
|
|
1685
|
|
1686 class operator_logical_and : public range_operator
|
|
1687 {
|
|
1688 public:
|
|
1689 virtual bool fold_range (value_range &r, tree type,
|
|
1690 const value_range &lh,
|
|
1691 const value_range &rh) const;
|
|
1692 virtual bool op1_range (value_range &r, tree type,
|
|
1693 const value_range &lhs,
|
|
1694 const value_range &op2) const;
|
|
1695 virtual bool op2_range (value_range &r, tree type,
|
|
1696 const value_range &lhs,
|
|
1697 const value_range &op1) const;
|
|
1698 } op_logical_and;
|
|
1699
|
|
1700
|
|
1701 bool
|
|
1702 operator_logical_and::fold_range (value_range &r, tree type,
|
|
1703 const value_range &lh,
|
|
1704 const value_range &rh) const
|
|
1705 {
|
|
1706 if (empty_range_check (r, lh, rh))
|
|
1707 return true;
|
|
1708
|
|
1709 // 0 && anything is 0.
|
|
1710 if ((wi::eq_p (lh.lower_bound (), 0) && wi::eq_p (lh.upper_bound (), 0))
|
|
1711 || (wi::eq_p (lh.lower_bound (), 0) && wi::eq_p (rh.upper_bound (), 0)))
|
|
1712 r = range_false (type);
|
|
1713 else if (lh.contains_p (build_zero_cst (lh.type ()))
|
|
1714 || rh.contains_p (build_zero_cst (rh.type ())))
|
|
1715 // To reach this point, there must be a logical 1 on each side, and
|
|
1716 // the only remaining question is whether there is a zero or not.
|
|
1717 r = range_true_and_false (type);
|
|
1718 else
|
|
1719 r = range_true (type);
|
|
1720 return true;
|
|
1721 }
|
|
1722
|
|
1723 bool
|
|
1724 operator_logical_and::op1_range (value_range &r, tree type,
|
|
1725 const value_range &lhs,
|
|
1726 const value_range &op2 ATTRIBUTE_UNUSED) const
|
|
1727 {
|
|
1728 switch (get_bool_state (r, lhs, type))
|
|
1729 {
|
|
1730 case BRS_TRUE:
|
|
1731 // A true result means both sides of the AND must be true.
|
|
1732 r = range_true (type);
|
|
1733 break;
|
|
1734 default:
|
|
1735 // Any other result means only one side has to be false, the
|
|
1736 // other side can be anything. So we cannot be sure of any
|
|
1737 // result here.
|
|
1738 r = range_true_and_false (type);
|
|
1739 break;
|
|
1740 }
|
|
1741 return true;
|
|
1742 }
|
|
1743
|
|
1744 bool
|
|
1745 operator_logical_and::op2_range (value_range &r, tree type,
|
|
1746 const value_range &lhs,
|
|
1747 const value_range &op1) const
|
|
1748 {
|
|
1749 return operator_logical_and::op1_range (r, type, lhs, op1);
|
|
1750 }
|
|
1751
|
|
1752
|
|
1753 class operator_bitwise_and : public range_operator
|
|
1754 {
|
|
1755 public:
|
|
1756 virtual bool op1_range (value_range &r, tree type,
|
|
1757 const value_range &lhs,
|
|
1758 const value_range &op2) const;
|
|
1759 virtual bool op2_range (value_range &r, tree type,
|
|
1760 const value_range &lhs,
|
|
1761 const value_range &op1) const;
|
|
1762 virtual void wi_fold (value_range &r, tree type,
|
|
1763 const wide_int &lh_lb,
|
|
1764 const wide_int &lh_ub,
|
|
1765 const wide_int &rh_lb,
|
|
1766 const wide_int &rh_ub) const;
|
|
1767 } op_bitwise_and;
|
|
1768
|
|
1769 // Optimize BIT_AND_EXPR and BIT_IOR_EXPR in terms of a mask if
|
|
1770 // possible. Basically, see if we can optimize:
|
|
1771 //
|
|
1772 // [LB, UB] op Z
|
|
1773 // into:
|
|
1774 // [LB op Z, UB op Z]
|
|
1775 //
|
|
1776 // If the optimization was successful, accumulate the range in R and
|
|
1777 // return TRUE.
|
|
1778
|
|
1779 static bool
|
|
1780 wi_optimize_and_or (value_range &r,
|
|
1781 enum tree_code code,
|
|
1782 tree type,
|
|
1783 const wide_int &lh_lb, const wide_int &lh_ub,
|
|
1784 const wide_int &rh_lb, const wide_int &rh_ub)
|
|
1785 {
|
|
1786 // Calculate the singleton mask among the ranges, if any.
|
|
1787 wide_int lower_bound, upper_bound, mask;
|
|
1788 if (wi::eq_p (rh_lb, rh_ub))
|
|
1789 {
|
|
1790 mask = rh_lb;
|
|
1791 lower_bound = lh_lb;
|
|
1792 upper_bound = lh_ub;
|
|
1793 }
|
|
1794 else if (wi::eq_p (lh_lb, lh_ub))
|
|
1795 {
|
|
1796 mask = lh_lb;
|
|
1797 lower_bound = rh_lb;
|
|
1798 upper_bound = rh_ub;
|
|
1799 }
|
|
1800 else
|
|
1801 return false;
|
|
1802
|
|
1803 // If Z is a constant which (for op | its bitwise not) has n
|
|
1804 // consecutive least significant bits cleared followed by m
|
|
1805 // consecutive 1 bits set immediately above it, and either
|
|
1806 // m + n == precision, or (x >> (m + n)) == (y >> (m + n)).
|
|
1807 //
|
|
1808 // The least significant n bits of all the values in the range are
|
|
1809 // cleared or set, the m bits above it are preserved and any bits
|
|
1810 // above these are required to be the same for all values in the
|
|
1811 // range.
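// For example, an unsigned 8-bit range [5, 9] masked with 0xF8 has
// n == 3 and m == 5, so m + n == precision and the result folds to
// [5 & 0xF8, 9 & 0xF8] = [0, 8].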
|
|
1812 wide_int w = mask;
|
|
1813 int m = 0, n = 0;
|
|
1814 if (code == BIT_IOR_EXPR)
|
|
1815 w = ~w;
|
|
1816 if (wi::eq_p (w, 0))
|
|
1817 n = w.get_precision ();
|
|
1818 else
|
|
1819 {
|
|
1820 n = wi::ctz (w);
|
|
1821 w = ~(w | wi::mask (n, false, w.get_precision ()));
|
|
1822 if (wi::eq_p (w, 0))
|
|
1823 m = w.get_precision () - n;
|
|
1824 else
|
|
1825 m = wi::ctz (w) - n;
|
|
1826 }
|
|
1827 wide_int new_mask = wi::mask (m + n, true, w.get_precision ());
|
|
1828 if ((new_mask & lower_bound) != (new_mask & upper_bound))
|
|
1829 return false;
|
|
1830
|
|
1831 wide_int res_lb, res_ub;
|
|
1832 if (code == BIT_AND_EXPR)
|
|
1833 {
|
|
1834 res_lb = wi::bit_and (lower_bound, mask);
|
|
1835 res_ub = wi::bit_and (upper_bound, mask);
|
|
1836 }
|
|
1837 else if (code == BIT_IOR_EXPR)
|
|
1838 {
|
|
1839 res_lb = wi::bit_or (lower_bound, mask);
|
|
1840 res_ub = wi::bit_or (upper_bound, mask);
|
|
1841 }
|
|
1842 else
|
|
1843 gcc_unreachable ();
|
|
1844 value_range_with_overflow (r, type, res_lb, res_ub);
|
|
1845 return true;
|
|
1846 }
|
|
1847
|
|
1848 // For range [LB, UB] compute two wide_int bit masks.
|
|
1849 //
|
|
1850 // In the MAYBE_NONZERO bit mask, if some bit is unset, it means that
|
|
1851 // for all numbers in the range the bit is 0, otherwise it might be 0
|
|
1852 // or 1.
|
|
1853 //
|
|
1854 // In the MUSTBE_NONZERO bit mask, if some bit is set, it means that
|
|
1855 // for all numbers in the range the bit is 1, otherwise it might be 0
|
|
1856 // or 1.
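// For example, for the unsigned 8-bit range [8, 10] this computes
// MAYBE_NONZERO == 0b1011 and MUSTBE_NONZERO == 0b1000.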
|
|
1857
|
|
1858 void
|
|
1859 wi_set_zero_nonzero_bits (tree type,
|
|
1860 const wide_int &lb, const wide_int &ub,
|
|
1861 wide_int &maybe_nonzero,
|
|
1862 wide_int &mustbe_nonzero)
|
|
1863 {
|
|
1864 signop sign = TYPE_SIGN (type);
|
|
1865
|
|
1866 if (wi::eq_p (lb, ub))
|
|
1867 maybe_nonzero = mustbe_nonzero = lb;
|
|
1868 else if (wi::ge_p (lb, 0, sign) || wi::lt_p (ub, 0, sign))
|
|
1869 {
|
|
1870 wide_int xor_mask = lb ^ ub;
|
|
1871 maybe_nonzero = lb | ub;
|
|
1872 mustbe_nonzero = lb & ub;
|
|
1873 if (xor_mask != 0)
|
|
1874 {
|
|
1875 wide_int mask = wi::mask (wi::floor_log2 (xor_mask), false,
|
|
1876 maybe_nonzero.get_precision ());
|
|
1877 maybe_nonzero = maybe_nonzero | mask;
|
|
1878 mustbe_nonzero = wi::bit_and_not (mustbe_nonzero, mask);
|
|
1879 }
|
|
1880 }
|
|
1881 else
|
|
1882 {
|
|
1883 maybe_nonzero = wi::minus_one (lb.get_precision ());
|
|
1884 mustbe_nonzero = wi::zero (lb.get_precision ());
|
|
1885 }
|
|
1886 }
|
|
1887
|
|
1888 void
|
|
1889 operator_bitwise_and::wi_fold (value_range &r, tree type,
|
|
1890 const wide_int &lh_lb,
|
|
1891 const wide_int &lh_ub,
|
|
1892 const wide_int &rh_lb,
|
|
1893 const wide_int &rh_ub) const
|
|
1894 {
|
|
1895 if (wi_optimize_and_or (r, BIT_AND_EXPR, type, lh_lb, lh_ub, rh_lb, rh_ub))
|
|
1896 return;
|
|
1897
|
|
1898 wide_int maybe_nonzero_lh, mustbe_nonzero_lh;
|
|
1899 wide_int maybe_nonzero_rh, mustbe_nonzero_rh;
|
|
1900 wi_set_zero_nonzero_bits (type, lh_lb, lh_ub,
|
|
1901 maybe_nonzero_lh, mustbe_nonzero_lh);
|
|
1902 wi_set_zero_nonzero_bits (type, rh_lb, rh_ub,
|
|
1903 maybe_nonzero_rh, mustbe_nonzero_rh);
|
|
1904
|
|
1905 wide_int new_lb = mustbe_nonzero_lh & mustbe_nonzero_rh;
|
|
1906 wide_int new_ub = maybe_nonzero_lh & maybe_nonzero_rh;
|
|
1907 signop sign = TYPE_SIGN (type);
|
|
1908 unsigned prec = TYPE_PRECISION (type);
|
|
1909 // If both input ranges contain only negative values, we can
|
|
1910 // truncate the result range maximum to the minimum of the
|
|
1911 // input range maxima.
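// For example, [-4, -2] & [-8, -3] folds to [-8, -3] rather than [-8, -1].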
|
|
1912 if (wi::lt_p (lh_ub, 0, sign) && wi::lt_p (rh_ub, 0, sign))
|
|
1913 {
|
|
1914 new_ub = wi::min (new_ub, lh_ub, sign);
|
|
1915 new_ub = wi::min (new_ub, rh_ub, sign);
|
|
1916 }
|
|
1917 // If either input range contains only non-negative values
|
|
1918 // we can truncate the result range maximum to the respective
|
|
1919 // maximum of the input range.
|
|
1920 if (wi::ge_p (lh_lb, 0, sign))
|
|
1921 new_ub = wi::min (new_ub, lh_ub, sign);
|
|
1922 if (wi::ge_p (rh_lb, 0, sign))
|
|
1923 new_ub = wi::min (new_ub, rh_ub, sign);
|
|
1924 // PR68217: In case of signed & sign-bit-CST should
|
|
1925 // result in [-INF, 0] instead of [-INF, INF].
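// For example, x & 0x80000000 in signed 32 bits is either 0x80000000 or 0.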
|
|
1926 if (wi::gt_p (new_lb, new_ub, sign))
|
|
1927 {
|
|
1928 wide_int sign_bit = wi::set_bit_in_zero (prec - 1, prec);
|
|
1929 if (sign == SIGNED
|
|
1930 && ((wi::eq_p (lh_lb, lh_ub)
|
|
1931 && !wi::cmps (lh_lb, sign_bit))
|
|
1932 || (wi::eq_p (rh_lb, rh_ub)
|
|
1933 && !wi::cmps (rh_lb, sign_bit))))
|
|
1934 {
|
|
1935 new_lb = wi::min_value (prec, sign);
|
|
1936 new_ub = wi::zero (prec);
|
|
1937 }
|
|
1938 }
|
|
1939 // If the limits got swapped around, return varying.
|
|
1940 if (wi::gt_p (new_lb, new_ub, sign))
|
|
1941 r = value_range (type);
|
|
1942 else
|
|
1943 value_range_with_overflow (r, type, new_lb, new_ub);
|
|
1944 }
|
|
1945
|
|
1946 bool
|
|
1947 operator_bitwise_and::op1_range (value_range &r, tree type,
|
|
1948 const value_range &lhs,
|
|
1949 const value_range &op2) const
|
|
1950 {
|
|
1951 // If this is really a logical wi_fold, call that.
|
|
1952 if (types_compatible_p (type, boolean_type_node))
|
|
1953 return op_logical_and.op1_range (r, type, lhs, op2);
|
|
1954
|
|
1955 // For now do nothing with bitwise AND of value_range's.
|
|
1956 r.set_varying (type);
|
|
1957 return true;
|
|
1958 }
|
|
1959
|
|
1960 bool
|
|
1961 operator_bitwise_and::op2_range (value_range &r, tree type,
|
|
1962 const value_range &lhs,
|
|
1963 const value_range &op1) const
|
|
1964 {
|
|
1965 return operator_bitwise_and::op1_range (r, type, lhs, op1);
|
|
1966 }
|
|
1967
|
|
1968
|
|
1969 class operator_logical_or : public range_operator
|
|
1970 {
|
|
1971 public:
|
|
1972 virtual bool fold_range (value_range &r, tree type,
|
|
1973 const value_range &lh,
|
|
1974 const value_range &rh) const;
|
|
1975 virtual bool op1_range (value_range &r, tree type,
|
|
1976 const value_range &lhs,
|
|
1977 const value_range &op2) const;
|
|
1978 virtual bool op2_range (value_range &r, tree type,
|
|
1979 const value_range &lhs,
|
|
1980 const value_range &op1) const;
|
|
1981 } op_logical_or;
|
|
1982
|
|
1983 bool
|
|
1984 operator_logical_or::fold_range (value_range &r, tree type ATTRIBUTE_UNUSED,
|
|
1985 const value_range &lh,
|
|
1986 const value_range &rh) const
|
|
1987 {
|
|
1988 if (empty_range_check (r, lh, rh))
|
|
1989 return true;
|
|
1990
|
|
1991 r = lh;
|
|
1992 r.union_ (rh);
|
|
1993 return true;
|
|
1994 }
|
|
1995
|
|
1996 bool
|
|
1997 operator_logical_or::op1_range (value_range &r, tree type,
|
|
1998 const value_range &lhs,
|
|
1999 const value_range &op2 ATTRIBUTE_UNUSED) const
|
|
2000 {
|
|
2001 switch (get_bool_state (r, lhs, type))
|
|
2002 {
|
|
2003 case BRS_FALSE:
|
|
2004 // A false result means both sides of the OR must be false.
|
|
2005 r = range_false (type);
|
|
2006 break;
|
|
2007 default:
|
|
2008 // Any other result means only one side has to be true, the
|
|
2009 // other side can be anything. So we can't be sure of any result
|
|
2010 // here.
|
|
2011 r = range_true_and_false (type);
|
|
2012 break;
|
|
2013 }
|
|
2014 return true;
|
|
2015 }
|
|
2016
|
|
2017 bool
|
|
2018 operator_logical_or::op2_range (value_range &r, tree type,
|
|
2019 const value_range &lhs,
|
|
2020 const value_range &op1) const
|
|
2021 {
|
|
2022 return operator_logical_or::op1_range (r, type, lhs, op1);
|
|
2023 }
|
|
2024
|
|
2025
|
|
2026 class operator_bitwise_or : public range_operator
|
|
2027 {
|
|
2028 public:
|
|
2029 virtual bool op1_range (value_range &r, tree type,
|
|
2030 const value_range &lhs,
|
|
2031 const value_range &op2) const;
|
|
2032 virtual bool op2_range (value_range &r, tree type,
|
|
2033 const value_range &lhs,
|
|
2034 const value_range &op1) const;
|
|
2035 virtual void wi_fold (value_range &r, tree type,
|
|
2036 const wide_int &lh_lb,
|
|
2037 const wide_int &lh_ub,
|
|
2038 const wide_int &rh_lb,
|
|
2039 const wide_int &rh_ub) const;
|
|
2040 } op_bitwise_or;
|
|
2041
|
|
2042 void
|
|
2043 operator_bitwise_or::wi_fold (value_range &r, tree type,
|
|
2044 const wide_int &lh_lb,
|
|
2045 const wide_int &lh_ub,
|
|
2046 const wide_int &rh_lb,
|
|
2047 const wide_int &rh_ub) const
|
|
2048 {
|
|
2049 if (wi_optimize_and_or (r, BIT_IOR_EXPR, type, lh_lb, lh_ub, rh_lb, rh_ub))
|
|
2050 return;
|
|
2051
|
|
2052 wide_int maybe_nonzero_lh, mustbe_nonzero_lh;
|
|
2053 wide_int maybe_nonzero_rh, mustbe_nonzero_rh;
|
|
2054 wi_set_zero_nonzero_bits (type, lh_lb, lh_ub,
|
|
2055 maybe_nonzero_lh, mustbe_nonzero_lh);
|
|
2056 wi_set_zero_nonzero_bits (type, rh_lb, rh_ub,
|
|
2057 maybe_nonzero_rh, mustbe_nonzero_rh);
|
|
2058 wide_int new_lb = mustbe_nonzero_lh | mustbe_nonzero_rh;
|
|
2059 wide_int new_ub = maybe_nonzero_lh | maybe_nonzero_rh;
|
|
2060 signop sign = TYPE_SIGN (type);
|
|
2061 // If the input ranges contain only positive values we can
|
|
2062 // truncate the minimum of the result range to the maximum
|
|
2063 // of the input range minima.
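// For example, [4, 8] | [1, 2] folds to [4, 15]: the lower bound is
// raised to 4 while the upper bound covers every possibly-set bit.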
|
|
2064 if (wi::ge_p (lh_lb, 0, sign)
|
|
2065 && wi::ge_p (rh_lb, 0, sign))
|
|
2066 {
|
|
2067 new_lb = wi::max (new_lb, lh_lb, sign);
|
|
2068 new_lb = wi::max (new_lb, rh_lb, sign);
|
|
2069 }
|
|
2070 // If either input range contains only negative values
|
|
2071 // we can truncate the minimum of the result range to the
|
|
2072 // respective minimum of the input range.
|
|
2073 if (wi::lt_p (lh_ub, 0, sign))
|
|
2074 new_lb = wi::max (new_lb, lh_lb, sign);
|
|
2075 if (wi::lt_p (rh_ub, 0, sign))
|
|
2076 new_lb = wi::max (new_lb, rh_lb, sign);
|
|
2077 // If the limits got swapped around, return varying.
|
|
2078 if (wi::gt_p (new_lb, new_ub, sign))
|
|
2079 r = value_range (type);
|
|
2080 else
|
|
2081 value_range_with_overflow (r, type, new_lb, new_ub);
|
|
2082 }
|
|
2083
|
|
2084 bool
|
|
2085 operator_bitwise_or::op1_range (value_range &r, tree type,
|
|
2086 const value_range &lhs,
|
|
2087 const value_range &op2) const
|
|
2088 {
|
|
2089 // If this is really a logical wi_fold, call that.
|
|
2090 if (types_compatible_p (type, boolean_type_node))
|
|
2091 return op_logical_or.op1_range (r, type, lhs, op2);
|
|
2092
|
|
2093 // For now do nothing with bitwise OR of value_range's.
|
|
2094 r.set_varying (type);
|
|
2095 return true;
|
|
2096 }
|
|
2097
|
|
2098 bool
|
|
2099 operator_bitwise_or::op2_range (value_range &r, tree type,
|
|
2100 const value_range &lhs,
|
|
2101 const value_range &op1) const
|
|
2102 {
|
|
2103 return operator_bitwise_or::op1_range (r, type, lhs, op1);
|
|
2104 }
|
|
2105
|
|
2106
|
|
2107 class operator_bitwise_xor : public range_operator
|
|
2108 {
|
|
2109 public:
|
|
2110 virtual void wi_fold (value_range &r, tree type,
|
|
2111 const wide_int &lh_lb,
|
|
2112 const wide_int &lh_ub,
|
|
2113 const wide_int &rh_lb,
|
|
2114 const wide_int &rh_ub) const;
|
|
2115 } op_bitwise_xor;
|
|
2116
|
|
2117 void
|
|
2118 operator_bitwise_xor::wi_fold (value_range &r, tree type,
|
|
2119 const wide_int &lh_lb,
|
|
2120 const wide_int &lh_ub,
|
|
2121 const wide_int &rh_lb,
|
|
2122 const wide_int &rh_ub) const
|
|
2123 {
|
|
2124 signop sign = TYPE_SIGN (type);
|
|
2125 wide_int maybe_nonzero_lh, mustbe_nonzero_lh;
|
|
2126 wide_int maybe_nonzero_rh, mustbe_nonzero_rh;
|
|
2127 wi_set_zero_nonzero_bits (type, lh_lb, lh_ub,
|
|
2128 maybe_nonzero_lh, mustbe_nonzero_lh);
|
|
2129 wi_set_zero_nonzero_bits (type, rh_lb, rh_ub,
|
|
2130 maybe_nonzero_rh, mustbe_nonzero_rh);
|
|
2131
|
|
2132 wide_int result_zero_bits = ((mustbe_nonzero_lh & mustbe_nonzero_rh)
|
|
2133 | ~(maybe_nonzero_lh | maybe_nonzero_rh));
|
|
2134 wide_int result_one_bits
|
|
2135 = (wi::bit_and_not (mustbe_nonzero_lh, maybe_nonzero_rh)
|
|
2136 | wi::bit_and_not (mustbe_nonzero_rh, maybe_nonzero_lh));
|
|
2137 wide_int new_ub = ~result_zero_bits;
|
|
2138 wide_int new_lb = result_one_bits;
|
|
2139
|
|
2140 // If the range has all positive or all negative values, the result
|
|
2141 // is better than VARYING.
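// For example, [0, 1] ^ [2, 3] folds to [2, 3]: bit 1 is known set and
// all bits above it are known clear.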
|
|
2142 if (wi::lt_p (new_lb, 0, sign) || wi::ge_p (new_ub, 0, sign))
|
|
2143 value_range_with_overflow (r, type, new_lb, new_ub);
|
|
2144 else
|
|
2145 r = value_range (type);
|
|
2146 }
|
|
2147
|
|
2148
|
|
2149 class operator_trunc_mod : public range_operator
|
|
2150 {
|
|
2151 public:
|
|
2152 virtual void wi_fold (value_range &r, tree type,
|
|
2153 const wide_int &lh_lb,
|
|
2154 const wide_int &lh_ub,
|
|
2155 const wide_int &rh_lb,
|
|
2156 const wide_int &rh_ub) const;
|
|
2157 } op_trunc_mod;
|
|
2158
|
|
2159 void
|
|
2160 operator_trunc_mod::wi_fold (value_range &r, tree type,
|
|
2161 const wide_int &lh_lb,
|
|
2162 const wide_int &lh_ub,
|
|
2163 const wide_int &rh_lb,
|
|
2164 const wide_int &rh_ub) const
|
|
2165 {
|
|
2166 wide_int new_lb, new_ub, tmp;
|
|
2167 signop sign = TYPE_SIGN (type);
|
|
2168 unsigned prec = TYPE_PRECISION (type);
|
|
2169
|
|
2170 // Mod 0 is undefined. Return undefined.
|
|
2171 if (wi_zero_p (type, rh_lb, rh_ub))
|
|
2172 {
|
|
2173 r = value_range ();
|
|
2174 return;
|
|
2175 }
|
|
2176
|
|
2177 // ABS (A % B) < ABS (B) and either 0 <= A % B <= A or A <= A % B <= 0.
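// For example, with signed A in [0, 5] and B in [3, 10], A % B is in [0, 5].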
|
|
2178 new_ub = rh_ub - 1;
|
|
2179 if (sign == SIGNED)
|
|
2180 {
|
|
2181 tmp = -1 - rh_lb;
|
|
2182 new_ub = wi::smax (new_ub, tmp);
|
|
2183 }
|
|
2184
|
|
2185 if (sign == UNSIGNED)
|
|
2186 new_lb = wi::zero (prec);
|
|
2187 else
|
|
2188 {
|
|
2189 new_lb = -new_ub;
|
|
2190 tmp = lh_lb;
|
|
2191 if (wi::gts_p (tmp, 0))
|
|
2192 tmp = wi::zero (prec);
|
|
2193 new_lb = wi::smax (new_lb, tmp);
|
|
2194 }
|
|
2195 tmp = lh_ub;
|
|
2196 if (sign == SIGNED && wi::neg_p (tmp))
|
|
2197 tmp = wi::zero (prec);
|
|
2198 new_ub = wi::min (new_ub, tmp, sign);
|
|
2199
|
|
2200 value_range_with_overflow (r, type, new_lb, new_ub);
|
|
2201 }
|
|
2202
|
|
2203
|
|
2204 class operator_logical_not : public range_operator
|
|
2205 {
|
|
2206 public:
|
|
2207 virtual bool fold_range (value_range &r, tree type,
|
|
2208 const value_range &lh,
|
|
2209 const value_range &rh) const;
|
|
2210 virtual bool op1_range (value_range &r, tree type,
|
|
2211 const value_range &lhs,
|
|
2212 const value_range &op2) const;
|
|
2213 } op_logical_not;
|
|
2214
|
|
2215 // Folding a logical NOT, oddly enough, involves doing nothing on the
|
|
2216 // forward pass through. During the initial walk backwards, the
|
|
2217 // logical NOT reversed the desired outcome on the way back, so on the
|
|
2218 // way forward all we do is pass the range forward.
|
|
2219 //
|
|
2220 // b_2 = x_1 < 20
|
|
2221 // b_3 = !b_2
|
|
2222 // if (b_3)
|
|
2223 // to determine the TRUE branch, walking backward
|
|
2224 // if (b_3) if ([1,1])
|
|
2225 // b_3 = !b_2 [1,1] = ![0,0]
|
|
2226 // b_2 = x_1 < 20 [0,0] = x_1 < 20, false, so x_1 == [20, 255]
|
|
2227 // which is the result we are looking for, so pass it through.
|
|
2228
|
|
2229 bool
|
|
2230 operator_logical_not::fold_range (value_range &r, tree type,
|
|
2231 const value_range &lh,
|
|
2232 const value_range &rh ATTRIBUTE_UNUSED) const
|
|
2233 {
|
|
2234 if (empty_range_check (r, lh, rh))
|
|
2235 return true;
|
|
2236
|
|
2237 if (lh.varying_p () || lh.undefined_p ())
|
|
2238 r = lh;
|
|
2239 else
|
|
2240 {
|
|
2241 r = lh;
|
|
2242 r.invert ();
|
|
2243 }
|
|
2244 gcc_checking_assert (lh.type () == type);
|
|
2245 return true;
|
|
2246 }
|
|
2247
|
|
2248 bool
|
|
2249 operator_logical_not::op1_range (value_range &r,
|
|
2250 tree type ATTRIBUTE_UNUSED,
|
|
2251 const value_range &lhs,
|
|
2252 const value_range &op2 ATTRIBUTE_UNUSED) const
|
|
2253 {
|
|
2254 r = lhs;
|
|
2255 if (!lhs.varying_p () && !lhs.undefined_p ())
|
|
2256 r.invert ();
|
|
2257 return true;
|
|
2258 }
|
|
2259
|
|
2260
|
|
2261 class operator_bitwise_not : public range_operator
|
|
2262 {
|
|
2263 public:
|
|
2264 virtual bool fold_range (value_range &r, tree type,
|
|
2265 const value_range &lh,
|
|
2266 const value_range &rh) const;
|
|
2267 virtual bool op1_range (value_range &r, tree type,
|
|
2268 const value_range &lhs,
|
|
2269 const value_range &op2) const;
|
|
2270 } op_bitwise_not;
|
|
2271
|
|
2272 bool
|
|
2273 operator_bitwise_not::fold_range (value_range &r, tree type,
|
|
2274 const value_range &lh,
|
|
2275 const value_range &rh) const
|
|
2276 {
|
|
2277 if (empty_range_check (r, lh, rh))
|
|
2278 return true;
|
|
2279
|
|
2280 // ~X is simply -1 - X.
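// For example, ~[0, 3] in a signed type is [-1 - 3, -1 - 0] = [-4, -1].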
|
|
2281 value_range minusone (type, wi::minus_one (TYPE_PRECISION (type)),
|
|
2282 wi::minus_one (TYPE_PRECISION (type)));
|
|
2283 return range_op_handler (MINUS_EXPR, type)->fold_range (r, type, minusone,
|
|
2284 lh);
|
|
2285 }
|
|
2286
|
|
2287 bool
|
|
2288 operator_bitwise_not::op1_range (value_range &r, tree type,
|
|
2289 const value_range &lhs,
|
|
2290 const value_range &op2) const
|
|
2291 {
|
|
2292 // ~X is -1 - X, and since bitwise NOT is involutory, do it again.
|
|
2293 return fold_range (r, type, lhs, op2);
|
|
2294 }
|
|
2295
|
|
2296
|
|
2297 class operator_cst : public range_operator
|
|
2298 {
|
|
2299 public:
|
|
2300 virtual bool fold_range (value_range &r, tree type,
|
|
2301 const value_range &op1,
|
|
2302 const value_range &op2) const;
|
|
2303 } op_integer_cst;
|
|
2304
|
|
2305 bool
|
|
2306 operator_cst::fold_range (value_range &r, tree type ATTRIBUTE_UNUSED,
|
|
2307 const value_range &lh,
|
|
2308 const value_range &rh ATTRIBUTE_UNUSED) const
|
|
2309 {
|
|
2310 r = lh;
|
|
2311 return true;
|
|
2312 }
|
|
2313
|
|
2314
|
|
2315 class operator_identity : public range_operator
|
|
2316 {
|
|
2317 public:
|
|
2318 virtual bool fold_range (value_range &r, tree type,
|
|
2319 const value_range &op1,
|
|
2320 const value_range &op2) const;
|
|
2321 virtual bool op1_range (value_range &r, tree type,
|
|
2322 const value_range &lhs,
|
|
2323 const value_range &op2) const;
|
|
2324 } op_identity;
|
|
2325
|
|
2326 bool
|
|
2327 operator_identity::fold_range (value_range &r, tree type ATTRIBUTE_UNUSED,
|
|
2328 const value_range &lh,
|
|
2329 const value_range &rh ATTRIBUTE_UNUSED) const
|
|
2330 {
|
|
2331 r = lh;
|
|
2332 return true;
|
|
2333 }
|
|
2334
|
|
2335 bool
|
|
2336 operator_identity::op1_range (value_range &r, tree type ATTRIBUTE_UNUSED,
|
|
2337 const value_range &lhs,
|
|
2338 const value_range &op2 ATTRIBUTE_UNUSED) const
|
|
2339 {
|
|
2340 r = lhs;
|
|
2341 return true;
|
|
2342 }
|
|
2343
|
|
2344
|
|
2345 class operator_abs : public range_operator
|
|
2346 {
|
|
2347 public:
|
|
2348 virtual void wi_fold (value_range &r, tree type,
|
|
2349 const wide_int &lh_lb,
|
|
2350 const wide_int &lh_ub,
|
|
2351 const wide_int &rh_lb,
|
|
2352 const wide_int &rh_ub) const;
|
|
2353 virtual bool op1_range (value_range &r, tree type,
|
|
2354 const value_range &lhs,
|
|
2355 const value_range &op2) const;
|
|
2356 } op_abs;
|
|
2357
|
|
2358 void
|
|
2359 operator_abs::wi_fold (value_range &r, tree type,
|
|
2360 const wide_int &lh_lb, const wide_int &lh_ub,
|
|
2361 const wide_int &rh_lb ATTRIBUTE_UNUSED,
|
|
2362 const wide_int &rh_ub ATTRIBUTE_UNUSED) const
|
|
2363 {
|
|
2364 wide_int min, max;
|
|
2365 signop sign = TYPE_SIGN (type);
|
|
2366 unsigned prec = TYPE_PRECISION (type);
|
|
2367
|
|
2368 // Pass through LH for the easy cases.
|
|
2369 if (sign == UNSIGNED || wi::ge_p (lh_lb, 0, sign))
|
|
2370 {
|
|
2371 r = value_range (type, lh_lb, lh_ub);
|
|
2372 return;
|
|
2373 }
|
|
2374
|
|
2375 // -TYPE_MIN_VALUE = TYPE_MIN_VALUE with flag_wrapv so we can't get
|
|
2376 // a useful range.
|
|
2377 wide_int min_value = wi::min_value (prec, sign);
|
|
2378 wide_int max_value = wi::max_value (prec, sign);
|
|
2379 if (!TYPE_OVERFLOW_UNDEFINED (type) && wi::eq_p (lh_lb, min_value))
|
|
2380 {
|
|
2381 r = value_range (type);
|
|
2382 return;
|
|
2383 }
|
|
2384
|
|
2385 // ABS_EXPR may flip the range around, if the original range
|
|
2386 // included negative values.
|
|
2387 if (wi::eq_p (lh_lb, min_value))
|
|
2388 min = max_value;
|
|
2389 else
|
|
2390 min = wi::abs (lh_lb);
|
|
2391 if (wi::eq_p (lh_ub, min_value))
|
|
2392 max = max_value;
|
|
2393 else
|
|
2394 max = wi::abs (lh_ub);
|
|
2395
|
|
2396 // If the range contains zero then we know that the minimum value in the
|
|
2397 // range will be zero.
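// For example, ABS ([-5, 3]) folds to [0, 5].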
|
|
2398 if (wi::le_p (lh_lb, 0, sign) && wi::ge_p (lh_ub, 0, sign))
|
|
2399 {
|
|
2400 if (wi::gt_p (min, max, sign))
|
|
2401 max = min;
|
|
2402 min = wi::zero (prec);
|
|
2403 }
|
|
2404 else
|
|
2405 {
|
|
2406 // If the range was reversed, swap MIN and MAX.
|
|
2407 if (wi::gt_p (min, max, sign))
|
|
2408 std::swap (min, max);
|
|
2409 }
|
|
2410
|
|
2411 // If the new range has its limits swapped around (MIN > MAX), then
|
|
2412 // the operation caused one of them to wrap around. The only thing
|
|
2413 // we know is that the result is positive.
|
|
2414 if (wi::gt_p (min, max, sign))
|
|
2415 {
|
|
2416 min = wi::zero (prec);
|
|
2417 max = max_value;
|
|
2418 }
|
|
2419 r = value_range (type, min, max);
|
|
2420 }
|
|
2421
|
|
2422 bool
|
|
2423 operator_abs::op1_range (value_range &r, tree type,
|
|
2424 const value_range &lhs,
|
|
2425 const value_range &op2) const
|
|
2426 {
|
|
2427 if (empty_range_check (r, lhs, op2))
|
|
2428 return true;
|
|
2429 if (TYPE_UNSIGNED (type))
|
|
2430 {
|
|
2431 r = lhs;
|
|
2432 return true;
|
|
2433 }
|
|
2434 // Start with the positives because negatives are an impossible result.
|
|
2435 value_range positives = range_positives (type);
|
|
2436 positives.intersect (lhs);
|
|
2437 r = positives;
|
|
2438 // Then add the negative of each pair:
|
|
2439 // ABS(op1) = [5,20] would yield op1 => [-20,-5][5,20].
|
|
2440 for (unsigned i = 0; i < positives.num_pairs (); ++i)
|
|
2441 r.union_ (value_range (type,
|
|
2442 -positives.upper_bound (i),
|
|
2443 -positives.lower_bound (i)));
|
|
2444 return true;
|
|
2445 }
|
|
2446
|
|
2447
|
|
2448 class operator_absu : public range_operator
|
|
2449 {
|
|
2450 public:
|
|
2451 virtual void wi_fold (value_range &r, tree type,
|
|
2452 const wide_int &lh_lb, const wide_int &lh_ub,
|
|
2453 const wide_int &rh_lb, const wide_int &rh_ub) const;
|
|
2454 } op_absu;
|
|
2455
|
|
2456 void
|
|
2457 operator_absu::wi_fold (value_range &r, tree type,
|
|
2458 const wide_int &lh_lb, const wide_int &lh_ub,
|
|
2459 const wide_int &rh_lb ATTRIBUTE_UNUSED,
|
|
2460 const wide_int &rh_ub ATTRIBUTE_UNUSED) const
|
|
2461 {
|
|
2462 wide_int new_lb, new_ub;
|
|
2463
|
|
2464 // Pass through VR0 for the easy cases.
|
|
2465 if (wi::ges_p (lh_lb, 0))
|
|
2466 {
|
|
2467 new_lb = lh_lb;
|
|
2468 new_ub = lh_ub;
|
|
2469 }
|
|
2470 else
|
|
2471 {
|
|
2472 new_lb = wi::abs (lh_lb);
|
|
2473 new_ub = wi::abs (lh_ub);
|
|
2474
|
|
2475 // If the range contains zero then we know that the minimum
|
|
2476 // value in the range will be zero.
|
|
2477 if (wi::ges_p (lh_ub, 0))
|
|
2478 {
|
|
2479 if (wi::gtu_p (new_lb, new_ub))
|
|
2480 new_ub = new_lb;
|
|
2481 new_lb = wi::zero (TYPE_PRECISION (type));
|
|
2482 }
|
|
2483 else
|
|
2484 std::swap (new_lb, new_ub);
|
|
2485 }
|
|
2486
|
|
2487 gcc_checking_assert (TYPE_UNSIGNED (type));
|
|
2488 r = value_range (type, new_lb, new_ub);
|
|
2489 }
|
|
2490
|
|
2491
|
|
2492 class operator_negate : public range_operator
|
|
2493 {
|
|
2494 public:
|
|
2495 virtual bool fold_range (value_range &r, tree type,
|
|
2496 const value_range &op1,
|
|
2497 const value_range &op2) const;
|
|
2498 virtual bool op1_range (value_range &r, tree type,
|
|
2499 const value_range &lhs,
|
|
2500 const value_range &op2) const;
|
|
2501 } op_negate;
|
|
2502
|
|
2503 bool
|
|
2504 operator_negate::fold_range (value_range &r, tree type,
|
|
2505 const value_range &lh,
|
|
2506 const value_range &rh) const
|
|
2507 {
|
|
2508 if (empty_range_check (r, lh, rh))
|
|
2509 return true;
|
|
2510 // -X is simply 0 - X.
|
|
2511 return range_op_handler (MINUS_EXPR, type)->fold_range (r, type,
|
|
2512 range_zero (type),
|
|
2513 lh);
|
|
2514 }
|
|
2515
|
|
2516 bool
|
|
2517 operator_negate::op1_range (value_range &r, tree type,
|
|
2518 const value_range &lhs,
|
|
2519 const value_range &op2) const
|
|
2520 {
|
|
2521 // NEGATE is involutory.
|
|
2522 return fold_range (r, type, lhs, op2);
|
|
2523 }
|
|
2524
|
|
2525
|
|
2526 class operator_addr_expr : public range_operator
|
|
2527 {
|
|
2528 public:
|
|
2529 virtual bool fold_range (value_range &r, tree type,
|
|
2530 const value_range &op1,
|
|
2531 const value_range &op2) const;
|
|
2532 virtual bool op1_range (value_range &r, tree type,
|
|
2533 const value_range &lhs,
|
|
2534 const value_range &op2) const;
|
|
2535 } op_addr;
|
|
2536
|
|
2537 bool
|
|
2538 operator_addr_expr::fold_range (value_range &r, tree type,
|
|
2539 const value_range &lh,
|
|
2540 const value_range &rh) const
|
|
2541 {
|
|
2542 if (empty_range_check (r, lh, rh))
|
|
2543 return true;
|
|
2544
|
|
2545 // Return a non-null pointer of the LHS type (passed in op2).
|
|
2546 if (lh.zero_p ())
|
|
2547 r = range_zero (type);
|
|
2548 else if (!lh.contains_p (build_zero_cst (lh.type ())))
|
|
2549 r = range_nonzero (type);
|
|
2550 else
|
|
2551 r = value_range (type);
|
|
2552 return true;
|
|
2553 }
|
|
2554
|
|
2555 bool
|
|
2556 operator_addr_expr::op1_range (value_range &r, tree type,
|
|
2557 const value_range &lhs,
|
|
2558 const value_range &op2) const
|
|
2559 {
|
|
2560 return operator_addr_expr::fold_range (r, type, lhs, op2);
|
|
2561 }
|
|
2562
|
|
2563
|
|
2564 class pointer_plus_operator : public range_operator
|
|
2565 {
|
|
2566 public:
|
|
2567 virtual void wi_fold (value_range &r, tree type,
|
|
2568 const wide_int &lh_lb,
|
|
2569 const wide_int &lh_ub,
|
|
2570 const wide_int &rh_lb,
|
|
2571 const wide_int &rh_ub) const;
|
|
2572 } op_pointer_plus;
|
|
2573
|
|
2574 void
|
|
2575 pointer_plus_operator::wi_fold (value_range &r, tree type,
|
|
2576 const wide_int &lh_lb,
|
|
2577 const wide_int &lh_ub,
|
|
2578 const wide_int &rh_lb,
|
|
2579 const wide_int &rh_ub) const
|
|
2580 {
|
|
2581 // For pointer types, we are really only interested in asserting
|
|
2582 // whether the expression evaluates to non-NULL.
|
|
2583 //
|
|
2584 // With -fno-delete-null-pointer-checks we need to be more
|
|
2585 // conservative. As some object might reside at address 0,
|
|
2586 // then some offset could be added to it and the same offset
|
|
2587 // subtracted again and the result would be NULL.
|
|
2588 // E.g.
|
|
2589 // static int a[12]; where &a[0] is NULL and
|
|
2590 // ptr = &a[6];
|
|
2591 // ptr -= 6;
|
|
2592 // ptr will be NULL here, even when there is POINTER_PLUS_EXPR
|
|
2593 // where the first range doesn't include zero and the second one
|
|
2594 // doesn't either. As the second operand is sizetype (unsigned),
|
|
2595 // consider all ranges where the MSB could be set as possible
|
|
2596 // subtractions where the result might be NULL.
|
|
2597 if ((!wi_includes_zero_p (type, lh_lb, lh_ub)
|
|
2598 || !wi_includes_zero_p (type, rh_lb, rh_ub))
|
|
2599 && !TYPE_OVERFLOW_WRAPS (type)
|
|
2600 && (flag_delete_null_pointer_checks
|
|
2601 || !wi::sign_mask (rh_ub)))
|
|
2602 r = range_nonzero (type);
|
|
2603 else if (lh_lb == lh_ub && lh_lb == 0
|
|
2604 && rh_lb == rh_ub && rh_lb == 0)
|
|
2605 r = range_zero (type);
|
|
2606 else
|
|
2607 r = value_range (type);
|
|
2608 }
|
|
2609
|
|
2610
|
|
2611 class pointer_min_max_operator : public range_operator
|
|
2612 {
|
|
2613 public:
|
|
2614 virtual void wi_fold (value_range & r, tree type,
|
|
2615 const wide_int &lh_lb, const wide_int &lh_ub,
|
|
2616 const wide_int &rh_lb, const wide_int &rh_ub) const;
|
|
2617 } op_ptr_min_max;
|
|
2618
|
|
2619 void
|
|
2620 pointer_min_max_operator::wi_fold (value_range &r, tree type,
|
|
2621 const wide_int &lh_lb,
|
|
2622 const wide_int &lh_ub,
|
|
2623 const wide_int &rh_lb,
|
|
2624 const wide_int &rh_ub) const
|
|
2625 {
|
|
2626 // For MIN/MAX expressions with pointers, we only care about
|
|
2627 // nullness. If both are non null, then the result is nonnull.
|
|
2628 // If both are null, then the result is null. Otherwise they
|
|
2629 // are varying.
|
|
2630 if (!wi_includes_zero_p (type, lh_lb, lh_ub)
|
|
2631 && !wi_includes_zero_p (type, rh_lb, rh_ub))
|
|
2632 r = range_nonzero (type);
|
|
2633 else if (wi_zero_p (type, lh_lb, lh_ub) && wi_zero_p (type, rh_lb, rh_ub))
|
|
2634 r = range_zero (type);
|
|
2635 else
|
|
2636 r = value_range (type);
|
|
2637 }
|
|
2638
|
|
2639
|
|
2640 class pointer_and_operator : public range_operator
|
|
2641 {
|
|
2642 public:
|
|
2643 virtual void wi_fold (value_range &r, tree type,
|
|
2644 const wide_int &lh_lb, const wide_int &lh_ub,
|
|
2645 const wide_int &rh_lb, const wide_int &rh_ub) const;
|
|
2646 } op_pointer_and;
|
|
2647
|
|
2648 void
|
|
2649 pointer_and_operator::wi_fold (value_range &r, tree type,
|
|
2650 const wide_int &lh_lb,
|
|
2651 const wide_int &lh_ub,
|
|
2652 const wide_int &rh_lb ATTRIBUTE_UNUSED,
|
|
2653 const wide_int &rh_ub ATTRIBUTE_UNUSED) const
|
|
2654 {
|
|
2655 // For pointer types, we are really only interested in asserting
|
|
2656 // whether the expression evaluates to non-NULL.
|
|
2657 if (wi_zero_p (type, lh_lb, lh_ub) || wi_zero_p (type, rh_lb, rh_ub))
|
|
2658 r = range_zero (type);
|
|
2659 else
|
|
2660 r = value_range (type);
|
|
2661 }
|
|
2662
|
|
2663
|
|
2664 class pointer_or_operator : public range_operator
|
|
2665 {
|
|
2666 public:
|
|
2667 virtual void wi_fold (value_range &r, tree type,
|
|
2668 const wide_int &lh_lb, const wide_int &lh_ub,
|
|
2669 const wide_int &rh_lb, const wide_int &rh_ub) const;
|
|
2670 } op_pointer_or;
|
|
2671
|
|
2672 void
|
|
2673 pointer_or_operator::wi_fold (value_range &r, tree type,
|
|
2674 const wide_int &lh_lb,
|
|
2675 const wide_int &lh_ub,
|
|
2676 const wide_int &rh_lb,
|
|
2677 const wide_int &rh_ub) const
|
|
2678 {
|
|
2679 // For pointer types, we are really only interested in asserting
|
|
2680 // whether the expression evaluates to non-NULL.
|
|
2681 if (!wi_includes_zero_p (type, lh_lb, lh_ub)
|
|
2682 && !wi_includes_zero_p (type, rh_lb, rh_ub))
|
|
2683 r = range_nonzero (type);
|
|
2684 else if (wi_zero_p (type, lh_lb, lh_ub) && wi_zero_p (type, rh_lb, rh_ub))
|
|
2685 r = range_zero (type);
|
|
2686 else
|
|
2687 r = value_range (type);
|
|
2688 }
|
|
2689
|
|
2690 // This implements the range operator tables as local objects in this file.
|
|
2691
|
|
2692 class range_op_table
|
|
2693 {
|
|
2694 public:
|
|
2695 inline range_operator *operator[] (enum tree_code code);
|
|
2696 protected:
|
|
2697 void set (enum tree_code code, range_operator &op);
|
|
2698 private:
|
|
2699 range_operator *m_range_tree[MAX_TREE_CODES];
|
|
2700 };
|
|
2701
|
|
2702 // Return a pointer to the range_operator instance, if there is one
|
|
2703 // associated with tree_code CODE.
|
|
2704
|
|
2705 range_operator *
|
|
2706 range_op_table::operator[] (enum tree_code code)
|
|
2707 {
|
|
2708 gcc_checking_assert (code > 0 && code < MAX_TREE_CODES);
|
|
2709 return m_range_tree[code];
|
|
2710 }
|
|
2711
|
|
2712 // Add OP to the handler table for CODE.
|
|
2713
|
|
2714 void
|
|
2715 range_op_table::set (enum tree_code code, range_operator &op)
|
|
2716 {
|
|
2717 gcc_checking_assert (m_range_tree[code] == NULL);
|
|
2718 m_range_tree[code] = &op;
|
|
2719 }
|
|
2720
|
|
2721 // Instantiate a range op table for integral operations.
|
|
2722
|
|
2723 class integral_table : public range_op_table
|
|
2724 {
|
|
2725 public:
|
|
2726 integral_table ();
|
|
2727 } integral_tree_table;
|
|
2728
|
|
2729 integral_table::integral_table ()
|
|
2730 {
|
|
2731 set (EQ_EXPR, op_equal);
|
|
2732 set (NE_EXPR, op_not_equal);
|
|
2733 set (LT_EXPR, op_lt);
|
|
2734 set (LE_EXPR, op_le);
|
|
2735 set (GT_EXPR, op_gt);
|
|
2736 set (GE_EXPR, op_ge);
|
|
2737 set (PLUS_EXPR, op_plus);
|
|
2738 set (MINUS_EXPR, op_minus);
|
|
2739 set (MIN_EXPR, op_min);
|
|
2740 set (MAX_EXPR, op_max);
|
|
2741 set (MULT_EXPR, op_mult);
|
|
2742 set (TRUNC_DIV_EXPR, op_trunc_div);
|
|
2743 set (FLOOR_DIV_EXPR, op_floor_div);
|
|
2744 set (ROUND_DIV_EXPR, op_round_div);
|
|
2745 set (CEIL_DIV_EXPR, op_ceil_div);
|
|
2746 set (EXACT_DIV_EXPR, op_exact_div);
|
|
2747 set (LSHIFT_EXPR, op_lshift);
|
|
2748 set (RSHIFT_EXPR, op_rshift);
|
|
2749 set (NOP_EXPR, op_convert);
|
|
2750 set (CONVERT_EXPR, op_convert);
|
|
2751 set (TRUTH_AND_EXPR, op_logical_and);
|
|
2752 set (BIT_AND_EXPR, op_bitwise_and);
|
|
2753 set (TRUTH_OR_EXPR, op_logical_or);
|
|
2754 set (BIT_IOR_EXPR, op_bitwise_or);
|
|
2755 set (BIT_XOR_EXPR, op_bitwise_xor);
|
|
2756 set (TRUNC_MOD_EXPR, op_trunc_mod);
|
|
2757 set (TRUTH_NOT_EXPR, op_logical_not);
|
|
2758 set (BIT_NOT_EXPR, op_bitwise_not);
|
|
2759 set (INTEGER_CST, op_integer_cst);
|
|
2760 set (SSA_NAME, op_identity);
|
|
2761 set (PAREN_EXPR, op_identity);
|
|
2762 set (OBJ_TYPE_REF, op_identity);
|
|
2763 set (ABS_EXPR, op_abs);
|
|
2764 set (ABSU_EXPR, op_absu);
|
|
2765 set (NEGATE_EXPR, op_negate);
|
|
2766 set (ADDR_EXPR, op_addr);
|
|
2767 }
|
|
2768
|
|
2769 // Instantiate a range op table for pointer operations.
|
|
2770
|
|
2771 class pointer_table : public range_op_table
|
|
2772 {
|
|
2773 public:
|
|
2774 pointer_table ();
|
|
2775 } pointer_tree_table;
|
|
2776
|
|
2777 pointer_table::pointer_table ()
|
|
2778 {
|
|
2779 set (BIT_AND_EXPR, op_pointer_and);
|
|
2780 set (BIT_IOR_EXPR, op_pointer_or);
|
|
2781 set (MIN_EXPR, op_ptr_min_max);
|
|
2782 set (MAX_EXPR, op_ptr_min_max);
|
|
2783 set (POINTER_PLUS_EXPR, op_pointer_plus);
|
|
2784
|
|
2785 set (EQ_EXPR, op_equal);
|
|
2786 set (NE_EXPR, op_not_equal);
|
|
2787 set (LT_EXPR, op_lt);
|
|
2788 set (LE_EXPR, op_le);
|
|
2789 set (GT_EXPR, op_gt);
|
|
2790 set (GE_EXPR, op_ge);
|
|
2791 set (SSA_NAME, op_identity);
|
|
2792 set (ADDR_EXPR, op_addr);
|
|
2793 set (NOP_EXPR, op_convert);
|
|
2794 set (CONVERT_EXPR, op_convert);
|
|
2795
|
|
2796 set (BIT_NOT_EXPR, op_bitwise_not);
|
|
2797 set (BIT_XOR_EXPR, op_bitwise_xor);
|
|
2798 }
|
|
2799
|
|
2800 // The tables are hidden and accessed via a simple extern function.
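// For example, range_op_handler (PLUS_EXPR, integer_type_node) returns
// op_plus from the integral table, whereas a POINTER_TYPE_P type would be
// looked up in the pointer table instead.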
|
|
2801
|
|
2802 range_operator *
|
|
2803 range_op_handler (enum tree_code code, tree type)
|
|
2804 {
|
|
2805 // First check if there is a pointer specialization.
|
|
2806 if (POINTER_TYPE_P (type))
|
|
2807 return pointer_tree_table[code];
|
|
2808 return integral_tree_table[code];
|
|
2809 }
|
|
2810
|
|
2811 // Cast the range in R to TYPE.
|
|
2812
|
|
2813 void
|
|
2814 range_cast (value_range &r, tree type)
|
|
2815 {
|
|
2816 value_range tmp = r;
|
|
2817 range_operator *op = range_op_handler (CONVERT_EXPR, type);
|
|
2818 // Call op_convert, if it fails, the result is varying.
|
|
2819 if (!op->fold_range (r, type, tmp, value_range (type)))
|
|
2820 r = value_range (type);
|
|
2821 }
|
|
2822
|
|
2823 #if CHECKING_P
|
|
2824 #include "selftest.h"
|
|
2825 #include "stor-layout.h"
|
|
2826
|
|
2827 namespace selftest
|
|
2828 {
|
|
2829 #define INT(N) build_int_cst (integer_type_node, (N))
|
|
2830 #define UINT(N) build_int_cstu (unsigned_type_node, (N))
|
|
2831 #define INT16(N) build_int_cst (short_integer_type_node, (N))
|
|
2832 #define UINT16(N) build_int_cstu (short_unsigned_type_node, (N))
|
|
2833 #define INT64(N) build_int_cstu (long_long_integer_type_node, (N))
|
|
2834 #define UINT64(N) build_int_cstu (long_long_unsigned_type_node, (N))
|
|
2835 #define UINT128(N) build_int_cstu (u128_type, (N))
|
|
2836 #define UCHAR(N) build_int_cstu (unsigned_char_type_node, (N))
|
|
2837 #define SCHAR(N) build_int_cst (signed_char_type_node, (N))
|
|
2838
|
|
2839 // Run all of the selftests within this file.
|
|
2840
|
|
2841 void
|
|
2842 range_tests ()
|
|
2843 {
|
|
2844 tree u128_type = build_nonstandard_integer_type (128, /*unsigned=*/1);
|
|
2845 value_range i1, i2, i3;
|
|
2846 value_range r0, r1, rold;
|
|
2847
|
|
2848 // Test that NOT(255) is [0..254] in 8-bit land.
|
|
2849 value_range not_255 (UCHAR (255), UCHAR (255), VR_ANTI_RANGE);
|
|
2850 ASSERT_TRUE (not_255 == value_range (UCHAR (0), UCHAR (254)));
|
|
2851
|
|
2852 // Test that NOT(0) is [1..255] in 8-bit land.
|
|
2853 value_range not_zero = range_nonzero (unsigned_char_type_node);
|
|
2854 ASSERT_TRUE (not_zero == value_range (UCHAR (1), UCHAR (255)));
|
|
2855
|
|
2856 // Check that [0,127][0x..ffffff80,0x..ffffff]
|
|
2857 // => ~[128, 0x..ffffff7f].
|
|
2858 r0 = value_range (UINT128 (0), UINT128 (127));
|
|
2859 tree high = build_minus_one_cst (u128_type);
|
|
2860 // low = -1 - 127 => 0x..ffffff80.
|
|
2861 tree low = fold_build2 (MINUS_EXPR, u128_type, high, UINT128(127));
|
|
2862 r1 = value_range (low, high); // [0x..ffffff80, 0x..ffffffff]
|
|
2863 // r0 = [0,127][0x..ffffff80,0x..fffffff].
|
|
2864 r0.union_ (r1);
|
|
2865 // r1 = [128, 0x..ffffff7f].
|
|
2866 r1 = value_range (UINT128(128),
|
|
2867 fold_build2 (MINUS_EXPR, u128_type,
|
|
2868 build_minus_one_cst (u128_type),
|
|
2869 UINT128(128)));
|
|
2870 r0.invert ();
|
|
2871 ASSERT_TRUE (r0 == r1);
|
|
2872
|
|
2873 r0.set_varying (integer_type_node);
|
|
2874 tree minint = wide_int_to_tree (integer_type_node, r0.lower_bound ());
|
|
2875 tree maxint = wide_int_to_tree (integer_type_node, r0.upper_bound ());
|
|
2876
|
|
2877 r0.set_varying (short_integer_type_node);
|
|
2878 tree minshort = wide_int_to_tree (short_integer_type_node, r0.lower_bound ());
|
|
2879 tree maxshort = wide_int_to_tree (short_integer_type_node, r0.upper_bound ());
|
|
2880
|
|
2881 r0.set_varying (unsigned_type_node);
|
|
2882 tree maxuint = wide_int_to_tree (unsigned_type_node, r0.upper_bound ());
|
|
2883
|
|
2884 // Check that ~[0,5] => [6,MAX] for unsigned int.
|
|
2885 r0 = value_range (UINT (0), UINT (5));
|
|
2886 r0.invert ();
|
|
2887 ASSERT_TRUE (r0 == value_range (UINT(6), maxuint));
|
|
2888
|
|
2889 // Check that ~[10,MAX] => [0,9] for unsigned int.
|
|
2890 r0 = value_range (UINT(10), maxuint);
|
|
2891 r0.invert ();
|
|
2892 ASSERT_TRUE (r0 == value_range (UINT (0), UINT (9)));
|
|
2893
|
|
2894 // Check that ~[0,5] => [6,MAX] for unsigned 128-bit numbers.
|
|
2895 r0 = value_range (UINT128 (0), UINT128 (5), VR_ANTI_RANGE);
|
|
2896 r1 = value_range (UINT128(6), build_minus_one_cst (u128_type));
|
|
2897 ASSERT_TRUE (r0 == r1);
|
|
2898
|
|
2899 // Check that [~5] is really [-MIN,4][6,MAX].
|
|
2900 r0 = value_range (INT (5), INT (5), VR_ANTI_RANGE);
|
|
2901 r1 = value_range (minint, INT (4));
|
|
2902 r1.union_ (value_range (INT (6), maxint));
|
|
2903 ASSERT_FALSE (r1.undefined_p ());
|
|
2904 ASSERT_TRUE (r0 == r1);
|
|
2905
|
|
2906 r1 = value_range (INT (5), INT (5));
|
|
2907 value_range r2 (r1);
|
|
2908 ASSERT_TRUE (r1 == r2);
|
|
2909
|
|
2910 r1 = value_range (INT (5), INT (10));
|
|
2911
|
|
2912 r1 = value_range (integer_type_node,
|
|
2913 wi::to_wide (INT (5)), wi::to_wide (INT (10)));
|
|
2914 ASSERT_TRUE (r1.contains_p (INT (7)));
|
|
2915
|
|
2916 r1 = value_range (SCHAR (0), SCHAR (20));
|
|
2917 ASSERT_TRUE (r1.contains_p (SCHAR(15)));
|
|
2918 ASSERT_FALSE (r1.contains_p (SCHAR(300)));
|
|
2919
|
|
2920 // If a range is in any way outside of the range of the type being
|
|
2921 // converted to, default to the full range of the new type.
|
|
2922 if (TYPE_PRECISION (TREE_TYPE (maxint))
|
|
2923 > TYPE_PRECISION (short_integer_type_node))
|
|
2924 {
|
|
2925 r1 = value_range (integer_zero_node, maxint);
|
|
2926 range_cast (r1, short_integer_type_node);
|
|
2927 ASSERT_TRUE (r1.lower_bound () == wi::to_wide (minshort)
|
|
2928 && r1.upper_bound() == wi::to_wide (maxshort));
|
|
2929 }
|
|
2930
|
|
2931 // (unsigned char)[-5,-1] => [251,255].
|
|
2932 r0 = rold = value_range (SCHAR (-5), SCHAR (-1));
|
|
2933 range_cast (r0, unsigned_char_type_node);
|
|
2934 ASSERT_TRUE (r0 == value_range (UCHAR (251), UCHAR (255)));
|
|
2935 range_cast (r0, signed_char_type_node);
|
|
2936 ASSERT_TRUE (r0 == rold);
|
|
2937
|
|
2938 // (signed char)[15, 150] => [-128,-106][15,127].
|
|
2939 r0 = rold = value_range (UCHAR (15), UCHAR (150));
|
|
2940 range_cast (r0, signed_char_type_node);
|
|
2941 r1 = value_range (SCHAR (15), SCHAR (127));
|
|
2942 r2 = value_range (SCHAR (-128), SCHAR (-106));
|
|
2943 r1.union_ (r2);
|
|
2944 ASSERT_TRUE (r1 == r0);
|
|
2945 range_cast (r0, unsigned_char_type_node);
|
|
2946 ASSERT_TRUE (r0 == rold);
|
|
2947
|
|
2948 // (unsigned char)[-5, 5] => [0,5][251,255].
|
|
2949 r0 = rold = value_range (SCHAR (-5), SCHAR (5));
|
|
2950 range_cast (r0, unsigned_char_type_node);
|
|
2951 r1 = value_range (UCHAR (251), UCHAR (255));
|
|
2952 r2 = value_range (UCHAR (0), UCHAR (5));
|
|
2953 r1.union_ (r2);
|
|
2954 ASSERT_TRUE (r0 == r1);
|
|
2955 range_cast (r0, signed_char_type_node);
|
|
2956 ASSERT_TRUE (r0 == rold);
|
|
2957
|
|
2958 // (unsigned char)[-5,5] => [0,5][251,255].
|
|
2959 r0 = value_range (INT (-5), INT (5));
|
|
2960 range_cast (r0, unsigned_char_type_node);
|
|
2961 r1 = value_range (UCHAR (0), UCHAR (5));
|
|
2962 r1.union_ (value_range (UCHAR (251), UCHAR (255)));
|
|
2963 ASSERT_TRUE (r0 == r1);
|
|
2964
|
|
2965 // (unsigned char)[5U,1974U] => [0,255].
|
|
2966 r0 = value_range (UINT (5), UINT (1974));
|
|
2967 range_cast (r0, unsigned_char_type_node);
|
|
2968 ASSERT_TRUE (r0 == value_range (UCHAR (0), UCHAR (255)));
|
|
2969 range_cast (r0, integer_type_node);
|
|
2970 // Going to a wider type should not sign extend.
|
|
2971 ASSERT_TRUE (r0 == value_range (INT (0), INT (255)));
|
|
2972
|
|
2973 // (unsigned char)[-350,15] => [0,255].
|
|
2974 r0 = value_range (INT (-350), INT (15));
|
|
2975 range_cast (r0, unsigned_char_type_node);
|
|
2976 ASSERT_TRUE (r0 == (value_range
|
|
2977 (TYPE_MIN_VALUE (unsigned_char_type_node),
|
|
2978 TYPE_MAX_VALUE (unsigned_char_type_node))));
|
|
2979
|
|
2980 // Casting [-120,20] from signed char to unsigned short.
|
|
2981 // => [0, 20][0xff88, 0xffff].
|
|
2982 r0 = value_range (SCHAR (-120), SCHAR (20));
|
|
2983 range_cast (r0, short_unsigned_type_node);
|
|
2984 r1 = value_range (UINT16 (0), UINT16 (20));
|
|
2985 r2 = value_range (UINT16 (0xff88), UINT16 (0xffff));
|
|
2986 r1.union_ (r2);
|
|
2987 ASSERT_TRUE (r0 == r1);
|
|
2988 // A truncating cast back to signed char will work because [-120, 20]
|
|
2989 // is representable in signed char.
|
|
2990 range_cast (r0, signed_char_type_node);
|
|
2991 ASSERT_TRUE (r0 == value_range (SCHAR (-120), SCHAR (20)));
|
|
2992
|
|
2993 // unsigned char -> signed short
|
|
2994 // (signed short)[(unsigned char)25, (unsigned char)250]
|
|
2995 // => [(signed short)25, (signed short)250]
|
|
2996 r0 = rold = value_range (UCHAR (25), UCHAR (250));
|
|
2997 range_cast (r0, short_integer_type_node);
|
|
2998 r1 = value_range (INT16 (25), INT16 (250));
|
|
2999 ASSERT_TRUE (r0 == r1);
|
|
3000 range_cast (r0, unsigned_char_type_node);
|
|
3001 ASSERT_TRUE (r0 == rold);
|
|
3002
|
|
3003 // Test casting a wider signed [-MIN,MAX] to a narrower unsigned.
|
|
3004 r0 = value_range (TYPE_MIN_VALUE (long_long_integer_type_node),
|
|
3005 TYPE_MAX_VALUE (long_long_integer_type_node));
|
|
3006 range_cast (r0, short_unsigned_type_node);
|
|
3007 r1 = value_range (TYPE_MIN_VALUE (short_unsigned_type_node),
|
|
3008 TYPE_MAX_VALUE (short_unsigned_type_node));
|
|
3009 ASSERT_TRUE (r0 == r1);
|
|
3010
|
|
3011 // NOT([10,20]) ==> [-MIN,9][21,MAX].
|
|
3012 r0 = r1 = value_range (INT (10), INT (20));
|
|
3013 r2 = value_range (minint, INT(9));
|
|
3014 r2.union_ (value_range (INT(21), maxint));
|
|
3015 ASSERT_FALSE (r2.undefined_p ());
|
|
3016 r1.invert ();
|
|
3017 ASSERT_TRUE (r1 == r2);
|
|
3018 // Test that NOT(NOT(x)) == x.
|
|
3019 r2.invert ();
|
|
3020 ASSERT_TRUE (r0 == r2);
|
|
3021
|
|
3022 // Test that booleans and their inverse work as expected.
|
|
3023 r0 = range_zero (boolean_type_node);
|
|
3024 ASSERT_TRUE (r0 == value_range (build_zero_cst (boolean_type_node),
|
|
3025 build_zero_cst (boolean_type_node)));
|
|
3026 r0.invert ();
|
|
3027 ASSERT_TRUE (r0 == value_range (build_one_cst (boolean_type_node),
|
|
3028 build_one_cst (boolean_type_node)));
|
|
3029
|
|
3030 // Casting NONZERO to a narrower type will wrap/overflow so
|
|
3031 // it's just the entire range for the narrower type.
|
|
3032 //
|
|
3033 // "NOT 0 at signed 32-bits" ==> [-MIN_32,-1][1, +MAX_32]. This is
|
|
3034 // is outside of the range of a smaller range, return the full
|
|
3035 // smaller range.
|
|
3036 if (TYPE_PRECISION (integer_type_node)
|
|
3037 > TYPE_PRECISION (short_integer_type_node))
|
|
3038 {
|
|
3039 r0 = range_nonzero (integer_type_node);
|
|
3040 range_cast (r0, short_integer_type_node);
|
|
3041 r1 = value_range (TYPE_MIN_VALUE (short_integer_type_node),
|
|
3042 TYPE_MAX_VALUE (short_integer_type_node));
|
|
3043 ASSERT_TRUE (r0 == r1);
|
|
3044 }
|
|
3045
|
|
3046 // Casting NONZERO from a narrower signed to a wider signed.
|
|
3047 //
|
|
3048 // NONZERO signed 16-bits is [-MIN_16,-1][1, +MAX_16].
|
|
3049 // Converting this to 32-bits signed is [-MIN_16,-1][1, +MAX_16].
|
|
3050 r0 = range_nonzero (short_integer_type_node);
|
|
3051 range_cast (r0, integer_type_node);
|
|
3052 r1 = value_range (INT (-32768), INT (-1));
|
|
3053 r2 = value_range (INT (1), INT (32767));
|
|
3054 r1.union_ (r2);
|
|
3055 ASSERT_TRUE (r0 == r1);
|
|
3056
|
|
3057 // Make sure NULL and non-NULL of pointer types work, and that
|
|
3058 // inverses of them are consistent.
|
|
3059 tree voidp = build_pointer_type (void_type_node);
|
|
3060 r0 = range_zero (voidp);
|
|
3061 r1 = r0;
|
|
3062 r0.invert ();
|
|
3063 r0.invert ();
|
|
3064 ASSERT_TRUE (r0 == r1);
|
|
3065
|
|
3066 // [10,20] U [15, 30] => [10, 30].
|
|
3067 r0 = value_range (INT (10), INT (20));
|
|
3068 r1 = value_range (INT (15), INT (30));
|
|
3069 r0.union_ (r1);
|
|
3070 ASSERT_TRUE (r0 == value_range (INT (10), INT (30)));
|
|
3071
|
|
3072 // [15,40] U [] => [15,40].
|
|
3073 r0 = value_range (INT (15), INT (40));
|
|
3074 r1.set_undefined ();
|
|
3075 r0.union_ (r1);
|
|
3076 ASSERT_TRUE (r0 == value_range (INT (15), INT (40)));
|
|
3077
|
|
3078 // [10,20] U [10,10] => [10,20].
|
|
3079 r0 = value_range (INT (10), INT (20));
|
|
3080 r1 = value_range (INT (10), INT (10));
|
|
3081 r0.union_ (r1);
|
|
3082 ASSERT_TRUE (r0 == value_range (INT (10), INT (20)));
|
|
3083
|
|
3084 // [10,20] U [9,9] => [9,20].
|
|
3085 r0 = value_range (INT (10), INT (20));
|
|
3086 r1 = value_range (INT (9), INT (9));
|
|
3087 r0.union_ (r1);
|
|
3088 ASSERT_TRUE (r0 == value_range (INT (9), INT (20)));
|
|
3089
|
|
3090 // [10,20] ^ [15,30] => [15,20].
|
|
3091 r0 = value_range (INT (10), INT (20));
|
|
3092 r1 = value_range (INT (15), INT (30));
|
|
3093 r0.intersect (r1);
|
|
3094 ASSERT_TRUE (r0 == value_range (INT (15), INT (20)));
|
|
3095
|
|
3096 // Test the internal sanity of wide_int's wrt HWIs.
|
|
3097 ASSERT_TRUE (wi::max_value (TYPE_PRECISION (boolean_type_node),
|
|
3098 TYPE_SIGN (boolean_type_node))
|
|
3099 == wi::uhwi (1, TYPE_PRECISION (boolean_type_node)));
|
|
3100
|
|
3101 // Test zero_p().
|
|
3102 r0 = value_range (INT (0), INT (0));
|
|
3103 ASSERT_TRUE (r0.zero_p ());
|
|
3104
|
|
3105 // Test nonzero_p().
|
|
3106 r0 = value_range (INT (0), INT (0));
|
|
3107 r0.invert ();
|
|
3108 ASSERT_TRUE (r0.nonzero_p ());
|
|
3109 }
|
|
3110
|
|
3111 } // namespace selftest
|
|
3112
|
|
3113 #endif // CHECKING_P
|