annotate gcc/tree-ssa-phiopt.c @ 66:b362627d71ba
bug-fix: modify tail-call-optimization enforcing rules. (calls.c.)
author | Ryoma SHINYA <shinya@firefly.cr.ie.u-ryukyu.ac.jp>
date | Tue, 14 Dec 2010 03:58:33 +0900
parents | 77e2b8dfacca
children | b7f97abdc517

rev | line source
0 | 1 /* Optimization of PHI nodes by converting them into straightline code. |
2 Copyright (C) 2004, 2005, 2006, 2007, 2008 Free Software Foundation, | |
3 Inc. | |
4 | |
5 This file is part of GCC. | |
6 | |
7 GCC is free software; you can redistribute it and/or modify it | |
8 under the terms of the GNU General Public License as published by the | |
9 Free Software Foundation; either version 3, or (at your option) any | |
10 later version. | |
11 | |
12 GCC is distributed in the hope that it will be useful, but WITHOUT | |
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
15 for more details. | |
16 | |
17 You should have received a copy of the GNU General Public License | |
18 along with GCC; see the file COPYING3. If not see | |
19 <http://www.gnu.org/licenses/>. */ | |
20 | |
21 #include "config.h" | |
22 #include "system.h" | |
23 #include "coretypes.h" | |
24 #include "tm.h" | |
25 #include "ggc.h" | |
26 #include "tree.h" | |
27 #include "rtl.h" | |
28 #include "flags.h" | |
29 #include "tm_p.h" | |
30 #include "basic-block.h" | |
31 #include "timevar.h" | |
32 #include "diagnostic.h" | |
33 #include "tree-flow.h" | |
34 #include "tree-pass.h" | |
35 #include "tree-dump.h" | |
36 #include "langhooks.h" | |
37 #include "pointer-set.h" | |
38 #include "domwalk.h" | |
39 | |
40 static unsigned int tree_ssa_phiopt (void); | |
41 static unsigned int tree_ssa_phiopt_worker (bool); | |
42 static bool conditional_replacement (basic_block, basic_block, | |
43 edge, edge, gimple, tree, tree); | |
44 static bool value_replacement (basic_block, basic_block, | |
45 edge, edge, gimple, tree, tree); | |
46 static bool minmax_replacement (basic_block, basic_block, | |
47 edge, edge, gimple, tree, tree); | |
48 static bool abs_replacement (basic_block, basic_block, | |
49 edge, edge, gimple, tree, tree); | |
50 static bool cond_store_replacement (basic_block, basic_block, edge, edge, | |
51 struct pointer_set_t *); | |
52 static struct pointer_set_t * get_non_trapping (void); | |
53 static void replace_phi_edge_with_variable (basic_block, edge, gimple, tree); | |
54 | |
55 /* This pass tries to replace an if-then-else block with an |
56 assignment. We have four kinds of transformations. Some of these | |
57 transformations are also performed by the ifcvt RTL optimizer. | |
58 | |
59 Conditional Replacement | |
60 ----------------------- | |
61 | |
62 This transformation, implemented in conditional_replacement, | |
63 replaces | |
64 | |
65 bb0: | |
66 if (cond) goto bb2; else goto bb1; | |
67 bb1: | |
68 bb2: | |
69 x = PHI <0 (bb1), 1 (bb0), ...>; | |
70 | |
71 with | |
72 | |
73 bb0: | |
74 x' = cond; | |
75 goto bb2; | |
76 bb2: | |
77 x = PHI <x' (bb0), ...>; | |
78 | |
79 We remove bb1 as it becomes unreachable. This occurs often due to | |
80 gimplification of conditionals. | |
81 | |
82 Value Replacement | |
83 ----------------- | |
84 | |
85 This transformation, implemented in value_replacement, replaces | |
86 | |
87 bb0: | |
88 if (a != b) goto bb2; else goto bb1; | |
89 bb1: | |
90 bb2: | |
91 x = PHI <a (bb1), b (bb0), ...>; | |
92 | |
93 with | |
94 | |
95 bb0: | |
96 bb2: | |
97 x = PHI <b (bb0), ...>; | |
98 | |
99 This opportunity can sometimes occur as a result of other | |
100 optimizations. | |
101 | |
102 ABS Replacement | |
103 --------------- | |
104 | |
105 This transformation, implemented in abs_replacement, replaces | |
106 | |
107 bb0: | |
108 if (a >= 0) goto bb2; else goto bb1; | |
109 bb1: | |
110 x = -a; | |
111 bb2: | |
112 x = PHI <x (bb1), a (bb0), ...>; | |
113 | |
114 with | |
115 | |
116 bb0: | |
117 x' = ABS_EXPR< a >; | |
118 bb2: | |
119 x = PHI <x' (bb0), ...>; | |
120 | |
121 MIN/MAX Replacement | |
122 ------------------- | |
123 | |
124 This transformation, implemented in minmax_replacement, replaces | |
125 | |
126 bb0: | |
127 if (a <= b) goto bb2; else goto bb1; | |
128 bb1: | |
129 bb2: | |
130 x = PHI <b (bb1), a (bb0), ...>; | |
131 | |
132 with | |
133 | |
134 bb0: | |
135 x' = MIN_EXPR (a, b) | |
136 bb2: | |
137 x = PHI <x' (bb0), ...>; | |
138 | |
139 A similar transformation is done for MAX_EXPR. */ | |
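/* Editor's illustrative sketch (not taken from the GCC sources): C-level
   forms that, after gimplification, give rise to the PHI patterns handled
   by the four transformations above.  The function names are invented here
   purely for illustration.  */

static int phiopt_example_cond (int c)         { return c ? 1 : 0; }        /* Conditional Replacement */
static int phiopt_example_value (int a, int b) { return a != b ? b : a; }   /* Value Replacement: result is always b */
static int phiopt_example_abs (int a)          { return a >= 0 ? a : -a; }  /* ABS Replacement */
static int phiopt_example_min (int a, int b)   { return a <= b ? a : b; }   /* MIN/MAX Replacement */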
140 | |
141 static unsigned int | |
142 tree_ssa_phiopt (void) | |
143 { | |
144 return tree_ssa_phiopt_worker (false); | |
145 } | |
146 | |
147 /* This pass tries to transform conditional stores into unconditional | |
148 ones, enabling further simplifications with the simpler then and else | |
149 blocks. In particular it replaces this: | |
150 | |
151 bb0: | |
152 if (cond) goto bb2; else goto bb1; | |
153 bb1: | |
154 *p = RHS | |
155 bb2: | |
156 | |
157 with | |
158 | |
159 bb0: | |
160 if (cond) goto bb1; else goto bb2; | |
161 bb1: | |
162 condtmp' = *p; | |
163 bb2: | |
164 condtmp = PHI <RHS, condtmp'> | |
165 *p = condtmp | |
166 | |
167 This transformation can only be done under several constraints, | |
168 documented below. */ | |
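/* Editor's illustrative sketch (assumed example, not from the GCC sources):
   a source-level conditional store of the shape handled here.  After the
   transformation the store to *p becomes unconditional; on the path where
   COND is false the old contents are loaded and stored back unchanged.  */

static void cselim_example (int *p, int cond, int rhs)
{
  if (cond)
    *p = rhs;   /* becomes roughly:  tmp = cond ? rhs : *p;  *p = tmp;  */
}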
169 | |
170 static unsigned int | |
171 tree_ssa_cs_elim (void) | |
172 { | |
173 return tree_ssa_phiopt_worker (true); | |
174 } | |
175 | |
176 /* For conditional store replacement we need a temporary to | |
177 put the old contents of the memory in. */ | |
178 static tree condstoretemp; | |
179 | |
180 /* The core routine of conditional store replacement and normal | |
181 phi optimizations. Both share much of the infrastructure in how | |
182 to match applicable basic block patterns. DO_STORE_ELIM is true | |
183 when we want to do conditional store replacement, false otherwise. */ | |
184 static unsigned int | |
185 tree_ssa_phiopt_worker (bool do_store_elim) | |
186 { | |
187 basic_block bb; | |
188 basic_block *bb_order; | |
189 unsigned n, i; | |
190 bool cfgchanged = false; | |
191 struct pointer_set_t *nontrap = 0; | |
192 | |
193 if (do_store_elim) | |
194 { | |
195 condstoretemp = NULL_TREE; | |
196 /* Calculate the set of non-trapping memory accesses. */ | |
197 nontrap = get_non_trapping (); | |
198 } | |
199 | |
200 /* Search every basic block for COND_EXPR we may be able to optimize. | |
201 | |
202 We walk the blocks in an order that guarantees that a block with | |
203 a single predecessor is processed before the predecessor. | |
204 This ensures that we collapse inner ifs before visiting the | |
205 outer ones, and also that we do not try to visit a removed | |
206 block. */ | |
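  /* Editor's note (illustrative): for nested conditionals this means the
     inner join block is handled before the outer one, so collapsing the
     inner if-then-else can expose the outer PHI pattern later in the walk.  */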
207 bb_order = blocks_in_phiopt_order (); | |
208 n = n_basic_blocks - NUM_FIXED_BLOCKS; | |
209 | |
210 for (i = 0; i < n; i++) |
0 | 211 { |
212 gimple cond_stmt, phi; | |
213 basic_block bb1, bb2; | |
214 edge e1, e2; | |
215 tree arg0, arg1; | |
216 | |
217 bb = bb_order[i]; | |
218 | |
219 cond_stmt = last_stmt (bb); | |
220 /* Check to see if the last statement is a GIMPLE_COND. */ | |
221 if (!cond_stmt | |
222 || gimple_code (cond_stmt) != GIMPLE_COND) | |
223 continue; | |
224 | |
225 e1 = EDGE_SUCC (bb, 0); | |
226 bb1 = e1->dest; | |
227 e2 = EDGE_SUCC (bb, 1); | |
228 bb2 = e2->dest; | |
229 | |
230 /* We cannot do the optimization on abnormal edges. */ | |
231 if ((e1->flags & EDGE_ABNORMAL) != 0 | |
232 || (e2->flags & EDGE_ABNORMAL) != 0) | |
233 continue; | |
234 | |
235 /* bb1 must have a successor, and bb2 must exist and have a successor. */ | |
236 if (EDGE_COUNT (bb1->succs) == 0 | |
237 || bb2 == NULL | |
238 || EDGE_COUNT (bb2->succs) == 0) | |
239 continue; | |
240 | |
241 /* Find the bb which is the fall through to the other. */ | |
242 if (EDGE_SUCC (bb1, 0)->dest == bb2) | |
243 ; | |
244 else if (EDGE_SUCC (bb2, 0)->dest == bb1) | |
245 { | |
246 basic_block bb_tmp = bb1; | |
247 edge e_tmp = e1; | |
248 bb1 = bb2; | |
249 bb2 = bb_tmp; | |
250 e1 = e2; | |
251 e2 = e_tmp; | |
252 } | |
253 else | |
254 continue; | |
255 | |
256 e1 = EDGE_SUCC (bb1, 0); | |
257 | |
258 /* Make sure that bb1 is just a fall through. */ | |
259 if (!single_succ_p (bb1) | |
260 || (e1->flags & EDGE_FALLTHRU) == 0) | |
261 continue; | |
262 | |
263 /* Also make sure that bb1 has only one predecessor and that it | |
264 is bb. */ | |
265 if (!single_pred_p (bb1) | |
266 || single_pred (bb1) != bb) | |
267 continue; | |
268 | |
269 if (do_store_elim) | |
270 { | |
271 /* bb1 is the middle block, bb2 the join block, bb the split block, | |
272 e1 the fallthrough edge from bb1 to bb2. We can't do the | |
273 optimization if the join block has more than two predecessors. */ | |
274 if (EDGE_COUNT (bb2->preds) > 2) | |
275 continue; | |
276 if (cond_store_replacement (bb1, bb2, e1, e2, nontrap)) | |
277 cfgchanged = true; | |
278 } | |
279 else | |
280 { | |
281 gimple_seq phis = phi_nodes (bb2); | |
282 | |
283 /* Check to make sure that there is only one PHI node. | |
284 TODO: we could do it with more than one iff the other PHI nodes | |
285 have the same elements for these two edges. */ | |
286 if (! gimple_seq_singleton_p (phis)) | |
287 continue; | |
288 | |
289 phi = gsi_stmt (gsi_start (phis)); | |
290 arg0 = gimple_phi_arg_def (phi, e1->dest_idx); | |
291 arg1 = gimple_phi_arg_def (phi, e2->dest_idx); | |
292 | |
293 /* Something is wrong if we cannot find the arguments in the PHI | |
294 node. */ | |
295 gcc_assert (arg0 != NULL && arg1 != NULL); | |
296 | |
297 /* Do the replacement of conditional if it can be done. */ | |
298 if (conditional_replacement (bb, bb1, e1, e2, phi, arg0, arg1)) | |
299 cfgchanged = true; | |
300 else if (value_replacement (bb, bb1, e1, e2, phi, arg0, arg1)) | |
301 cfgchanged = true; | |
302 else if (abs_replacement (bb, bb1, e1, e2, phi, arg0, arg1)) | |
303 cfgchanged = true; | |
304 else if (minmax_replacement (bb, bb1, e1, e2, phi, arg0, arg1)) | |
305 cfgchanged = true; | |
306 } | |
307 } | |
308 | |
309 free (bb_order); | |
310 |
0 | 311 if (do_store_elim) |
312 pointer_set_destroy (nontrap); | |
313 /* If the CFG has changed, we should cleanup the CFG. */ | |
314 if (cfgchanged && do_store_elim) | |
315 { | |
316 /* In cond-store replacement we have added some loads on edges | |
317 and new VOPS (as we moved the store, and created a load). */ | |
318 gsi_commit_edge_inserts (); | |
319 return TODO_cleanup_cfg | TODO_update_ssa_only_virtuals; | |
320 } | |
321 else if (cfgchanged) | |
322 return TODO_cleanup_cfg; | |
323 return 0; | |
324 } | |
325 | |
326 /* Returns the list of basic blocks in the function in an order that guarantees | |
327 that if a block X has just a single predecessor Y, then Y is after X in the | |
328 ordering. */ | |
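/* Editor's note (illustrative): for a chain bb3 -> bb4 -> bb5 in which bb4
   and bb5 each have a single predecessor, the returned order therefore
   lists bb5 before bb4 and bb4 before bb3.  */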
329 | |
330 basic_block * | |
331 blocks_in_phiopt_order (void) | |
332 { | |
333 basic_block x, y; | |
334 basic_block *order = XNEWVEC (basic_block, n_basic_blocks); | |
335 unsigned n = n_basic_blocks - NUM_FIXED_BLOCKS; |
0 | 336 unsigned np, i; |
337 sbitmap visited = sbitmap_alloc (last_basic_block); |
0 | 338 |
339 #define MARK_VISITED(BB) (SET_BIT (visited, (BB)->index)) |
340 #define VISITED_P(BB) (TEST_BIT (visited, (BB)->index)) |
0 | 341 |
342 sbitmap_zero (visited); | |
343 | |
344 MARK_VISITED (ENTRY_BLOCK_PTR); | |
345 FOR_EACH_BB (x) | |
346 { | |
347 if (VISITED_P (x)) | |
348 continue; | |
349 | |
350 /* Walk the predecessors of x as long as they have precisely one | |
351 predecessor and add them to the list, so that they get stored | |
352 after x. */ | |
353 for (y = x, np = 1; | |
354 single_pred_p (y) && !VISITED_P (single_pred (y)); | |
355 y = single_pred (y)) | |
356 np++; | |
357 for (y = x, i = n - np; | |
358 single_pred_p (y) && !VISITED_P (single_pred (y)); | |
359 y = single_pred (y), i++) | |
360 { | |
361 order[i] = y; | |
362 MARK_VISITED (y); | |
363 } | |
364 order[i] = y; | |
365 MARK_VISITED (y); | |
366 | |
367 gcc_assert (i == n - 1); | |
368 n -= np; | |
369 } | |
370 | |
371 sbitmap_free (visited); | |
372 gcc_assert (n == 0); | |
373 return order; | |
374 | |
375 #undef MARK_VISITED | |
376 #undef VISITED_P | |
377 } | |
378 | |
379 | |
380 /* Return TRUE if block BB has no executable statements, otherwise return | |
381 FALSE. */ | |
382 | |
383 bool | |
384 empty_block_p (basic_block bb) | |
385 { | |
386 /* BB must have no executable statements. */ | |
387 gimple_stmt_iterator gsi = gsi_after_labels (bb); |
388 if (gsi_end_p (gsi)) |
389 return true; |
390 if (is_gimple_debug (gsi_stmt (gsi))) |
391 gsi_next_nondebug (&gsi); |
392 return gsi_end_p (gsi); |
0 | 393 } |
394 | |
395 /* Replace PHI node element whose edge is E in block BB with variable NEW. | |
396 Remove the edge from COND_BLOCK which does not lead to BB (COND_BLOCK | |
397 is known to have two edges, one of which must reach BB). */ | |
398 | |
399 static void | |
400 replace_phi_edge_with_variable (basic_block cond_block, | |
401 edge e, gimple phi, tree new_tree) | |
402 { | |
403 basic_block bb = gimple_bb (phi); | |
404 basic_block block_to_remove; | |
405 gimple_stmt_iterator gsi; | |
406 | |
407 /* Change the PHI argument to new. */ | |
408 SET_USE (PHI_ARG_DEF_PTR (phi, e->dest_idx), new_tree); | |
409 | |
410 /* Remove the empty basic block. */ | |
411 if (EDGE_SUCC (cond_block, 0)->dest == bb) | |
412 { | |
413 EDGE_SUCC (cond_block, 0)->flags |= EDGE_FALLTHRU; | |
414 EDGE_SUCC (cond_block, 0)->flags &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE); | |
415 EDGE_SUCC (cond_block, 0)->probability = REG_BR_PROB_BASE; | |
416 EDGE_SUCC (cond_block, 0)->count += EDGE_SUCC (cond_block, 1)->count; | |
417 | |
418 block_to_remove = EDGE_SUCC (cond_block, 1)->dest; | |
419 } | |
420 else | |
421 { | |
422 EDGE_SUCC (cond_block, 1)->flags |= EDGE_FALLTHRU; | |
423 EDGE_SUCC (cond_block, 1)->flags | |
424 &= ~(EDGE_TRUE_VALUE | EDGE_FALSE_VALUE); | |
425 EDGE_SUCC (cond_block, 1)->probability = REG_BR_PROB_BASE; | |
426 EDGE_SUCC (cond_block, 1)->count += EDGE_SUCC (cond_block, 0)->count; | |
427 | |
428 block_to_remove = EDGE_SUCC (cond_block, 0)->dest; | |
429 } | |
430 delete_basic_block (block_to_remove); | |
431 | |
432 /* Eliminate the COND_EXPR at the end of COND_BLOCK. */ | |
433 gsi = gsi_last_bb (cond_block); | |
434 gsi_remove (&gsi, true); | |
435 | |
436 if (dump_file && (dump_flags & TDF_DETAILS)) | |
437 fprintf (dump_file, | |
438 "COND_EXPR in block %d and PHI in block %d converted to straightline code.\n", | |
439 cond_block->index, | |
440 bb->index); | |
441 } | |
442 | |
443 /* The function conditional_replacement does the main work of doing the | |
444 conditional replacement. Return true if the replacement is done. | |
445 Otherwise return false. | |
446 BB is the basic block where the replacement is going to be done on. ARG0 | |
447 is argument 0 from PHI. Likewise for ARG1. */ | |
448 | |
449 static bool | |
450 conditional_replacement (basic_block cond_bb, basic_block middle_bb, | |
451 edge e0, edge e1, gimple phi, | |
452 tree arg0, tree arg1) | |
453 { | |
454 tree result; | |
455 gimple stmt, new_stmt; | |
456 tree cond; | |
457 gimple_stmt_iterator gsi; | |
458 edge true_edge, false_edge; | |
459 tree new_var, new_var2; | |
460 | |
461 /* FIXME: Gimplification of complex type is too hard for now. */ | |
462 if (TREE_CODE (TREE_TYPE (arg0)) == COMPLEX_TYPE | |
463 || TREE_CODE (TREE_TYPE (arg1)) == COMPLEX_TYPE) | |
464 return false; | |
465 | |
466 /* If the PHI arguments are the constants 0 and 1, then we convert | |
467 the PHI into a use of the condition itself. */ | |
468 if ((integer_zerop (arg0) && integer_onep (arg1)) | |
469 || (integer_zerop (arg1) && integer_onep (arg0))) | |
470 ; | |
471 else | |
472 return false; | |
473 | |
474 if (!empty_block_p (middle_bb)) | |
475 return false; | |
476 | |
477 /* At this point we know we have a GIMPLE_COND with two successors. | |
478 One successor is BB, the other successor is an empty block which | |
479 falls through into BB. | |
480 | |
481 There is a single PHI node at the join point (BB) and its arguments | |
482 are constants (0, 1). | |
483 | |
484 So, given the condition COND, and the two PHI arguments, we can | |
485 rewrite this PHI into non-branching code: | |
486 | |
487 dest = (COND) or dest = COND' | |
488 | |
489 We use the condition as-is if the argument associated with the | |
490 true edge has the value one or the argument associated with the | |
491 false edge has the value zero. Note that those conditions are not | |
492 the same since only one of the outgoing edges from the GIMPLE_COND | |
493 will directly reach BB and thus be associated with an argument. */ | |
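  /* For example (editor's illustration), for  x = c ? 0 : 1;  the argument
     associated with the true edge is 0, so the condition is inverted and
     the result gimplified below is effectively  x = !c.  */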
494 | |
495 stmt = last_stmt (cond_bb); | |
496 result = PHI_RESULT (phi); | |
497 | |
498 /* To handle special cases like floating point comparison, it is easier and | |
499 less error-prone to build a tree and gimplify it on the fly though it is | |
500 less efficient. */ | |
501 cond = fold_build2 (gimple_cond_code (stmt), boolean_type_node, | |
502 gimple_cond_lhs (stmt), gimple_cond_rhs (stmt)); | |
503 | |
504 /* We need to know which is the true edge and which is the false | |
505 edge so that we know when to invert the condition below. */ | |
506 extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge); | |
507 if ((e0 == true_edge && integer_zerop (arg0)) | |
508 || (e0 == false_edge && integer_onep (arg0)) | |
509 || (e1 == true_edge && integer_zerop (arg1)) | |
510 || (e1 == false_edge && integer_onep (arg1))) | |
511 cond = fold_build1 (TRUTH_NOT_EXPR, TREE_TYPE (cond), cond); | |
512 | |
513 /* Insert our new statements at the end of conditional block before the | |
514 COND_STMT. */ | |
515 gsi = gsi_for_stmt (stmt); | |
516 new_var = force_gimple_operand_gsi (&gsi, cond, true, NULL, true, | |
517 GSI_SAME_STMT); | |
518 | |
519 if (!useless_type_conversion_p (TREE_TYPE (result), TREE_TYPE (new_var))) | |
520 { | |
521 source_location locus_0, locus_1; |
522 |
0 | 523 new_var2 = create_tmp_var (TREE_TYPE (result), NULL); |
524 add_referenced_var (new_var2); | |
525 new_stmt = gimple_build_assign_with_ops (CONVERT_EXPR, new_var2, | |
526 new_var, NULL); | |
527 new_var2 = make_ssa_name (new_var2, new_stmt); | |
528 gimple_assign_set_lhs (new_stmt, new_var2); | |
529 gsi_insert_before (&gsi, new_stmt, GSI_SAME_STMT); | |
530 new_var = new_var2; | |
531 |
532 /* Set the locus to the first argument, unless it doesn't have one. */ |
533 locus_0 = gimple_phi_arg_location (phi, 0); |
534 locus_1 = gimple_phi_arg_location (phi, 1); |
535 if (locus_0 == UNKNOWN_LOCATION) |
536 locus_0 = locus_1; |
537 gimple_set_location (new_stmt, locus_0); |
0 | 538 } |
539 | |
540 replace_phi_edge_with_variable (cond_bb, e1, phi, new_var); | |
541 | |
542 /* Note that we optimized this PHI. */ | |
543 return true; | |
544 } | |
545 | |
546 /* The function value_replacement does the main work of doing the value | |
547 replacement. Return true if the replacement is done. Otherwise return | |
548 false. | |
549 BB is the basic block where the replacement is going to be done on. ARG0 | |
550 is argument 0 from the PHI. Likewise for ARG1. */ | |
551 | |
552 static bool | |
553 value_replacement (basic_block cond_bb, basic_block middle_bb, | |
554 edge e0, edge e1, gimple phi, | |
555 tree arg0, tree arg1) | |
556 { | |
557 gimple cond; | |
558 edge true_edge, false_edge; | |
559 enum tree_code code; | |
560 | |
561 /* If the type says honor signed zeros we cannot do this | |
562 optimization. */ | |
563 if (HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (arg1)))) | |
564 return false; | |
565 | |
566 if (!empty_block_p (middle_bb)) | |
567 return false; | |
568 | |
569 cond = last_stmt (cond_bb); | |
570 code = gimple_cond_code (cond); | |
571 | |
572 /* This transformation is only valid for equality comparisons. */ | |
573 if (code != NE_EXPR && code != EQ_EXPR) | |
574 return false; | |
575 | |
576 /* We need to know which is the true edge and which is the false | |
577 edge so that we know which PHI argument to select below. */ | |
578 extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge); | |
579 | |
580 /* At this point we know we have a COND_EXPR with two successors. | |
581 One successor is BB, the other successor is an empty block which | |
582 falls through into BB. | |
583 | |
584 The condition for the COND_EXPR is known to be NE_EXPR or EQ_EXPR. | |
585 | |
586 There is a single PHI node at the join point (BB) with two arguments. | |
587 | |
588 We now need to verify that the two arguments in the PHI node match | |
589 the two arguments to the equality comparison. */ | |
590 | |
591 if ((operand_equal_for_phi_arg_p (arg0, gimple_cond_lhs (cond)) | |
592 && operand_equal_for_phi_arg_p (arg1, gimple_cond_rhs (cond))) | |
593 || (operand_equal_for_phi_arg_p (arg1, gimple_cond_lhs (cond)) | |
594 && operand_equal_for_phi_arg_p (arg0, gimple_cond_rhs (cond)))) | |
595 { | |
596 edge e; | |
597 tree arg; | |
598 | |
599 /* For NE_EXPR, we want to build an assignment result = arg where | |
600 arg is the PHI argument associated with the true edge. For | |
601 EQ_EXPR we want the PHI argument associated with the false edge. */ | |
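      /* For example (editor's illustration), for
           if (a == b) x = a; else x = b;
         the condition is EQ_EXPR, so we pick the false-edge argument b;
         this is safe because on the true path a == b anyway.  */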
602 e = (code == NE_EXPR ? true_edge : false_edge); | |
603 | |
604 /* Unfortunately, E may not reach BB (it may instead have gone to | |
605 OTHER_BLOCK). If that is the case, then we want the single outgoing | |
606 edge from OTHER_BLOCK which reaches BB and represents the desired | |
607 path from COND_BLOCK. */ | |
608 if (e->dest == middle_bb) | |
609 e = single_succ_edge (e->dest); | |
610 | |
611 /* Now we know the incoming edge to BB that has the argument for the | |
612 RHS of our new assignment statement. */ | |
613 if (e0 == e) | |
614 arg = arg0; | |
615 else | |
616 arg = arg1; | |
617 | |
618 replace_phi_edge_with_variable (cond_bb, e1, phi, arg); | |
619 | |
620 /* Note that we optimized this PHI. */ | |
621 return true; | |
622 } | |
623 return false; | |
624 } | |
625 | |
626 /* The function minmax_replacement does the main work of doing the minmax | |
627 replacement. Return true if the replacement is done. Otherwise return | |
628 false. | |
629 BB is the basic block where the replacement is going to be done on. ARG0 | |
630 is argument 0 from the PHI. Likewise for ARG1. */ | |
631 | |
632 static bool | |
633 minmax_replacement (basic_block cond_bb, basic_block middle_bb, | |
634 edge e0, edge e1, gimple phi, | |
635 tree arg0, tree arg1) | |
636 { | |
637 tree result, type; | |
638 gimple cond, new_stmt; | |
639 edge true_edge, false_edge; | |
640 enum tree_code cmp, minmax, ass_code; | |
641 tree smaller, larger, arg_true, arg_false; | |
642 gimple_stmt_iterator gsi, gsi_from; | |
643 | |
644 type = TREE_TYPE (PHI_RESULT (phi)); | |
645 | |
646 /* The optimization may be unsafe due to NaNs. */ | |
647 if (HONOR_NANS (TYPE_MODE (type))) | |
648 return false; | |
649 | |
650 cond = last_stmt (cond_bb); | |
651 cmp = gimple_cond_code (cond); | |
652 result = PHI_RESULT (phi); | |
653 | |
654 /* This transformation is only valid for order comparisons. Record which | |
655 operand is smaller/larger if the result of the comparison is true. */ | |
656 if (cmp == LT_EXPR || cmp == LE_EXPR) | |
657 { | |
658 smaller = gimple_cond_lhs (cond); | |
659 larger = gimple_cond_rhs (cond); | |
660 } | |
661 else if (cmp == GT_EXPR || cmp == GE_EXPR) | |
662 { | |
663 smaller = gimple_cond_rhs (cond); | |
664 larger = gimple_cond_lhs (cond); | |
665 } | |
666 else | |
667 return false; | |
668 | |
669 /* We need to know which is the true edge and which is the false | |
670 edge so that we know whether we have a MIN or a MAX. */ | |
671 extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge); | |
672 | |
673 /* Forward the edges over the middle basic block. */ | |
674 if (true_edge->dest == middle_bb) | |
675 true_edge = EDGE_SUCC (true_edge->dest, 0); | |
676 if (false_edge->dest == middle_bb) | |
677 false_edge = EDGE_SUCC (false_edge->dest, 0); | |
678 | |
679 if (true_edge == e0) | |
680 { | |
681 gcc_assert (false_edge == e1); | |
682 arg_true = arg0; | |
683 arg_false = arg1; | |
684 } | |
685 else | |
686 { | |
687 gcc_assert (false_edge == e0); | |
688 gcc_assert (true_edge == e1); | |
689 arg_true = arg1; | |
690 arg_false = arg0; | |
691 } | |
692 | |
693 if (empty_block_p (middle_bb)) | |
694 { | |
695 if (operand_equal_for_phi_arg_p (arg_true, smaller) | |
696 && operand_equal_for_phi_arg_p (arg_false, larger)) | |
697 { | |
698 /* Case | |
699 |
0 | 700 if (smaller < larger) |
701 rslt = smaller; | |
702 else | |
703 rslt = larger; */ | |
704 minmax = MIN_EXPR; | |
705 } | |
706 else if (operand_equal_for_phi_arg_p (arg_false, smaller) | |
707 && operand_equal_for_phi_arg_p (arg_true, larger)) | |
708 minmax = MAX_EXPR; | |
709 else | |
710 return false; | |
711 } | |
712 else | |
713 { | |
714 /* Recognize the following case, assuming d <= u: | |
715 | |
716 if (a <= u) | |
717 b = MAX (a, d); | |
718 x = PHI <b, u> | |
719 | |
720 This is equivalent to | |
721 | |
722 b = MAX (a, d); | |
723 x = MIN (b, u); */ | |
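      /* As C source this is roughly (editor's illustration, with d <= u
         known to hold):
           x = a <= u ? (a > d ? a : d) : u;
         which becomes  x = MIN (MAX (a, d), u).  */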
724 | |
725 gimple assign = last_and_only_stmt (middle_bb); | |
726 tree lhs, op0, op1, bound; | |
727 | |
728 if (!assign | |
729 || gimple_code (assign) != GIMPLE_ASSIGN) | |
730 return false; | |
731 | |
732 lhs = gimple_assign_lhs (assign); | |
733 ass_code = gimple_assign_rhs_code (assign); | |
734 if (ass_code != MAX_EXPR && ass_code != MIN_EXPR) | |
735 return false; | |
736 op0 = gimple_assign_rhs1 (assign); | |
737 op1 = gimple_assign_rhs2 (assign); | |
738 | |
739 if (true_edge->src == middle_bb) | |
740 { | |
741 /* We got here if the condition is true, i.e., SMALLER < LARGER. */ | |
742 if (!operand_equal_for_phi_arg_p (lhs, arg_true)) | |
743 return false; | |
744 | |
745 if (operand_equal_for_phi_arg_p (arg_false, larger)) | |
746 { | |
747 /* Case | |
748 | |
749 if (smaller < larger) | |
750 { | |
751 r' = MAX_EXPR (smaller, bound) | |
752 } | |
753 r = PHI <r', larger> --> to be turned to MIN_EXPR. */ | |
754 if (ass_code != MAX_EXPR) | |
755 return false; | |
756 | |
757 minmax = MIN_EXPR; | |
758 if (operand_equal_for_phi_arg_p (op0, smaller)) | |
759 bound = op1; | |
760 else if (operand_equal_for_phi_arg_p (op1, smaller)) | |
761 bound = op0; | |
762 else | |
763 return false; | |
764 | |
765 /* We need BOUND <= LARGER. */ | |
766 if (!integer_nonzerop (fold_build2 (LE_EXPR, boolean_type_node, | |
767 bound, larger))) | |
768 return false; | |
769 } | |
770 else if (operand_equal_for_phi_arg_p (arg_false, smaller)) | |
771 { | |
772 /* Case | |
773 | |
774 if (smaller < larger) | |
775 { | |
776 r' = MIN_EXPR (larger, bound) | |
777 } | |
778 r = PHI <r', smaller> --> to be turned to MAX_EXPR. */ | |
779 if (ass_code != MIN_EXPR) | |
780 return false; | |
781 | |
782 minmax = MAX_EXPR; | |
783 if (operand_equal_for_phi_arg_p (op0, larger)) | |
784 bound = op1; | |
785 else if (operand_equal_for_phi_arg_p (op1, larger)) | |
786 bound = op0; | |
787 else | |
788 return false; | |
789 | |
790 /* We need BOUND >= SMALLER. */ | |
791 if (!integer_nonzerop (fold_build2 (GE_EXPR, boolean_type_node, | |
792 bound, smaller))) | |
793 return false; | |
794 } | |
795 else | |
796 return false; | |
797 } | |
798 else | |
799 { | |
800 /* We got here if the condition is false, i.e., SMALLER > LARGER. */ | |
801 if (!operand_equal_for_phi_arg_p (lhs, arg_false)) | |
802 return false; | |
803 | |
804 if (operand_equal_for_phi_arg_p (arg_true, larger)) | |
805 { | |
806 /* Case | |
807 | |
808 if (smaller > larger) | |
809 { | |
810 r' = MIN_EXPR (smaller, bound) | |
811 } | |
812 r = PHI <r', larger> --> to be turned to MAX_EXPR. */ | |
813 if (ass_code != MIN_EXPR) | |
814 return false; | |
815 | |
816 minmax = MAX_EXPR; | |
817 if (operand_equal_for_phi_arg_p (op0, smaller)) | |
818 bound = op1; | |
819 else if (operand_equal_for_phi_arg_p (op1, smaller)) | |
820 bound = op0; | |
821 else | |
822 return false; | |
823 | |
824 /* We need BOUND >= LARGER. */ | |
825 if (!integer_nonzerop (fold_build2 (GE_EXPR, boolean_type_node, | |
826 bound, larger))) | |
827 return false; | |
828 } | |
829 else if (operand_equal_for_phi_arg_p (arg_true, smaller)) | |
830 { | |
831 /* Case | |
832 | |
833 if (smaller > larger) | |
834 { | |
835 r' = MAX_EXPR (larger, bound) | |
836 } | |
837 r = PHI <r', smaller> --> to be turned to MIN_EXPR. */ | |
838 if (ass_code != MAX_EXPR) | |
839 return false; | |
840 | |
841 minmax = MIN_EXPR; | |
842 if (operand_equal_for_phi_arg_p (op0, larger)) | |
843 bound = op1; | |
844 else if (operand_equal_for_phi_arg_p (op1, larger)) | |
845 bound = op0; | |
846 else | |
847 return false; | |
848 | |
849 /* We need BOUND <= SMALLER. */ | |
850 if (!integer_nonzerop (fold_build2 (LE_EXPR, boolean_type_node, | |
851 bound, smaller))) | |
852 return false; | |
853 } | |
854 else | |
855 return false; | |
856 } | |
857 | |
858 /* Move the statement from the middle block. */ | |
859 gsi = gsi_last_bb (cond_bb); | |
860 gsi_from = gsi_last_nondebug_bb (middle_bb); |
0 | 861 gsi_move_before (&gsi_from, &gsi); |
862 } | |
863 | |
864 /* Emit the statement to compute min/max. */ | |
865 result = duplicate_ssa_name (PHI_RESULT (phi), NULL); | |
866 new_stmt = gimple_build_assign_with_ops (minmax, result, arg0, arg1); | |
867 gsi = gsi_last_bb (cond_bb); | |
868 gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT); | |
869 | |
870 replace_phi_edge_with_variable (cond_bb, e1, phi, result); | |
871 return true; | |
872 } | |
873 | |
874 /* The function absolute_replacement does the main work of doing the absolute | |
875 replacement. Return true if the replacement is done. Otherwise return | |
876 false. | |
877 bb is the basic block where the replacement is going to be done on. arg0 | |
878 is argument 0 from the phi. Likewise for arg1. */ | |
879 | |
880 static bool | |
881 abs_replacement (basic_block cond_bb, basic_block middle_bb, | |
882 edge e0 ATTRIBUTE_UNUSED, edge e1, | |
883 gimple phi, tree arg0, tree arg1) | |
884 { | |
885 tree result; | |
886 gimple new_stmt, cond; | |
887 gimple_stmt_iterator gsi; | |
888 edge true_edge, false_edge; | |
889 gimple assign; | |
890 edge e; | |
891 tree rhs, lhs; | |
892 bool negate; | |
893 enum tree_code cond_code; | |
894 | |
895 /* If the type says honor signed zeros we cannot do this | |
896 optimization. */ | |
897 if (HONOR_SIGNED_ZEROS (TYPE_MODE (TREE_TYPE (arg1)))) | |
898 return false; | |
899 | |
900 /* OTHER_BLOCK must have only one executable statement which must have the | |
901 form arg0 = -arg1 or arg1 = -arg0. */ | |
902 | |
903 assign = last_and_only_stmt (middle_bb); | |
904 /* If we did not find the proper negation assignment, then we can not | |
905 optimize. */ | |
906 if (assign == NULL) | |
907 return false; | |
908 |
0 | 909 /* If we got here, then we have found the only executable statement |
910 in OTHER_BLOCK. If it is anything other than arg0 = -arg1 or | |
911 arg1 = -arg0, then we can not optimize. */ | |
912 if (gimple_code (assign) != GIMPLE_ASSIGN) | |
913 return false; | |
914 | |
915 lhs = gimple_assign_lhs (assign); | |
916 | |
917 if (gimple_assign_rhs_code (assign) != NEGATE_EXPR) | |
918 return false; | |
919 | |
920 rhs = gimple_assign_rhs1 (assign); | |
921 |
0 | 922 /* The assignment has to be arg0 = -arg1 or arg1 = -arg0. */ |
923 if (!(lhs == arg0 && rhs == arg1) | |
924 && !(lhs == arg1 && rhs == arg0)) | |
925 return false; | |
926 | |
927 cond = last_stmt (cond_bb); | |
928 result = PHI_RESULT (phi); | |
929 | |
930 /* Only relationals comparing arg[01] against zero are interesting. */ | |
931 cond_code = gimple_cond_code (cond); | |
932 if (cond_code != GT_EXPR && cond_code != GE_EXPR | |
933 && cond_code != LT_EXPR && cond_code != LE_EXPR) | |
934 return false; | |
935 | |
936 /* Make sure the conditional is arg[01] OP y. */ | |
937 if (gimple_cond_lhs (cond) != rhs) | |
938 return false; | |
939 | |
940 if (FLOAT_TYPE_P (TREE_TYPE (gimple_cond_rhs (cond))) | |
941 ? real_zerop (gimple_cond_rhs (cond)) | |
942 : integer_zerop (gimple_cond_rhs (cond))) | |
943 ; | |
944 else | |
945 return false; | |
946 | |
947 /* We need to know which is the true edge and which is the false | |
948 edge so that we know if we have abs or negative abs. */ | |
949 extract_true_false_edges_from_block (cond_bb, &true_edge, &false_edge); | |
950 | |
951 /* For GT_EXPR/GE_EXPR, if the true edge goes to OTHER_BLOCK, then we | |
952 will need to negate the result. Similarly for LT_EXPR/LE_EXPR if | |
953 the false edge goes to OTHER_BLOCK. */ | |
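  /* For example (editor's illustration):  x = a > 0 ? a : -a;  yields
     ABS_EXPR <a> directly, while  x = a > 0 ? -a : a;  takes the negate
     path below and yields  -ABS_EXPR <a>.  */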
954 if (cond_code == GT_EXPR || cond_code == GE_EXPR) | |
955 e = true_edge; | |
956 else | |
957 e = false_edge; | |
958 | |
959 if (e->dest == middle_bb) | |
960 negate = true; | |
961 else | |
962 negate = false; | |
963 | |
964 result = duplicate_ssa_name (result, NULL); | |
965 | |
966 if (negate) | |
967 { | |
968 tree tmp = create_tmp_var (TREE_TYPE (result), NULL); | |
969 add_referenced_var (tmp); | |
970 lhs = make_ssa_name (tmp, NULL); | |
971 } | |
972 else | |
973 lhs = result; | |
974 | |
975 /* Build the modify expression with abs expression. */ | |
976 new_stmt = gimple_build_assign_with_ops (ABS_EXPR, lhs, rhs, NULL); | |
977 | |
978 gsi = gsi_last_bb (cond_bb); | |
979 gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT); | |
980 | |
981 if (negate) | |
982 { | |
983 /* Get the right GSI. We want to insert after the recently | |
984 added ABS_EXPR statement (which we know is the first statement | |
985 in the block). */ | |
986 new_stmt = gimple_build_assign_with_ops (NEGATE_EXPR, result, lhs, NULL); | |
987 | |
988 gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT); | |
989 } | |
990 | |
991 replace_phi_edge_with_variable (cond_bb, e1, phi, result); | |
992 | |
993 /* Note that we optimized this PHI. */ | |
994 return true; | |
995 } | |
996 | |
997 /* Auxiliary functions to determine the set of memory accesses which | |
998 can't trap because they are preceded by accesses to the same memory | |
999 portion. We do that for INDIRECT_REFs, so we only need to track | |
1000 the SSA_NAME of the pointer indirectly referenced. The algorithm | |
1001 simply is a walk over all instructions in dominator order. When | |
1002 we see an INDIRECT_REF we determine if we've already seen a same | |
1003 ref anywhere up to the root of the dominator tree. If we do the | |
1004 current access can't trap. If we don't see any dominating access | |
1005 the current access might trap, but might also make later accesses | |
1006 non-trapping, so we remember it. We need to be careful with loads | |
1007 or stores, for instance a load might not trap, while a store would, | |
1008 so if we see a dominating read access this doesn't mean that a later | |
1009 write access would not trap. Hence we also need to differentiate the | |
1010 type of access(es) seen. | |
1011 | |
1012 ??? We currently are very conservative and assume that a load might | |
1013 trap even if a store doesn't (write-only memory). This probably is | |
1014 overly conservative. */ | |
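/* Editor's illustrative sketch (assumed example, not from the GCC sources):
   the second store below cannot trap, because a store through the same
   pointer dominates it, so cond_store_replacement may sink it out of the
   conditional.  */

static void nontrap_example (int *p, int cond, int v)
{
  *p = 0;      /* dominating store: establishes that *p is writable */
  if (cond)
    *p = v;    /* recorded as non-trapping by the dominator walk below */
}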
1015 | |
1016 /* A hash-table of SSA_NAMEs, and in which basic block an INDIRECT_REF | |
1017 through it was seen, which would constitute a no-trap region for | |
1018 same accesses. */ | |
1019 struct name_to_bb | |
1020 { | |
1021 tree ssa_name; | |
1022 basic_block bb; | |
1023 unsigned store : 1; | |
1024 }; | |
1025 | |
1026 /* The hash table for remembering what we've seen. */ | |
1027 static htab_t seen_ssa_names; | |
1028 | |
1029 /* The set of INDIRECT_REFs which can't trap. */ | |
1030 static struct pointer_set_t *nontrap_set; | |
1031 | |
1032 /* The hash function, based on the pointer to the pointer SSA_NAME. */ | |
1033 static hashval_t | |
1034 name_to_bb_hash (const void *p) | |
1035 { | |
1036 const_tree n = ((const struct name_to_bb *)p)->ssa_name; | |
1037 return htab_hash_pointer (n) ^ ((const struct name_to_bb *)p)->store; | |
1038 } | |
1039 | |
1040 /* The equality function of *P1 and *P2. SSA_NAMEs are shared, so | |
1041 it's enough to simply compare them for equality. */ | |
1042 static int | |
1043 name_to_bb_eq (const void *p1, const void *p2) | |
1044 { | |
1045 const struct name_to_bb *n1 = (const struct name_to_bb *)p1; | |
1046 const struct name_to_bb *n2 = (const struct name_to_bb *)p2; | |
1047 | |
1048 return n1->ssa_name == n2->ssa_name && n1->store == n2->store; | |
1049 } | |
1050 | |
1051 /* We see the expression EXP in basic block BB. If it's an interesting | |
1052 expression (an INDIRECT_REF through an SSA_NAME) possibly insert the | |
1053 expression into the set NONTRAP or the hash table of seen expressions. | |
1054 STORE is true if this expression is on the LHS, otherwise it's on | |
1055 the RHS. */ | |
1056 static void | |
1057 add_or_mark_expr (basic_block bb, tree exp, | |
1058 struct pointer_set_t *nontrap, bool store) | |
1059 { | |
1060 if (INDIRECT_REF_P (exp) | |
1061 && TREE_CODE (TREE_OPERAND (exp, 0)) == SSA_NAME) | |
1062 { | |
1063 tree name = TREE_OPERAND (exp, 0); | |
1064 struct name_to_bb map; | |
1065 void **slot; | |
1066 struct name_to_bb *n2bb; | |
1067 basic_block found_bb = 0; | |
1068 | |
1069 /* Try to find the last seen INDIRECT_REF through the same | |
1070 SSA_NAME, which can trap. */ | |
1071 map.ssa_name = name; | |
1072 map.bb = 0; | |
1073 map.store = store; | |
1074 slot = htab_find_slot (seen_ssa_names, &map, INSERT); | |
1075 n2bb = (struct name_to_bb *) *slot; | |
1076 if (n2bb) | |
1077 found_bb = n2bb->bb; | |
1078 | |
1079 /* If we've found a trapping INDIRECT_REF, _and_ it dominates EXP | |
1080 (it's in a basic block on the path from us to the dominator root) | |
1081 then we can't trap. */ | |
1082 if (found_bb && found_bb->aux == (void *)1) | |
1083 { | |
1084 pointer_set_insert (nontrap, exp); | |
1085 } | |
1086 else | |
1087 { | |
1088 /* EXP might trap, so insert it into the hash table. */ | |
1089 if (n2bb) | |
1090 { | |
1091 n2bb->bb = bb; | |
1092 } | |
1093 else | |
1094 { | |
1095 n2bb = XNEW (struct name_to_bb); | |
1096 n2bb->ssa_name = name; | |
1097 n2bb->bb = bb; | |
1098 n2bb->store = store; | |
1099 *slot = n2bb; | |
1100 } | |
1101 } | |
1102 } | |
1103 } | |
1104 | |
1105 /* Called by walk_dominator_tree, when entering the block BB. */ | |
1106 static void | |
1107 nt_init_block (struct dom_walk_data *data ATTRIBUTE_UNUSED, basic_block bb) | |
1108 { | |
1109 gimple_stmt_iterator gsi; | |
1110 /* Mark this BB as being on the path to dominator root. */ | |
1111 bb->aux = (void*)1; | |
1112 | |
1113 /* And walk the statements in order. */ | |
1114 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi)) | |
1115 { | |
1116 gimple stmt = gsi_stmt (gsi); | |
1117 | |
1118 if (is_gimple_assign (stmt)) | |
1119 { | |
1120 add_or_mark_expr (bb, gimple_assign_lhs (stmt), nontrap_set, true); | |
1121 add_or_mark_expr (bb, gimple_assign_rhs1 (stmt), nontrap_set, false); | |
1122 if (get_gimple_rhs_num_ops (gimple_assign_rhs_code (stmt)) > 1) | |
1123 add_or_mark_expr (bb, gimple_assign_rhs2 (stmt), nontrap_set, | |
1124 false); | |
1125 } | |
1126 } | |
1127 } | |
1128 | |
1129 /* Called by walk_dominator_tree, when basic block BB is exited. */ | |
1130 static void | |
1131 nt_fini_block (struct dom_walk_data *data ATTRIBUTE_UNUSED, basic_block bb) | |
1132 { | |
1133 /* This BB isn't on the path to dominator root anymore. */ | |
1134 bb->aux = NULL; | |
1135 } | |
1136 | |
1137 /* This is the entry point of gathering non-trapping memory accesses. | |
1138 It will do a dominator walk over the whole function, and it will | |
1139 make use of the bb->aux pointers. It returns a set of trees | |
1140 (the INDIRECT_REFs themselves) which can't trap. */ | |
1141 static struct pointer_set_t * | |
1142 get_non_trapping (void) | |
1143 { | |
1144 struct pointer_set_t *nontrap; | |
1145 struct dom_walk_data walk_data; | |
1146 | |
1147 nontrap = pointer_set_create (); | |
1148 seen_ssa_names = htab_create (128, name_to_bb_hash, name_to_bb_eq, | |
1149 free); | |
1150 /* We're going to do a dominator walk, so ensure that we have | |
1151 dominance information. */ | |
1152 calculate_dominance_info (CDI_DOMINATORS); | |
1153 | |
1154 /* Setup callbacks for the generic dominator tree walker. */ | |
1155 nontrap_set = nontrap; | |
1156 walk_data.dom_direction = CDI_DOMINATORS; | |
1157 walk_data.initialize_block_local_data = NULL; | |
1158 walk_data.before_dom_children = nt_init_block; |
1159 walk_data.after_dom_children = nt_fini_block; |
0 | 1160 walk_data.global_data = NULL; |
1161 walk_data.block_local_data_size = 0; | |
1162 | |
1163 init_walk_dominator_tree (&walk_data); | |
1164 walk_dominator_tree (&walk_data, ENTRY_BLOCK_PTR); | |
1165 fini_walk_dominator_tree (&walk_data); | |
1166 htab_delete (seen_ssa_names); | |
1167 | |
1168 return nontrap; | |
1169 } | |
1170 | |
1171 /* Do the main work of conditional store replacement. We already know | |
1172 that the recognized pattern looks like so: | |
1173 | |
1174 split: | |
1175 if (cond) goto MIDDLE_BB; else goto JOIN_BB (edge E1) | |
1176 MIDDLE_BB: | |
1177 something | |
1178 fallthrough (edge E0) | |
1179 JOIN_BB: | |
1180 some more | |
1181 | |
1182 We check that MIDDLE_BB contains only one store, that that store | |
1183 doesn't trap (not via NOTRAP, but via checking if an access to the same | |
1184 memory location dominates us) and that the store has a "simple" RHS. */ | |
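/* Editor's note (illustrative): the NONTRAP requirement is what keeps this
   transformation from breaking code such as

     if (p != NULL)
       *p = v;

   where introducing an unconditional access to *p on the other path could
   fault.  */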
1185 | |
1186 static bool | |
1187 cond_store_replacement (basic_block middle_bb, basic_block join_bb, | |
1188 edge e0, edge e1, struct pointer_set_t *nontrap) | |
1189 { | |
1190 gimple assign = last_and_only_stmt (middle_bb); | |
1191 tree lhs, rhs, name; | |
1192 gimple newphi, new_stmt; | |
1193 gimple_stmt_iterator gsi; | |
1194 source_location locus; |
0 | 1195 enum tree_code code; |
1196 | |
1197 /* Check if middle_bb contains only one store. */ | |
1198 if (!assign | |
1199 || gimple_code (assign) != GIMPLE_ASSIGN) | |
1200 return false; | |
1201 | |
1202 locus = gimple_location (assign); |
0 | 1203 lhs = gimple_assign_lhs (assign); |
1204 rhs = gimple_assign_rhs1 (assign); | |
1205 if (!INDIRECT_REF_P (lhs)) | |
1206 return false; | |
1207 | |
1208 /* RHS is either a single SSA_NAME or a constant. */ | |
1209 code = gimple_assign_rhs_code (assign); | |
1210 if (get_gimple_rhs_class (code) != GIMPLE_SINGLE_RHS | |
1211 || (code != SSA_NAME && !is_gimple_min_invariant (rhs))) | |
1212 return false; | |
1213 /* Prove that we can move the store down. We could also check | |
1214 TREE_THIS_NOTRAP here, but in that case we also could move stores, | |
1215 whose value is not available readily, which we want to avoid. */ | |
1216 if (!pointer_set_contains (nontrap, lhs)) | |
1217 return false; | |
1218 | |
1219 /* Now we've checked the constraints, so do the transformation: | |
1220 1) Remove the single store. */ | |
1221 mark_symbols_for_renaming (assign); | |
1222 gsi = gsi_for_stmt (assign); | |
1223 gsi_remove (&gsi, true); | |
1224 | |
1225 /* 2) Create a temporary where we can store the old content | |
1226 of the memory touched by the store, if we need to. */ | |
1227 if (!condstoretemp || TREE_TYPE (lhs) != TREE_TYPE (condstoretemp)) | |
1228 { | |
1229 condstoretemp = create_tmp_var (TREE_TYPE (lhs), "cstore"); | |
1230 get_var_ann (condstoretemp); | |
1231 if (TREE_CODE (TREE_TYPE (lhs)) == COMPLEX_TYPE | |
1232 || TREE_CODE (TREE_TYPE (lhs)) == VECTOR_TYPE) | |
1233 DECL_GIMPLE_REG_P (condstoretemp) = 1; | |
1234 } | |
1235 add_referenced_var (condstoretemp); | |
1236 | |
1237 /* 3) Insert a load from the memory of the store to the temporary | |
1238 on the edge which did not contain the store. */ | |
1239 lhs = unshare_expr (lhs); | |
1240 new_stmt = gimple_build_assign (condstoretemp, lhs); | |
1241 name = make_ssa_name (condstoretemp, new_stmt); | |
1242 gimple_assign_set_lhs (new_stmt, name); | |
1243 gimple_set_location (new_stmt, locus); |
0 | 1244 mark_symbols_for_renaming (new_stmt); |
1245 gsi_insert_on_edge (e1, new_stmt); | |
1246 | |
1247 /* 4) Create a PHI node at the join block, with one argument | |
1248 holding the old RHS, and the other holding the temporary | |
1249 where we stored the old memory contents. */ | |
1250 newphi = create_phi_node (condstoretemp, join_bb); | |
1251 add_phi_arg (newphi, rhs, e0, locus); |
1252 add_phi_arg (newphi, name, e1, locus); |
0 | 1253 |
1254 lhs = unshare_expr (lhs); | |
1255 new_stmt = gimple_build_assign (lhs, PHI_RESULT (newphi)); | |
1256 mark_symbols_for_renaming (new_stmt); | |
1257 | |
1258 /* 5) Insert that PHI node. */ | |
1259 gsi = gsi_after_labels (join_bb); | |
1260 if (gsi_end_p (gsi)) | |
1261 { | |
1262 gsi = gsi_last_bb (join_bb); | |
1263 gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT); | |
1264 } | |
1265 else | |
1266 gsi_insert_before (&gsi, new_stmt, GSI_NEW_STMT); | |
1267 | |
1268 return true; | |
1269 } | |
1270 | |
1271 /* Always do these optimizations if we have SSA | |
1272 trees to work on. */ | |
1273 static bool | |
1274 gate_phiopt (void) | |
1275 { | |
1276 return 1; | |
1277 } | |
1278 | |
1279 struct gimple_opt_pass pass_phiopt = | |
1280 { | |
1281 { | |
1282 GIMPLE_PASS, | |
1283 "phiopt", /* name */ | |
1284 gate_phiopt, /* gate */ | |
1285 tree_ssa_phiopt, /* execute */ | |
1286 NULL, /* sub */ | |
1287 NULL, /* next */ | |
1288 0, /* static_pass_number */ | |
1289 TV_TREE_PHIOPT, /* tv_id */ | |
1290 PROP_cfg | PROP_ssa, /* properties_required */ |
0 | 1291 0, /* properties_provided */ |
1292 0, /* properties_destroyed */ | |
1293 0, /* todo_flags_start */ | |
1294 TODO_dump_func | |
1295 | TODO_ggc_collect | |
1296 | TODO_verify_ssa | |
1297 | TODO_verify_flow | |
1298 | TODO_verify_stmts /* todo_flags_finish */ | |
1299 } | |
1300 }; | |
1301 | |
1302 static bool | |
1303 gate_cselim (void) | |
1304 { | |
1305 return flag_tree_cselim; | |
1306 } | |
1307 | |
1308 struct gimple_opt_pass pass_cselim = | |
1309 { | |
1310 { | |
1311 GIMPLE_PASS, | |
1312 "cselim", /* name */ | |
1313 gate_cselim, /* gate */ | |
1314 tree_ssa_cs_elim, /* execute */ | |
1315 NULL, /* sub */ | |
1316 NULL, /* next */ | |
1317 0, /* static_pass_number */ | |
1318 TV_TREE_PHIOPT, /* tv_id */ | |
1319 PROP_cfg | PROP_ssa, /* properties_required */ |
0 | 1320 0, /* properties_provided */ |
1321 0, /* properties_destroyed */ | |
1322 0, /* todo_flags_start */ | |
1323 TODO_dump_func | |
1324 | TODO_ggc_collect | |
1325 | TODO_verify_ssa | |
1326 | TODO_verify_flow | |
1327 | TODO_verify_stmts /* todo_flags_finish */ | |
1328 } | |
1329 }; |