annotate gcc/tree-ssa-propagate.c @ 56:3c8a44c06a95
Added tag gcc-4.4.5 for changeset 77e2b8dfacca

author:   ryoma <e075725@ie.u-ryukyu.ac.jp>
date:     Fri, 12 Feb 2010 23:41:23 +0900
parents:  77e2b8dfacca
children: b7f97abdc517
0 | 1 /* Generic SSA value propagation engine. |
2 Copyright (C) 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. | |
3 Contributed by Diego Novillo <dnovillo@redhat.com> | |
4 | |
5 This file is part of GCC. | |
6 | |
7 GCC is free software; you can redistribute it and/or modify it | |
8 under the terms of the GNU General Public License as published by the | |
9 Free Software Foundation; either version 3, or (at your option) any | |
10 later version. | |
11 | |
12 GCC is distributed in the hope that it will be useful, but WITHOUT | |
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
15 for more details. | |
16 | |
17 You should have received a copy of the GNU General Public License | |
18 along with GCC; see the file COPYING3. If not see | |
19 <http://www.gnu.org/licenses/>. */ | |
20 | |
21 #include "config.h" | |
22 #include "system.h" | |
23 #include "coretypes.h" | |
24 #include "tm.h" | |
25 #include "tree.h" | |
26 #include "flags.h" | |
27 #include "rtl.h" | |
28 #include "tm_p.h" | |
29 #include "ggc.h" | |
30 #include "basic-block.h" | |
31 #include "output.h" | |
32 #include "expr.h" | |
33 #include "function.h" | |
34 #include "diagnostic.h" | |
35 #include "timevar.h" | |
36 #include "tree-dump.h" | |
37 #include "tree-flow.h" | |
38 #include "tree-pass.h" | |
39 #include "tree-ssa-propagate.h" | |
40 #include "langhooks.h" | |
41 #include "varray.h" | |
42 #include "vec.h" | |
43 #include "value-prof.h" | |
44 #include "gimple.h" | |
45 | |
46 /* This file implements a generic value propagation engine based on | |
47 the same propagation used by the SSA-CCP algorithm [1]. | |
48 | |
49 Propagation is performed by simulating the execution of every | |
50 statement that produces the value being propagated. Simulation | |
51 proceeds as follows: | |
52 | |
53 1- Initially, all edges of the CFG are marked not executable and | |
54 the CFG worklist is seeded with all the statements in the entry | |
55 basic block (block 0). | |
56 | |
57 2- Every statement S is simulated with a call to the call-back | |
58 function SSA_PROP_VISIT_STMT. This evaluation may produce 3 | |
59 results: | |
60 | |
61 SSA_PROP_NOT_INTERESTING: Statement S produces nothing of | |
62 interest and does not affect any of the work lists. | |
63 | |
64 SSA_PROP_VARYING: The value produced by S cannot be determined | |
65 at compile time. Further simulation of S is not required. | |
66 If S is a conditional jump, all the outgoing edges for the | |
67 block are considered executable and added to the work | |
68 list. | |
69 | |
70 SSA_PROP_INTERESTING: S produces a value that can be computed | |
71 at compile time. Its result can be propagated into the | |
72 statements that feed from S. Furthermore, if S is a | |
73 conditional jump, only the edge known to be taken is added | |
74 to the work list. Edges that are known not to execute are | |
75 never simulated. | |
76 | |
77 3- PHI nodes are simulated with a call to SSA_PROP_VISIT_PHI. The | |
78 return value from SSA_PROP_VISIT_PHI has the same semantics as | |
79 described in #2. | |
80 | |
81 4- Three work lists are kept. Statements are only added to these | |
82 lists if they produce one of SSA_PROP_INTERESTING or | |
83 SSA_PROP_VARYING. | |
84 | |
85 CFG_BLOCKS contains the list of blocks to be simulated. | |
86 Blocks are added to this list if their incoming edges are | |
87 found executable. | |
88 | |
89 VARYING_SSA_EDGES contains the list of statements that feed | |
90 from statements that produce an SSA_PROP_VARYING result. | |
91 These are simulated first to speed up processing. | |
92 | |
93 INTERESTING_SSA_EDGES contains the list of statements that | |
94 feed from statements that produce an SSA_PROP_INTERESTING | |
95 result. | |
96 | |
97 5- Simulation terminates when all three work lists are drained. | |
98 | |
99 Before calling ssa_propagate, it is important to set |
100 prop_simulate_again_p for every statement that should be simulated |
101 and to clear it for the rest. This initialization is how an |
102 implementation specifies which statements should never be simulated. |
103 | |
104 It is also important to compute def-use information before calling | |
105 ssa_propagate. | |
106 | |
107 References: | |
108 | |
109 [1] Constant propagation with conditional branches, | |
110 Wegman and Zadeck, ACM TOPLAS 13(2):181-210. | |
111 | |
112 [2] Building an Optimizing Compiler, | |
113 Robert Morgan, Butterworth-Heinemann, 1998, Section 8.9. | |
114 | |
115 [3] Advanced Compiler Design and Implementation, | |
116 Steven Muchnick, Morgan Kaufmann, 1997, Section 12.6 */ | |
117 | |
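/* Editor's sketch (not part of the original sources): a minimal client
   of this engine, using only interfaces visible in this file -- the
   callback signatures, the SSA_PROP_* result codes and ssa_propagate.
   The names my_visit_stmt, my_visit_phi and my_lattice_constant_p are
   hypothetical placeholders for a pass-specific lattice.  The statement
   callback reports the LHS it defines through OUTPUT_P so the engine
   can queue the statements that use it:

     static enum ssa_prop_result
     my_visit_stmt (gimple stmt, edge *taken_edge_p, tree *output_p)
     {
       tree lhs = gimple_get_lhs (stmt);

       *taken_edge_p = NULL;
       if (lhs == NULL_TREE || TREE_CODE (lhs) != SSA_NAME)
         return SSA_PROP_VARYING;

       *output_p = lhs;
       return (my_lattice_constant_p (lhs)
               ? SSA_PROP_INTERESTING : SSA_PROP_VARYING);
     }

     static enum ssa_prop_result
     my_visit_phi (gimple phi)
     {
       return SSA_PROP_NOT_INTERESTING;
     }

   and the pass would then run the engine with

     ssa_propagate (my_visit_stmt, my_visit_phi);

   A real PHI visitor would merge the lattice values of the PHI
   arguments into the value of gimple_phi_result (phi) and return
   SSA_PROP_INTERESTING when that value changed.  */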
118 /* Function pointers used to parameterize the propagation engine. */ | |
119 static ssa_prop_visit_stmt_fn ssa_prop_visit_stmt; | |
120 static ssa_prop_visit_phi_fn ssa_prop_visit_phi; | |
121 | |
122 /* Keep track of statements that have been added to one of the SSA | |
123 edges worklists. This flag is used to avoid visiting statements | |
124 unnecessarily when draining an SSA edge worklist. If while | |
125 simulating a basic block, we find a statement with | |
126 STMT_IN_SSA_EDGE_WORKLIST set, we clear it to prevent SSA edge | |
127 processing from visiting it again. | |
128 | |
129 NOTE: users of the propagation engine are not allowed to use | |
130 the GF_PLF_1 flag. */ | |
131 #define STMT_IN_SSA_EDGE_WORKLIST GF_PLF_1 | |
132 | |
133 /* A bitmap to keep track of executable blocks in the CFG. */ | |
134 static sbitmap executable_blocks; | |
135 | |
136 /* Array of control flow edges on the worklist. */ | |
137 static VEC(basic_block,heap) *cfg_blocks; | |
138 | |
139 static unsigned int cfg_blocks_num = 0; | |
140 static int cfg_blocks_tail; | |
141 static int cfg_blocks_head; | |
142 | |
143 static sbitmap bb_in_list; | |
144 | |
145 /* Worklist of SSA edges which will need reexamination as their | |
146 definition has changed. SSA edges are def-use edges in the SSA | |
147 web. For each D-U edge, we store the target statement or PHI node | |
148 U. */ | |
149 static GTY(()) VEC(gimple,gc) *interesting_ssa_edges; | |
150 | |
151 /* Identical to INTERESTING_SSA_EDGES. For performance reasons, the | |
152 list of SSA edges is split into two. One contains all SSA edges | |
153 that need to be reexamined because their lattice value changed to |
154 varying (this worklist), and the other contains all other SSA edges | |
155 to be reexamined (INTERESTING_SSA_EDGES). | |
156 | |
157 Since most values in the program are VARYING, the ideal situation | |
158 is to move them to that lattice value as quickly as possible. | |
159 Thus, it doesn't make sense to process any other type of lattice | |
160 value until all VARYING values are propagated fully, which is one | |
161 thing using the VARYING worklist achieves. In addition, if we | |
162 don't use a separate worklist for VARYING edges, we end up with | |
163 situations where lattice values move from | |
164 UNDEFINED->INTERESTING->VARYING instead of UNDEFINED->VARYING. */ | |
165 static GTY(()) VEC(gimple,gc) *varying_ssa_edges; | |
166 | |
167 | |
168 /* Return true if the block worklist is empty. */ |
169 | |
170 static inline bool | |
171 cfg_blocks_empty_p (void) | |
172 { | |
173 return (cfg_blocks_num == 0); | |
174 } | |
175 | |
176 | |
177 /* Add a basic block to the worklist. The block must not be already | |
178 in the worklist, and it must not be the ENTRY or EXIT block. */ | |
179 | |
180 static void |
0 | 181 cfg_blocks_add (basic_block bb) |
182 { | |
183 bool head = false; | |
184 | |
185 gcc_assert (bb != ENTRY_BLOCK_PTR && bb != EXIT_BLOCK_PTR); | |
186 gcc_assert (!TEST_BIT (bb_in_list, bb->index)); | |
187 | |
188 if (cfg_blocks_empty_p ()) | |
189 { | |
190 cfg_blocks_tail = cfg_blocks_head = 0; | |
191 cfg_blocks_num = 1; | |
192 } | |
193 else | |
194 { | |
195 cfg_blocks_num++; | |
196 if (cfg_blocks_num > VEC_length (basic_block, cfg_blocks)) | |
197 { | |
198 /* We have to grow the array now. Adjust the queue to occupy |
199 the full space of the original array. We do not need to |
200 initialize the newly allocated portion of the array |
201 because we keep track of CFG_BLOCKS_HEAD and |
202 CFG_BLOCKS_TAIL. */ |
203 cfg_blocks_tail = VEC_length (basic_block, cfg_blocks); | |
204 cfg_blocks_head = 0; | |
205 VEC_safe_grow (basic_block, heap, cfg_blocks, 2 * cfg_blocks_tail); | |
206 } | |
207 /* Minor optimization: we prefer to see blocks with more | |
208 predecessors later, because there is more of a chance that | |
209 the incoming edges will be executable. */ | |
210 else if (EDGE_COUNT (bb->preds) | |
211 >= EDGE_COUNT (VEC_index (basic_block, cfg_blocks, | |
212 cfg_blocks_head)->preds)) | |
213 cfg_blocks_tail = ((cfg_blocks_tail + 1) | |
214 % VEC_length (basic_block, cfg_blocks)); | |
215 else | |
216 { | |
217 if (cfg_blocks_head == 0) | |
218 cfg_blocks_head = VEC_length (basic_block, cfg_blocks); | |
219 --cfg_blocks_head; | |
220 head = true; | |
221 } | |
222 } | |
223 | |
224 VEC_replace (basic_block, cfg_blocks, | |
225 head ? cfg_blocks_head : cfg_blocks_tail, | |
226 bb); | |
227 SET_BIT (bb_in_list, bb->index); | |
228 } | |
229 | |
230 | |
231 /* Remove a block from the worklist. */ | |
232 | |
233 static basic_block | |
234 cfg_blocks_get (void) | |
235 { | |
236 basic_block bb; | |
237 | |
238 bb = VEC_index (basic_block, cfg_blocks, cfg_blocks_head); | |
239 | |
240 gcc_assert (!cfg_blocks_empty_p ()); | |
241 gcc_assert (bb); | |
242 | |
243 cfg_blocks_head = ((cfg_blocks_head + 1) | |
244 % VEC_length (basic_block, cfg_blocks)); | |
245 --cfg_blocks_num; | |
246 RESET_BIT (bb_in_list, bb->index); | |
247 | |
248 return bb; | |
249 } | |
250 | |
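/* Editor's note (worked example, not part of the original sources):
   suppose the CFG_BLOCKS vector has length 4 and currently holds two
   blocks, with CFG_BLOCKS_HEAD = 1 and CFG_BLOCKS_TAIL = 2.  A new
   block with at least as many predecessors as the block at the head is
   appended at index (2 + 1) % 4 = 3 and is therefore dequeued last,
   while a block with fewer predecessors is stored at the new head,
   index 0, and is returned by the very next cfg_blocks_get.  Only when
   all four slots are occupied does cfg_blocks_add double the vector,
   resetting the head to index 0 and the tail to the old length.  */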
251 | |
252 /* We have just defined a new value for VAR. If IS_VARYING is true, | |
253 add all immediate uses of VAR to VARYING_SSA_EDGES, otherwise add | |
254 them to INTERESTING_SSA_EDGES. */ | |
255 | |
256 static void | |
257 add_ssa_edge (tree var, bool is_varying) | |
258 { | |
259 imm_use_iterator iter; | |
260 use_operand_p use_p; | |
261 | |
262 FOR_EACH_IMM_USE_FAST (use_p, iter, var) | |
263 { | |
264 gimple use_stmt = USE_STMT (use_p); | |
265 | |
266 if (prop_simulate_again_p (use_stmt) | |
267 && !gimple_plf (use_stmt, STMT_IN_SSA_EDGE_WORKLIST)) | |
268 { | |
269 gimple_set_plf (use_stmt, STMT_IN_SSA_EDGE_WORKLIST, true); | |
270 if (is_varying) | |
271 VEC_safe_push (gimple, gc, varying_ssa_edges, use_stmt); | |
272 else | |
273 VEC_safe_push (gimple, gc, interesting_ssa_edges, use_stmt); | |
274 } | |
275 } | |
276 } | |
277 | |
278 | |
279 /* Add edge E to the control flow worklist. */ | |
280 | |
281 static void | |
282 add_control_edge (edge e) | |
283 { | |
284 basic_block bb = e->dest; | |
285 if (bb == EXIT_BLOCK_PTR) | |
286 return; | |
287 | |
288 /* If the edge had already been executed, skip it. */ | |
289 if (e->flags & EDGE_EXECUTABLE) | |
290 return; | |
291 | |
292 e->flags |= EDGE_EXECUTABLE; | |
293 | |
294 /* If the block is already in the list, we're done. */ | |
295 if (TEST_BIT (bb_in_list, bb->index)) | |
296 return; | |
297 | |
298 cfg_blocks_add (bb); | |
299 | |
300 if (dump_file && (dump_flags & TDF_DETAILS)) | |
301 fprintf (dump_file, "Adding Destination of edge (%d -> %d) to worklist\n\n", | |
302 e->src->index, e->dest->index); | |
303 } | |
304 | |
305 | |
306 /* Simulate the execution of STMT and update the work lists accordingly. */ | |
307 | |
308 static void | |
309 simulate_stmt (gimple stmt) | |
310 { | |
311 enum ssa_prop_result val = SSA_PROP_NOT_INTERESTING; | |
312 edge taken_edge = NULL; | |
313 tree output_name = NULL_TREE; | |
314 | |
315 /* Don't bother visiting statements that are already | |
316 considered varying by the propagator. */ | |
317 if (!prop_simulate_again_p (stmt)) | |
318 return; | |
319 | |
320 if (gimple_code (stmt) == GIMPLE_PHI) | |
321 { | |
322 val = ssa_prop_visit_phi (stmt); | |
323 output_name = gimple_phi_result (stmt); | |
324 } | |
325 else | |
326 val = ssa_prop_visit_stmt (stmt, &taken_edge, &output_name); | |
327 | |
328 if (val == SSA_PROP_VARYING) | |
329 { | |
330 prop_set_simulate_again (stmt, false); | |
331 | |
332 /* If the statement produced a new varying value, add the SSA | |
333 edges coming out of OUTPUT_NAME. */ | |
334 if (output_name) | |
335 add_ssa_edge (output_name, true); | |
336 | |
337 /* If STMT transfers control out of its basic block, add | |
338 all outgoing edges to the work list. */ | |
339 if (stmt_ends_bb_p (stmt)) | |
340 { | |
341 edge e; | |
342 edge_iterator ei; | |
343 basic_block bb = gimple_bb (stmt); | |
344 FOR_EACH_EDGE (e, ei, bb->succs) | |
345 add_control_edge (e); | |
346 } | |
347 } | |
348 else if (val == SSA_PROP_INTERESTING) | |
349 { | |
350 /* If the statement produced a new value, add the SSA edges coming |
351 out of OUTPUT_NAME. */ | |
352 if (output_name) | |
353 add_ssa_edge (output_name, false); | |
354 | |
355 /* If we know which edge is going to be taken out of this block, | |
356 add it to the CFG work list. */ | |
357 if (taken_edge) | |
358 add_control_edge (taken_edge); | |
359 } | |
360 } | |
361 | |
362 /* Process an SSA edge worklist. WORKLIST is the SSA edge worklist to | |
363 drain. This pops statements off the given WORKLIST and processes | |
364 them until there are no more statements on WORKLIST. | |
365 We take a pointer to WORKLIST because it may be reallocated when an | |
366 SSA edge is added to it in simulate_stmt. */ | |
367 | |
368 static void | |
369 process_ssa_edge_worklist (VEC(gimple,gc) **worklist) | |
370 { | |
371 /* Drain the entire worklist. */ | |
372 while (VEC_length (gimple, *worklist) > 0) | |
373 { | |
374 basic_block bb; | |
375 | |
376 /* Pull the statement to simulate off the worklist. */ | |
377 gimple stmt = VEC_pop (gimple, *worklist); | |
378 | |
379 /* If this statement was already visited by simulate_block, then | |
380 we don't need to visit it again here. */ | |
381 if (!gimple_plf (stmt, STMT_IN_SSA_EDGE_WORKLIST)) | |
382 continue; | |
383 | |
384 /* STMT is no longer in a worklist. */ | |
385 gimple_set_plf (stmt, STMT_IN_SSA_EDGE_WORKLIST, false); | |
386 | |
387 if (dump_file && (dump_flags & TDF_DETAILS)) | |
388 { | |
389 fprintf (dump_file, "\nSimulating statement (from ssa_edges): "); | |
390 print_gimple_stmt (dump_file, stmt, 0, dump_flags); | |
391 } | |
392 | |
393 bb = gimple_bb (stmt); | |
394 | |
395 /* PHI nodes are always visited, regardless of whether or not | |
396 the destination block is executable. Otherwise, visit the | |
397 statement only if its block is marked executable. */ | |
398 if (gimple_code (stmt) == GIMPLE_PHI | |
399 || TEST_BIT (executable_blocks, bb->index)) | |
400 simulate_stmt (stmt); | |
401 } | |
402 } | |
403 | |
404 | |
405 /* Simulate the execution of BLOCK. Evaluate the statement associated | |
406 with each variable reference inside the block. */ | |
407 | |
408 static void | |
409 simulate_block (basic_block block) | |
410 { | |
411 gimple_stmt_iterator gsi; | |
412 | |
413 /* There is nothing to do for the exit block. */ | |
414 if (block == EXIT_BLOCK_PTR) | |
415 return; | |
416 | |
417 if (dump_file && (dump_flags & TDF_DETAILS)) | |
418 fprintf (dump_file, "\nSimulating block %d\n", block->index); | |
419 | |
420 /* Always simulate PHI nodes, even if we have simulated this block | |
421 before. */ | |
422 for (gsi = gsi_start_phis (block); !gsi_end_p (gsi); gsi_next (&gsi)) | |
423 simulate_stmt (gsi_stmt (gsi)); | |
424 | |
425 /* If this is the first time we've simulated this block, then we | |
426 must simulate each of its statements. */ | |
427 if (!TEST_BIT (executable_blocks, block->index)) | |
428 { | |
429 gimple_stmt_iterator j; | |
430 unsigned int normal_edge_count; | |
431 edge e, normal_edge; | |
432 edge_iterator ei; | |
433 | |
434 /* Note that we have simulated this block. */ | |
435 SET_BIT (executable_blocks, block->index); | |
436 | |
437 for (j = gsi_start_bb (block); !gsi_end_p (j); gsi_next (&j)) | |
438 { | |
439 gimple stmt = gsi_stmt (j); | |
440 | |
441 /* If this statement is already in the worklist then | |
442 "cancel" it. The reevaluation implied by the worklist | |
443 entry will produce the same value we generate here and | |
444 thus reevaluating it again from the worklist is | |
445 pointless. */ | |
446 if (gimple_plf (stmt, STMT_IN_SSA_EDGE_WORKLIST)) | |
447 gimple_set_plf (stmt, STMT_IN_SSA_EDGE_WORKLIST, false); | |
448 | |
449 simulate_stmt (stmt); | |
450 } | |
451 | |
452 /* We cannot predict when abnormal and EH edges will be executed, so |
0 | 453 once a block is considered executable, we consider any |
454 outgoing abnormal edges as executable. | |
455 | |
456 TODO: This is not exactly true. Simplifying a statement might |
457 prove it non-throwing, and a computed goto can be handled when |
458 its destination is known. |
459 |
0 | 460 At the same time, if this block has only one successor that is |
461 reached by non-abnormal edges, then add that successor to the | |
462 worklist. */ | |
463 normal_edge_count = 0; | |
464 normal_edge = NULL; | |
465 FOR_EACH_EDGE (e, ei, block->succs) | |
466 { | |
467 if (e->flags & (EDGE_ABNORMAL | EDGE_EH)) |
0 | 468 add_control_edge (e); |
469 else | |
470 { | |
471 normal_edge_count++; | |
472 normal_edge = e; | |
473 } | |
474 } | |
475 | |
476 if (normal_edge_count == 1) | |
477 add_control_edge (normal_edge); | |
478 } | |
479 } | |
480 | |
481 | |
482 /* Initialize local data structures and work lists. */ | |
483 | |
484 static void | |
485 ssa_prop_init (void) | |
486 { | |
487 edge e; | |
488 edge_iterator ei; | |
489 basic_block bb; | |
490 | |
491 /* Worklists of SSA edges. */ | |
492 interesting_ssa_edges = VEC_alloc (gimple, gc, 20); | |
493 varying_ssa_edges = VEC_alloc (gimple, gc, 20); | |
494 | |
495 executable_blocks = sbitmap_alloc (last_basic_block); | |
496 sbitmap_zero (executable_blocks); | |
497 | |
498 bb_in_list = sbitmap_alloc (last_basic_block); | |
499 sbitmap_zero (bb_in_list); | |
500 | |
501 if (dump_file && (dump_flags & TDF_DETAILS)) | |
502 dump_immediate_uses (dump_file); | |
503 | |
504 cfg_blocks = VEC_alloc (basic_block, heap, 20); | |
505 VEC_safe_grow (basic_block, heap, cfg_blocks, 20); | |
506 | |
507 /* Initially assume that every edge in the CFG is not executable. | |
508 (including the edges coming out of ENTRY_BLOCK_PTR). */ | |
509 FOR_ALL_BB (bb) | |
510 { | |
511 gimple_stmt_iterator si; | |
512 | |
513 for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si)) | |
514 gimple_set_plf (gsi_stmt (si), STMT_IN_SSA_EDGE_WORKLIST, false); | |
515 |
0 | 516 for (si = gsi_start_phis (bb); !gsi_end_p (si); gsi_next (&si)) |
517 gimple_set_plf (gsi_stmt (si), STMT_IN_SSA_EDGE_WORKLIST, false); | |
518 | |
519 FOR_EACH_EDGE (e, ei, bb->succs) | |
520 e->flags &= ~EDGE_EXECUTABLE; | |
521 } | |
522 | |
523 /* Seed the algorithm by adding the successors of the entry block to the | |
524 edge worklist. */ | |
525 FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR->succs) | |
526 add_control_edge (e); | |
527 } | |
528 | |
529 | |
530 /* Free allocated storage. */ | |
531 | |
532 static void | |
533 ssa_prop_fini (void) | |
534 { | |
535 VEC_free (gimple, gc, interesting_ssa_edges); | |
536 VEC_free (gimple, gc, varying_ssa_edges); | |
537 VEC_free (basic_block, heap, cfg_blocks); | |
538 cfg_blocks = NULL; | |
539 sbitmap_free (bb_in_list); | |
540 sbitmap_free (executable_blocks); | |
541 } | |
542 | |
543 | |
544 /* Return true if EXPR is an acceptable right-hand-side for a | |
545 GIMPLE assignment. We validate the entire tree, not just | |
546 the root node, thus catching expressions that embed complex | |
547 operands that are not permitted in GIMPLE. This function | |
548 is needed because the folding routines in fold-const.c | |
549 may return such expressions in some cases, e.g., an array | |
550 access with an embedded index addition. It may make more | |
551 sense to have folding routines that are sensitive to the | |
552 constraints on GIMPLE operands, rather than abandoning any | |
553 attempt to fold if the usual folding turns out to be too |
554 aggressive. */ | |
555 | |
556 bool | |
557 valid_gimple_rhs_p (tree expr) | |
558 { | |
559 enum tree_code code = TREE_CODE (expr); | |
560 | |
561 switch (TREE_CODE_CLASS (code)) | |
562 { | |
563 case tcc_declaration: | |
564 if (!is_gimple_variable (expr)) | |
565 return false; | |
566 break; | |
567 | |
568 case tcc_constant: | |
569 /* All constants are ok. */ | |
570 break; | |
571 | |
572 case tcc_binary: | |
573 case tcc_comparison: | |
574 if (!is_gimple_val (TREE_OPERAND (expr, 0)) | |
575 || !is_gimple_val (TREE_OPERAND (expr, 1))) | |
576 return false; | |
577 break; | |
578 | |
579 case tcc_unary: | |
580 if (!is_gimple_val (TREE_OPERAND (expr, 0))) | |
581 return false; | |
582 break; | |
583 | |
584 case tcc_expression: | |
585 switch (code) | |
586 { | |
587 case ADDR_EXPR: | |
588 { | |
589 tree t; | |
590 if (is_gimple_min_invariant (expr)) | |
591 return true; | |
592 t = TREE_OPERAND (expr, 0); | |
593 while (handled_component_p (t)) | |
594 { | |
595 /* ??? More checks needed, see the GIMPLE verifier. */ | |
596 if ((TREE_CODE (t) == ARRAY_REF | |
597 || TREE_CODE (t) == ARRAY_RANGE_REF) | |
598 && !is_gimple_val (TREE_OPERAND (t, 1))) | |
599 return false; | |
600 t = TREE_OPERAND (t, 0); | |
601 } | |
602 if (!is_gimple_id (t)) | |
603 return false; | |
604 } | |
605 break; | |
606 | |
607 case TRUTH_NOT_EXPR: | |
608 if (!is_gimple_val (TREE_OPERAND (expr, 0))) | |
609 return false; | |
610 break; | |
611 | |
612 case TRUTH_AND_EXPR: | |
613 case TRUTH_XOR_EXPR: | |
614 case TRUTH_OR_EXPR: | |
615 if (!is_gimple_val (TREE_OPERAND (expr, 0)) | |
616 || !is_gimple_val (TREE_OPERAND (expr, 1))) | |
617 return false; | |
618 break; | |
619 | |
620 default: | |
621 return false; | |
622 } | |
623 break; | |
624 | |
625 case tcc_vl_exp: | |
626 return false; | |
627 | |
628 case tcc_exceptional: | |
629 if (code != SSA_NAME) | |
630 return false; | |
631 break; | |
632 | |
633 default: | |
634 return false; | |
635 } | |
636 | |
637 return true; | |
638 } | |
639 | |
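/* Editor's note (illustrative, not from the original sources): with
   SSA names a_1, b_2 and c_3 and a declared array x, expressions such
   as a_1 + b_2 or &x[a_1] satisfy valid_gimple_rhs_p, whereas
   a_1 + b_2 * c_3 does not, because the nested multiplication is not a
   GIMPLE value and would need its own statement after
   gimplification.  */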
640 | |
641 /* Return true if EXPR is a CALL_EXPR suitable for representation | |
642 as a single GIMPLE_CALL statement. If the arguments require | |
643 further gimplification, return false. */ | |
644 | |
645 bool | |
646 valid_gimple_call_p (tree expr) | |
647 { | |
648 unsigned i, nargs; | |
649 | |
650 if (TREE_CODE (expr) != CALL_EXPR) | |
651 return false; | |
652 | |
653 nargs = call_expr_nargs (expr); | |
654 for (i = 0; i < nargs; i++) | |
655 if (! is_gimple_operand (CALL_EXPR_ARG (expr, i))) | |
656 return false; | |
657 | |
658 return true; | |
659 } | |
660 | |
661 | |
662 /* Make SSA names defined by OLD_STMT point to NEW_STMT | |
663 as their defining statement. */ | |
664 | |
665 void | |
666 move_ssa_defining_stmt_for_defs (gimple new_stmt, gimple old_stmt) | |
667 { | |
668 tree var; | |
669 ssa_op_iter iter; | |
670 | |
671 if (gimple_in_ssa_p (cfun)) | |
672 { | |
673 /* Make defined SSA_NAMEs point to the new | |
674 statement as their definition. */ | |
675 FOR_EACH_SSA_TREE_OPERAND (var, old_stmt, iter, SSA_OP_ALL_DEFS) | |
676 { | |
677 if (TREE_CODE (var) == SSA_NAME) | |
678 SSA_NAME_DEF_STMT (var) = new_stmt; | |
679 } | |
680 } | |
681 } | |
682 | |
683 | |
684 /* Update a GIMPLE_CALL statement at iterator *SI_P to reflect the | |
685 value of EXPR, which is expected to be the result of folding the | |
686 call. This can only be done if EXPR is a CALL_EXPR with valid | |
687 GIMPLE operands as arguments, or if it is a suitable RHS expression | |
688 for a GIMPLE_ASSIGN. More complex expressions will require | |
689 gimplification, which will introduce additional statements. In this |
690 event, no update is performed, and the function returns false. | |
691 Note that we cannot mutate a GIMPLE_CALL in-place, so we always | |
692 replace the statement at *SI_P with an entirely new statement. | |
693 The new statement need not be a call, e.g., if the original call | |
694 folded to a constant. */ | |
695 | |
696 bool | |
697 update_call_from_tree (gimple_stmt_iterator *si_p, tree expr) | |
698 { | |
699 tree lhs; | |
700 | |
701 gimple stmt = gsi_stmt (*si_p); | |
702 | |
703 gcc_assert (is_gimple_call (stmt)); | |
704 | |
705 lhs = gimple_call_lhs (stmt); | |
706 | |
707 if (valid_gimple_call_p (expr)) | |
708 { | |
709 /* The call has simplified to another call. */ | |
710 tree fn = CALL_EXPR_FN (expr); | |
711 unsigned i; | |
712 unsigned nargs = call_expr_nargs (expr); | |
713 VEC(tree, heap) *args = NULL; | |
714 gimple new_stmt; | |
715 | |
716 if (nargs > 0) | |
717 { | |
718 args = VEC_alloc (tree, heap, nargs); | |
719 VEC_safe_grow (tree, heap, args, nargs); | |
720 |
0 | 721 for (i = 0; i < nargs; i++) |
722 VEC_replace (tree, args, i, CALL_EXPR_ARG (expr, i)); | |
723 } | |
724 | |
725 new_stmt = gimple_build_call_vec (fn, args); | |
726 gimple_call_set_lhs (new_stmt, lhs); | |
727 move_ssa_defining_stmt_for_defs (new_stmt, stmt); | |
728 gimple_set_vuse (new_stmt, gimple_vuse (stmt)); |
729 gimple_set_vdef (new_stmt, gimple_vdef (stmt)); |
0 | 730 gimple_set_location (new_stmt, gimple_location (stmt)); |
731 gsi_replace (si_p, new_stmt, false); | |
732 VEC_free (tree, heap, args); | |
733 | |
734 return true; | |
735 } | |
736 else if (valid_gimple_rhs_p (expr)) | |
737 { | |
738 gimple new_stmt; | |
739 | |
740 /* The call has simplified to an expression | |
741 that cannot be represented as a GIMPLE_CALL. */ | |
742 if (lhs) | |
743 { | |
744 /* A value is expected. | |
745 Introduce a new GIMPLE_ASSIGN statement. */ | |
746 STRIP_USELESS_TYPE_CONVERSION (expr); | |
747 new_stmt = gimple_build_assign (lhs, expr); | |
748 move_ssa_defining_stmt_for_defs (new_stmt, stmt); | |
749 gimple_set_vuse (new_stmt, gimple_vuse (stmt)); |
750 gimple_set_vdef (new_stmt, gimple_vdef (stmt)); |
0 | 751 } |
752 else if (!TREE_SIDE_EFFECTS (expr)) | |
753 { | |
754 /* No value is expected, and EXPR has no effect. | |
755 Replace it with an empty statement. */ | |
756 new_stmt = gimple_build_nop (); | |
757 unlink_stmt_vdef (stmt); |
758 release_defs (stmt); |
0 | 759 } |
760 else | |
761 { | |
762 /* No value is expected, but EXPR has an effect, | |
763 e.g., it could be a reference to a volatile | |
764 variable. Create an assignment statement | |
765 with a dummy (unused) lhs variable. */ | |
766 STRIP_USELESS_TYPE_CONVERSION (expr); | |
767 lhs = create_tmp_var (TREE_TYPE (expr), NULL); | |
768 new_stmt = gimple_build_assign (lhs, expr); | |
769 add_referenced_var (lhs); | |
770 lhs = make_ssa_name (lhs, new_stmt); | |
771 gimple_assign_set_lhs (new_stmt, lhs); | |
772 gimple_set_vuse (new_stmt, gimple_vuse (stmt)); |
773 gimple_set_vdef (new_stmt, gimple_vdef (stmt)); |
0 | 774 move_ssa_defining_stmt_for_defs (new_stmt, stmt); |
775 } | |
776 gimple_set_location (new_stmt, gimple_location (stmt)); | |
777 gsi_replace (si_p, new_stmt, false); | |
778 return true; | |
779 } | |
780 else | |
781 /* The call simplified to an expression that is | |
782 not a valid GIMPLE RHS. */ | |
783 return false; | |
784 } | |
785 | |
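/* Editor's example (hypothetical, not from the original sources): if a
   call such as x_1 = foo (y_2) folds to the constant 0, the GIMPLE_CALL
   cannot be updated in place; update_call_from_tree builds the
   assignment x_1 = 0, makes x_1 point to it as its defining statement,
   carries over the virtual operands and location, and replaces the
   call at *SI_P with it.  */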
786 | |
787 /* Entry point to the propagation engine. | |
788 | |
789 VISIT_STMT is called for every statement visited. | |
790 VISIT_PHI is called for every PHI node visited. */ | |
791 | |
792 void | |
793 ssa_propagate (ssa_prop_visit_stmt_fn visit_stmt, | |
794 ssa_prop_visit_phi_fn visit_phi) | |
795 { | |
796 ssa_prop_visit_stmt = visit_stmt; | |
797 ssa_prop_visit_phi = visit_phi; | |
798 | |
799 ssa_prop_init (); | |
800 | |
801 /* Iterate until the worklists are empty. */ | |
802 while (!cfg_blocks_empty_p () |
0 | 803 || VEC_length (gimple, interesting_ssa_edges) > 0 |
804 || VEC_length (gimple, varying_ssa_edges) > 0) | |
805 { | |
806 if (!cfg_blocks_empty_p ()) | |
807 { | |
808 /* Pull the next block to simulate off the worklist. */ | |
809 basic_block dest_block = cfg_blocks_get (); | |
810 simulate_block (dest_block); | |
811 } | |
812 | |
813 /* In order to move things to varying as quickly as | |
814 possible, process the VARYING_SSA_EDGES worklist first. */ |
815 process_ssa_edge_worklist (&varying_ssa_edges); | |
816 | |
817 /* Now process the INTERESTING_SSA_EDGES worklist. */ | |
818 process_ssa_edge_worklist (&interesting_ssa_edges); | |
819 } | |
820 | |
821 ssa_prop_fini (); | |
822 } | |
823 | |
824 | |
825 /* Return true if STMT is of the form 'mem_ref = RHS', where 'mem_ref' | |
826 is a non-volatile pointer dereference, a structure reference or a | |
827 reference to a single _DECL. Ignore volatile memory references | |
828 because they are not interesting for the optimizers. */ | |
829 | |
830 bool | |
831 stmt_makes_single_store (gimple stmt) | |
832 { | |
833 tree lhs; | |
834 | |
835 if (gimple_code (stmt) != GIMPLE_ASSIGN | |
836 && gimple_code (stmt) != GIMPLE_CALL) | |
837 return false; | |
838 | |
839 if (!gimple_vdef (stmt)) |
0 | 840 return false; |
841 | |
842 lhs = gimple_get_lhs (stmt); | |
843 | |
844 /* A call statement may have a null LHS. */ | |
845 if (!lhs) | |
846 return false; | |
847 | |
848 return (!TREE_THIS_VOLATILE (lhs) | |
849 && (DECL_P (lhs) | |
850 || REFERENCE_CLASS_P (lhs))); | |
851 } | |
852 | |
853 | |
854 /* Propagation statistics. */ | |
855 struct prop_stats_d | |
856 { | |
857 long num_const_prop; | |
858 long num_copy_prop; | |
859 long num_stmts_folded; |
0 | 860 long num_dce; |
861 }; | |
862 | |
863 static struct prop_stats_d prop_stats; | |
864 | |
865 /* Replace USE references in statement STMT with the values stored in | |
866 PROP_VALUE. Return true if at least one reference was replaced. */ | |
867 | |
868 static bool | |
869 replace_uses_in (gimple stmt, prop_value_t *prop_value) | |
870 { | |
871 bool replaced = false; | |
872 use_operand_p use; | |
873 ssa_op_iter iter; | |
874 | |
875 FOR_EACH_SSA_USE_OPERAND (use, stmt, iter, SSA_OP_USE) | |
876 { | |
877 tree tuse = USE_FROM_PTR (use); | |
878 tree val = prop_value[SSA_NAME_VERSION (tuse)].value; | |
879 | |
880 if (val == tuse || val == NULL_TREE) | |
881 continue; | |
882 | |
883 if (gimple_code (stmt) == GIMPLE_ASM | |
884 && !may_propagate_copy_into_asm (tuse)) | |
885 continue; | |
886 | |
887 if (!may_propagate_copy (tuse, val)) | |
888 continue; | |
889 | |
890 if (TREE_CODE (val) != SSA_NAME) | |
891 prop_stats.num_const_prop++; | |
892 else | |
893 prop_stats.num_copy_prop++; | |
894 | |
895 propagate_value (use, val); | |
896 | |
897 replaced = true; | |
898 } | |
899 | |
900 return replaced; | |
901 } | |
902 | |
903 | |
904 /* Replace propagated values into all the arguments for PHI using the | |
905 values from PROP_VALUE. */ | |
906 | |
907 static void | |
908 replace_phi_args_in (gimple phi, prop_value_t *prop_value) | |
909 { | |
910 size_t i; | |
911 bool replaced = false; | |
912 | |
913 if (dump_file && (dump_flags & TDF_DETAILS)) | |
914 { | |
915 fprintf (dump_file, "Folding PHI node: "); | |
916 print_gimple_stmt (dump_file, phi, 0, TDF_SLIM); | |
917 } | |
918 | |
919 for (i = 0; i < gimple_phi_num_args (phi); i++) | |
920 { | |
921 tree arg = gimple_phi_arg_def (phi, i); | |
922 | |
923 if (TREE_CODE (arg) == SSA_NAME) | |
924 { | |
925 tree val = prop_value[SSA_NAME_VERSION (arg)].value; | |
926 | |
927 if (val && val != arg && may_propagate_copy (arg, val)) | |
928 { | |
929 if (TREE_CODE (val) != SSA_NAME) | |
930 prop_stats.num_const_prop++; | |
931 else | |
932 prop_stats.num_copy_prop++; | |
933 | |
934 propagate_value (PHI_ARG_DEF_PTR (phi, i), val); | |
935 replaced = true; | |
936 | |
937 /* If we propagated a copy and this argument flows | |
938 through an abnormal edge, update the replacement | |
939 accordingly. */ | |
940 if (TREE_CODE (val) == SSA_NAME | |
941 && gimple_phi_arg_edge (phi, i)->flags & EDGE_ABNORMAL) | |
942 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (val) = 1; | |
943 } | |
944 } | |
945 } | |
946 |
0 | 947 if (dump_file && (dump_flags & TDF_DETAILS)) |
948 { | |
949 if (!replaced) | |
950 fprintf (dump_file, "No folding possible\n"); | |
951 else | |
952 { | |
953 fprintf (dump_file, "Folded into: "); | |
954 print_gimple_stmt (dump_file, phi, 0, TDF_SLIM); | |
955 fprintf (dump_file, "\n"); | |
956 } | |
957 } | |
958 } | |
959 | |
960 | |
961 /* Perform final substitution and folding of propagated values. | |
962 | |
963 PROP_VALUE[I] contains the single value that should be substituted | |
964 at every use of SSA name N_I. If PROP_VALUE is NULL, no values are | |
965 substituted. | |
966 | |
967 If FOLD_FN is non-NULL the function will be invoked on all statements |
968 before propagating values for pass specific simplification. |
0 | 969 |
970 Return TRUE when something changed. */ | |
971 | |
972 bool | |
973 substitute_and_fold (prop_value_t *prop_value, ssa_prop_fold_stmt_fn fold_fn) |
0 | 974 { |
975 basic_block bb; | |
976 bool something_changed = false; | |
977 | |
978 if (prop_value == NULL && !fold_fn) |
0 | 979 return false; |
980 | |
981 if (dump_file && (dump_flags & TDF_DETAILS)) | |
982 fprintf (dump_file, "\nSubstituting values and folding statements\n\n"); | |
983 | |
984 memset (&prop_stats, 0, sizeof (prop_stats)); | |
985 | |
986 /* Substitute values in every statement of every basic block. */ | |
987 FOR_EACH_BB (bb) | |
988 { | |
989 gimple_stmt_iterator i; | |
990 | |
991 /* Propagate known values into PHI nodes. */ | |
992 if (prop_value) | |
993 for (i = gsi_start_phis (bb); !gsi_end_p (i); gsi_next (&i)) | |
994 replace_phi_args_in (gsi_stmt (i), prop_value); | |
995 | |
996 /* Propagate known values into stmts. Do a backward walk to expose | |
997 more trivially deletable stmts. */ | |
998 for (i = gsi_last_bb (bb); !gsi_end_p (i);) | |
999 { | |
1000 bool did_replace; | |
1001 gimple stmt = gsi_stmt (i); | |
1002 gimple old_stmt; | |
1003 enum gimple_code code = gimple_code (stmt); | |
1004 gimple_stmt_iterator oldi; |
1005 |
1006 oldi = i; |
1007 gsi_prev (&i); |
0 | 1008 |
1009 /* Ignore ASSERT_EXPRs. They are used by VRP to generate | |
1010 range information for names and they are discarded | |
1011 afterwards. */ | |
1012 | |
1013 if (code == GIMPLE_ASSIGN | |
1014 && TREE_CODE (gimple_assign_rhs1 (stmt)) == ASSERT_EXPR) | |
1015 continue; |
0 | 1016 |
1017 /* No point propagating into a stmt whose result is not used, | |
1018 but instead we might be able to remove a trivially dead stmt. */ | |
1019 if (gimple_get_lhs (stmt) | |
1020 && TREE_CODE (gimple_get_lhs (stmt)) == SSA_NAME | |
1021 && has_zero_uses (gimple_get_lhs (stmt)) | |
1022 && !stmt_could_throw_p (stmt) | |
1023 && !gimple_has_side_effects (stmt)) | |
1024 { | |
1025 gimple_stmt_iterator i2; | |
1026 | |
1027 if (dump_file && dump_flags & TDF_DETAILS) | |
1028 { | |
1029 fprintf (dump_file, "Removing dead stmt "); | |
1030 print_gimple_stmt (dump_file, stmt, 0, 0); | |
1031 fprintf (dump_file, "\n"); | |
1032 } | |
1033 prop_stats.num_dce++; | |
1034 i2 = gsi_for_stmt (stmt); | |
1035 gsi_remove (&i2, true); | |
1036 release_defs (stmt); | |
1037 continue; | |
1038 } | |
1039 | |
1040 /* Replace the statement with its folded version and mark it | |
1041 folded. */ | |
1042 did_replace = false; | |
1043 if (dump_file && (dump_flags & TDF_DETAILS)) | |
1044 { | |
1045 fprintf (dump_file, "Folding statement: "); | |
1046 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM); | |
1047 } | |
1048 | |
1049 old_stmt = stmt; |
1050 |
1051 /* Some statements may be simplified using propagator |
1052 specific information. Do this before propagating |
1053 into the stmt to not disturb pass specific information. */ |
1054 if (fold_fn |
1055 && (*fold_fn)(&oldi)) |
0 | 1056 { |
1057 did_replace = true; |
1058 prop_stats.num_stmts_folded++; |
0 | 1059 } |
1060 | |
1061 /* Only replace real uses if we couldn't fold the | |
1062 statement using value range information. */ | |
1063 if (prop_value | |
1064 && !did_replace) | |
1065 did_replace |= replace_uses_in (stmt, prop_value); | |
1066 | |
1067 /* If we made a replacement, fold the statement. */ | |
1068 if (did_replace) | |
1069 fold_stmt (&oldi); |
0 | 1070 |
1071 /* Now cleanup. */ | |
1072 if (did_replace) | |
1073 { | |
1074 stmt = gsi_stmt (oldi); |
0 | 1075 |
1076 /* If we cleaned up EH information from the statement, | |
1077 remove EH edges. */ | |
1078 if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt)) | |
1079 gimple_purge_dead_eh_edges (bb); | |
1080 | |
1081 if (is_gimple_assign (stmt) | |
1082 && (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)) | |
1083 == GIMPLE_SINGLE_RHS)) | |
1084 { | |
1085 tree rhs = gimple_assign_rhs1 (stmt); | |
1086 |
0 | 1087 if (TREE_CODE (rhs) == ADDR_EXPR) |
1088 recompute_tree_invariant_for_addr_expr (rhs); | |
1089 } | |
1090 | |
1091 /* Determine what needs to be done to update the SSA form. */ | |
1092 update_stmt (stmt); |
1093 if (!is_gimple_debug (stmt)) |
1094 something_changed = true; |
0 | 1095 } |
1096 | |
1097 if (dump_file && (dump_flags & TDF_DETAILS)) | |
1098 { | |
1099 if (did_replace) | |
1100 { | |
1101 fprintf (dump_file, "Folded into: "); | |
1102 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM); | |
1103 fprintf (dump_file, "\n"); | |
1104 } | |
1105 else | |
1106 fprintf (dump_file, "Not folded\n"); | |
1107 } | |
1108 } | |
1109 } | |
1110 | |
1111 statistics_counter_event (cfun, "Constants propagated", | |
1112 prop_stats.num_const_prop); | |
1113 statistics_counter_event (cfun, "Copies propagated", | |
1114 prop_stats.num_copy_prop); | |
1115 statistics_counter_event (cfun, "Statements folded", |
1116 prop_stats.num_stmts_folded); |
0 | 1117 statistics_counter_event (cfun, "Statements deleted", |
1118 prop_stats.num_dce); | |
1119 return something_changed; | |
1120 } | |
1121 | |
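/* Editor's sketch (not part of the original sources): how a pass would
   typically combine the engine above with substitute_and_fold.  The
   lattice is one prop_value_t per SSA name, indexed by
   SSA_NAME_VERSION as in replace_uses_in; my_visit_stmt, my_visit_phi
   and run_my_propagation are hypothetical, and XCNEWVEC/num_ssa_names
   are assumed to be the usual GCC allocation and SSA-table helpers:

     static prop_value_t *lattice;

     static void
     run_my_propagation (void)
     {
       lattice = XCNEWVEC (prop_value_t, num_ssa_names);
       ssa_propagate (my_visit_stmt, my_visit_phi);
       substitute_and_fold (lattice, NULL);
       free (lattice);
     }

   Passing NULL for FOLD_FN skips pass-specific folding; a pass that
   wants it would instead supply an ssa_prop_fold_stmt_fn, which
   receives a gimple_stmt_iterator for each statement and returns true
   only when it changed the statement in place.  */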
1122 #include "gt-tree-ssa-propagate.h" |