Mercurial > hg > CbC > CbC_gcc
comparison gcc/tree-ssa-loop-manip.c @ 0:a06113de4d67
first commit
author | kent <kent@cr.ie.u-ryukyu.ac.jp> |
---|---|
date | Fri, 17 Jul 2009 14:47:48 +0900 (2009-07-17) |
parents | |
children | 77e2b8dfacca |
comparison
equal
deleted
inserted
replaced
-1:000000000000 | 0:a06113de4d67 |
---|---|
1 /* High-level loop manipulation functions. | |
2 Copyright (C) 2004, 2005, 2006, 2007, 2008 Free Software Foundation, Inc. | |
3 | |
4 This file is part of GCC. | |
5 | |
6 GCC is free software; you can redistribute it and/or modify it | |
7 under the terms of the GNU General Public License as published by the | |
8 Free Software Foundation; either version 3, or (at your option) any | |
9 later version. | |
10 | |
11 GCC is distributed in the hope that it will be useful, but WITHOUT | |
12 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
13 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
14 for more details. | |
15 | |
16 You should have received a copy of the GNU General Public License | |
17 along with GCC; see the file COPYING3. If not see | |
18 <http://www.gnu.org/licenses/>. */ | |
19 | |
20 #include "config.h" | |
21 #include "system.h" | |
22 #include "coretypes.h" | |
23 #include "tm.h" | |
24 #include "tree.h" | |
25 #include "rtl.h" | |
26 #include "tm_p.h" | |
27 #include "hard-reg-set.h" | |
28 #include "basic-block.h" | |
29 #include "output.h" | |
30 #include "diagnostic.h" | |
31 #include "tree-flow.h" | |
32 #include "tree-dump.h" | |
33 #include "timevar.h" | |
34 #include "cfgloop.h" | |
35 #include "tree-pass.h" | |
36 #include "cfglayout.h" | |
37 #include "tree-scalar-evolution.h" | |
38 #include "params.h" | |
39 #include "tree-inline.h" | |
40 | |
/* Creates an induction variable with value BASE + STEP * iteration in LOOP.
   It is expected that neither BASE nor STEP are shared with other expressions
   (unless the sharing rules allow this).  Use VAR as a base var_decl for it
   (if NULL, a new temporary will be created).  The increment will occur at
   INCR_POS (after it if AFTER is true, before it otherwise).  INCR_POS and
   AFTER can be computed using standard_iv_increment_position.  The ssa versions
   of the variable before and after increment will be stored in VAR_BEFORE and
   VAR_AFTER (unless they are NULL).  */

void
create_iv (tree base, tree step, tree var, struct loop *loop,
	   gimple_stmt_iterator *incr_pos, bool after,
	   tree *var_before, tree *var_after)
{
  gimple stmt;
  tree initial, step1;
  gimple_seq stmts;
  tree vb, va;
  /* Default to an incrementing iv; may be flipped to MINUS_EXPR or
     POINTER_PLUS_EXPR below.  */
  enum tree_code incr_op = PLUS_EXPR;
  edge pe = loop_preheader_edge (loop);

  /* No base variable supplied -- create a fresh temporary for the iv.  */
  if (!var)
    {
      var = create_tmp_var (TREE_TYPE (base), "ivtmp");
      add_referenced_var (var);
    }

  /* VB is the ssa version live at the loop header (before the increment),
     VA the one after the increment.  */
  vb = make_ssa_name (var, NULL);
  if (var_before)
    *var_before = vb;
  va = make_ssa_name (var, NULL);
  if (var_after)
    *var_after = va;

  /* For easier readability of the created code, produce MINUS_EXPRs
     when suitable.  */
  if (TREE_CODE (step) == INTEGER_CST)
    {
      if (TYPE_UNSIGNED (TREE_TYPE (step)))
	{
	  /* For unsigned steps, prefer subtracting the (smaller)
	     negated constant when -STEP < STEP.  */
	  step1 = fold_build1 (NEGATE_EXPR, TREE_TYPE (step), step);
	  if (tree_int_cst_lt (step1, step))
	    {
	      incr_op = MINUS_EXPR;
	      step = step1;
	    }
	}
      else
	{
	  bool ovf;

	  /* For signed steps, only negate when the step is negative and
	     the negation cannot overflow (e.g. not INT_MIN).  */
	  if (!tree_expr_nonnegative_warnv_p (step, &ovf)
	      && may_negate_without_overflow_p (step))
	    {
	      incr_op = MINUS_EXPR;
	      step = fold_build1 (NEGATE_EXPR, TREE_TYPE (step), step);
	    }
	}
    }
  if (POINTER_TYPE_P (TREE_TYPE (base)))
    {
      /* Pointer arithmetic must use POINTER_PLUS_EXPR with a sizetype
	 offset; fold a pending MINUS into a negated offset instead.  */
      step = fold_convert (sizetype, step);
      if (incr_op == MINUS_EXPR)
	step = fold_build1 (NEGATE_EXPR, sizetype, step);
      incr_op = POINTER_PLUS_EXPR;
    }
  /* Gimplify the step if necessary.  We put the computations in front of the
     loop (i.e. the step should be loop invariant).  */
  step = force_gimple_operand (step, &stmts, true, NULL_TREE);
  if (stmts)
    gsi_insert_seq_on_edge_immediate (pe, stmts);

  /* The increment statement: VA = VB <incr_op> STEP.  */
  stmt = gimple_build_assign_with_ops (incr_op, va, vb, step);
  if (after)
    gsi_insert_after (incr_pos, stmt, GSI_NEW_STMT);
  else
    gsi_insert_before (incr_pos, stmt, GSI_NEW_STMT);

  /* Compute the initial value on the preheader edge, so it is evaluated
     exactly once before the loop.  */
  initial = force_gimple_operand (base, &stmts, true, var);
  if (stmts)
    gsi_insert_seq_on_edge_immediate (pe, stmts);

  /* Tie everything together with a phi in the header:
     VB = phi (INITIAL on preheader, VA on latch).  */
  stmt = create_phi_node (vb, loop->header);
  SSA_NAME_DEF_STMT (vb) = stmt;
  add_phi_arg (stmt, initial, loop_preheader_edge (loop));
  add_phi_arg (stmt, va, loop_latch_edge (loop));
}
128 | |
/* Add exit phis for the USE on EXIT.  */

static void
add_exit_phis_edge (basic_block exit, tree use)
{
  gimple phi, def_stmt = SSA_NAME_DEF_STMT (use);
  basic_block def_bb = gimple_bb (def_stmt);
  struct loop *def_loop;
  edge e;
  edge_iterator ei;

  /* Check that some of the edges entering the EXIT block exits a loop in
     that USE is defined.  */
  FOR_EACH_EDGE (e, ei, exit->preds)
    {
      def_loop = find_common_loop (def_bb->loop_father, e->src->loop_father);
      if (!flow_bb_inside_loop_p (def_loop, e->dest))
	break;
    }

  /* FOR_EACH_EDGE leaves E == NULL when the loop ran to completion, i.e.
     no entering edge actually leaves USE's defining loop -- nothing to do.  */
  if (!e)
    return;

  /* Create the loop-closing phi; give it a fresh ssa name as result and
     feed the original USE in from every predecessor.  */
  phi = create_phi_node (use, exit);
  create_new_def_for (gimple_phi_result (phi), phi,
		      gimple_phi_result_ptr (phi));
  FOR_EACH_EDGE (e, ei, exit->preds)
    add_phi_arg (phi, use, e);
}
158 | |
/* Add exit phis for VAR that is used in LIVEIN.
   Exits of the loops are stored in EXITS.  */

static void
add_exit_phis_var (tree var, bitmap livein, bitmap exits)
{
  bitmap def;
  unsigned index;
  basic_block def_bb = gimple_bb (SSA_NAME_DEF_STMT (var));
  bitmap_iterator bi;

  /* A gimple register is not live-in to its own defining block; a virtual
     operand may be (its def may appear after uses within the block), so
     force the bit on in that case.  */
  if (is_gimple_reg (var))
    bitmap_clear_bit (livein, def_bb->index);
  else
    bitmap_set_bit (livein, def_bb->index);

  /* Extend LIVEIN to the full set of blocks where VAR is live on entry,
     propagating backwards from the recorded use blocks to the def.  */
  def = BITMAP_ALLOC (NULL);
  bitmap_set_bit (def, def_bb->index);
  compute_global_livein (livein, def);
  BITMAP_FREE (def);

  /* Place a loop-closing phi at every loop exit block where VAR is live.  */
  EXECUTE_IF_AND_IN_BITMAP (exits, livein, 0, index, bi)
    {
      add_exit_phis_edge (BASIC_BLOCK (index), var);
    }
}
185 | |
186 /* Add exit phis for the names marked in NAMES_TO_RENAME. | |
187 Exits of the loops are stored in EXITS. Sets of blocks where the ssa | |
188 names are used are stored in USE_BLOCKS. */ | |
189 | |
190 static void | |
191 add_exit_phis (bitmap names_to_rename, bitmap *use_blocks, bitmap loop_exits) | |
192 { | |
193 unsigned i; | |
194 bitmap_iterator bi; | |
195 | |
196 EXECUTE_IF_SET_IN_BITMAP (names_to_rename, 0, i, bi) | |
197 { | |
198 add_exit_phis_var (ssa_name (i), use_blocks[i], loop_exits); | |
199 } | |
200 } | |
201 | |
202 /* Returns a bitmap of all loop exit edge targets. */ | |
203 | |
204 static bitmap | |
205 get_loops_exits (void) | |
206 { | |
207 bitmap exits = BITMAP_ALLOC (NULL); | |
208 basic_block bb; | |
209 edge e; | |
210 edge_iterator ei; | |
211 | |
212 FOR_EACH_BB (bb) | |
213 { | |
214 FOR_EACH_EDGE (e, ei, bb->preds) | |
215 if (e->src != ENTRY_BLOCK_PTR | |
216 && !flow_bb_inside_loop_p (e->src->loop_father, bb)) | |
217 { | |
218 bitmap_set_bit (exits, bb->index); | |
219 break; | |
220 } | |
221 } | |
222 | |
223 return exits; | |
224 } | |
225 | |
/* For USE in BB, if it is used outside of the loop it is defined in,
   mark it for rewrite.  Record basic block BB where it is used
   to USE_BLOCKS.  Record the ssa name index to NEED_PHIS bitmap.  */

static void
find_uses_to_rename_use (basic_block bb, tree use, bitmap *use_blocks,
			 bitmap need_phis)
{
  unsigned ver;
  basic_block def_bb;
  struct loop *def_loop;

  /* Constants and other non-ssa operands need no renaming.  */
  if (TREE_CODE (use) != SSA_NAME)
    return;

  /* We don't need to keep virtual operands in loop-closed form.  */
  if (!is_gimple_reg (use))
    return;

  ver = SSA_NAME_VERSION (use);
  def_bb = gimple_bb (SSA_NAME_DEF_STMT (use));
  /* Names without a defining block (e.g. default definitions) are
     defined "before" all loops.  */
  if (!def_bb)
    return;
  def_loop = def_bb->loop_father;

  /* If the definition is not inside a loop, it is not interesting.  */
  if (!loop_outer (def_loop))
    return;

  /* If the use is not outside of the loop it is defined in, it is not
     interesting.  */
  if (flow_bb_inside_loop_p (def_loop, bb))
    return;

  /* Lazily allocate the per-version use-block set.  */
  if (!use_blocks[ver])
    use_blocks[ver] = BITMAP_ALLOC (NULL);
  bitmap_set_bit (use_blocks[ver], bb->index);

  bitmap_set_bit (need_phis, ver);
}
266 | |
267 /* For uses in STMT, mark names that are used outside of the loop they are | |
268 defined to rewrite. Record the set of blocks in that the ssa | |
269 names are defined to USE_BLOCKS and the ssa names themselves to | |
270 NEED_PHIS. */ | |
271 | |
272 static void | |
273 find_uses_to_rename_stmt (gimple stmt, bitmap *use_blocks, bitmap need_phis) | |
274 { | |
275 ssa_op_iter iter; | |
276 tree var; | |
277 basic_block bb = gimple_bb (stmt); | |
278 | |
279 FOR_EACH_SSA_TREE_OPERAND (var, stmt, iter, SSA_OP_ALL_USES) | |
280 find_uses_to_rename_use (bb, var, use_blocks, need_phis); | |
281 } | |
282 | |
/* Marks names that are used in BB and outside of the loop they are
   defined in for rewrite.  Records the set of blocks in that the ssa
   names are defined to USE_BLOCKS.  Record the SSA names that will
   need exit PHIs in NEED_PHIS.  */

static void
find_uses_to_rename_bb (basic_block bb, bitmap *use_blocks, bitmap need_phis)
{
  gimple_stmt_iterator bsi;
  edge e;
  edge_iterator ei;

  /* Phi arguments in successor blocks are uses occurring "on" the edge,
     i.e. logically in BB, so scan them here with BB as the use block.  */
  FOR_EACH_EDGE (e, ei, bb->succs)
    for (bsi = gsi_start_phis (e->dest); !gsi_end_p (bsi); gsi_next (&bsi))
      find_uses_to_rename_use (bb, PHI_ARG_DEF_FROM_EDGE (gsi_stmt (bsi), e),
			       use_blocks, need_phis);

  /* Then scan the ordinary statements of BB itself.  */
  for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
    find_uses_to_rename_stmt (gsi_stmt (bsi), use_blocks, need_phis);
}
303 | |
304 /* Marks names that are used outside of the loop they are defined in | |
305 for rewrite. Records the set of blocks in that the ssa | |
306 names are defined to USE_BLOCKS. If CHANGED_BBS is not NULL, | |
307 scan only blocks in this set. */ | |
308 | |
309 static void | |
310 find_uses_to_rename (bitmap changed_bbs, bitmap *use_blocks, bitmap need_phis) | |
311 { | |
312 basic_block bb; | |
313 unsigned index; | |
314 bitmap_iterator bi; | |
315 | |
316 if (changed_bbs && !bitmap_empty_p (changed_bbs)) | |
317 { | |
318 EXECUTE_IF_SET_IN_BITMAP (changed_bbs, 0, index, bi) | |
319 { | |
320 find_uses_to_rename_bb (BASIC_BLOCK (index), use_blocks, need_phis); | |
321 } | |
322 } | |
323 else | |
324 { | |
325 FOR_EACH_BB (bb) | |
326 { | |
327 find_uses_to_rename_bb (bb, use_blocks, need_phis); | |
328 } | |
329 } | |
330 } | |
331 | |
/* Rewrites the program into a loop closed ssa form -- i.e. inserts extra
   phi nodes to ensure that no variable is used outside the loop it is
   defined in.

   This strengthening of the basic ssa form has several advantages:

   1) Updating it during unrolling/peeling/versioning is trivial, since
      we do not need to care about the uses outside of the loop.
   2) The behavior of all uses of an induction variable is the same.
      Without this, you need to distinguish the case when the variable
      is used outside of the loop it is defined in, for example

      for (i = 0; i < 100; i++)
	{
	  for (j = 0; j < 100; j++)
	    {
	      k = i + j;
	      use1 (k);
	    }
	  use2 (k);
	}

      Looking from the outer loop with the normal SSA form, the first use of k
      is not well-behaved, while the second one is an induction variable with
      base 99 and step 1.

   If CHANGED_BBS is not NULL, we look for uses outside loops only in
   the basic blocks in this set.

   UPDATE_FLAG is used in the call to update_ssa.  See
   TODO_update_ssa* for documentation.  */

void
rewrite_into_loop_closed_ssa (bitmap changed_bbs, unsigned update_flag)
{
  bitmap loop_exits;
  bitmap *use_blocks;
  unsigned i, old_num_ssa_names;
  bitmap names_to_rename;

  /* Record the state flag unconditionally; with at most the trivial
     loop (the function body) there is nothing to rewrite.  */
  loops_state_set (LOOP_CLOSED_SSA);
  if (number_of_loops () <= 1)
    return;

  loop_exits = get_loops_exits ();
  names_to_rename = BITMAP_ALLOC (NULL);

  /* If the pass has caused the SSA form to be out-of-date, update it
     now.  */
  update_ssa (update_flag);

  /* Snapshot the name count: USE_BLOCKS is indexed by ssa version, and
     add_exit_phis below may create new names past this point.  */
  old_num_ssa_names = num_ssa_names;
  use_blocks = XCNEWVEC (bitmap, old_num_ssa_names);

  /* Find the uses outside loops.  */
  find_uses_to_rename (changed_bbs, use_blocks, names_to_rename);

  /* Add the PHI nodes on exits of the loops for the names we need to
     rewrite.  */
  add_exit_phis (names_to_rename, use_blocks, loop_exits);

  for (i = 0; i < old_num_ssa_names; i++)
    BITMAP_FREE (use_blocks[i]);
  free (use_blocks);
  BITMAP_FREE (loop_exits);
  BITMAP_FREE (names_to_rename);

  /* Fix up all the names found to be used outside their original
     loops.  */
  update_ssa (TODO_update_ssa);
}
403 | |
404 /* Check invariants of the loop closed ssa form for the USE in BB. */ | |
405 | |
406 static void | |
407 check_loop_closed_ssa_use (basic_block bb, tree use) | |
408 { | |
409 gimple def; | |
410 basic_block def_bb; | |
411 | |
412 if (TREE_CODE (use) != SSA_NAME || !is_gimple_reg (use)) | |
413 return; | |
414 | |
415 def = SSA_NAME_DEF_STMT (use); | |
416 def_bb = gimple_bb (def); | |
417 gcc_assert (!def_bb | |
418 || flow_bb_inside_loop_p (def_bb->loop_father, bb)); | |
419 } | |
420 | |
421 /* Checks invariants of loop closed ssa form in statement STMT in BB. */ | |
422 | |
423 static void | |
424 check_loop_closed_ssa_stmt (basic_block bb, gimple stmt) | |
425 { | |
426 ssa_op_iter iter; | |
427 tree var; | |
428 | |
429 FOR_EACH_SSA_TREE_OPERAND (var, stmt, iter, SSA_OP_ALL_USES) | |
430 check_loop_closed_ssa_use (bb, var); | |
431 } | |
432 | |
/* Checks that invariants of the loop closed ssa form are preserved.  */

void
verify_loop_closed_ssa (void)
{
  basic_block bb;
  gimple_stmt_iterator bsi;
  gimple phi;
  edge e;
  edge_iterator ei;

  /* With at most the trivial loop there is nothing to verify.  */
  if (number_of_loops () <= 1)
    return;

  /* First make sure the basic ssa form itself is consistent.  */
  verify_ssa (false);

  FOR_EACH_BB (bb)
    {
      for (bsi = gsi_start_phis (bb); !gsi_end_p (bsi); gsi_next (&bsi))
	{
	  phi = gsi_stmt (bsi);
	  /* A phi argument is a use on the incoming edge, so check it
	     against the predecessor block, not BB itself.  */
	  FOR_EACH_EDGE (e, ei, bb->preds)
	    check_loop_closed_ssa_use (e->src,
				       PHI_ARG_DEF_FROM_EDGE (phi, e));
	}

      for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
	check_loop_closed_ssa_stmt (bb, gsi_stmt (bsi));
    }
}
463 | |
/* Split loop exit edge EXIT.  The things are a bit complicated by a need to
   preserve the loop closed ssa form.  The newly created block is returned.  */

basic_block
split_loop_exit_edge (edge exit)
{
  basic_block dest = exit->dest;
  basic_block bb = split_edge (exit);
  gimple phi, new_phi;
  tree new_name, name;
  use_operand_p op_p;
  gimple_stmt_iterator psi;

  /* After split_edge, BB sits between the loop and DEST; every phi in
     DEST now receives its former EXIT argument over BB's single
     successor edge.  */
  for (psi = gsi_start_phis (dest); !gsi_end_p (psi); gsi_next (&psi))
    {
      phi = gsi_stmt (psi);
      op_p = PHI_ARG_DEF_PTR_FROM_EDGE (phi, single_succ_edge (bb));

      name = USE_FROM_PTR (op_p);

      /* If the argument of the PHI node is a constant, we do not need
	 to keep it inside loop.  */
      if (TREE_CODE (name) != SSA_NAME)
	continue;

      /* Otherwise create an auxiliary phi node that will copy the value
	 of the SSA name out of the loop.  */
      new_name = duplicate_ssa_name (name, NULL);
      new_phi = create_phi_node (new_name, bb);
      SSA_NAME_DEF_STMT (new_name) = new_phi;
      add_phi_arg (new_phi, name, exit);
      /* Redirect the downstream phi to use the loop-closing copy.  */
      SET_USE (op_p, new_name);
    }

  return bb;
}
500 | |
501 /* Returns the basic block in that statements should be emitted for induction | |
502 variables incremented at the end of the LOOP. */ | |
503 | |
504 basic_block | |
505 ip_end_pos (struct loop *loop) | |
506 { | |
507 return loop->latch; | |
508 } | |
509 | |
510 /* Returns the basic block in that statements should be emitted for induction | |
511 variables incremented just before exit condition of a LOOP. */ | |
512 | |
513 basic_block | |
514 ip_normal_pos (struct loop *loop) | |
515 { | |
516 gimple last; | |
517 basic_block bb; | |
518 edge exit; | |
519 | |
520 if (!single_pred_p (loop->latch)) | |
521 return NULL; | |
522 | |
523 bb = single_pred (loop->latch); | |
524 last = last_stmt (bb); | |
525 if (!last | |
526 || gimple_code (last) != GIMPLE_COND) | |
527 return NULL; | |
528 | |
529 exit = EDGE_SUCC (bb, 0); | |
530 if (exit->dest == loop->latch) | |
531 exit = EDGE_SUCC (bb, 1); | |
532 | |
533 if (flow_bb_inside_loop_p (loop, exit->dest)) | |
534 return NULL; | |
535 | |
536 return bb; | |
537 } | |
538 | |
539 /* Stores the standard position for induction variable increment in LOOP | |
540 (just before the exit condition if it is available and latch block is empty, | |
541 end of the latch block otherwise) to BSI. INSERT_AFTER is set to true if | |
542 the increment should be inserted after *BSI. */ | |
543 | |
544 void | |
545 standard_iv_increment_position (struct loop *loop, gimple_stmt_iterator *bsi, | |
546 bool *insert_after) | |
547 { | |
548 basic_block bb = ip_normal_pos (loop), latch = ip_end_pos (loop); | |
549 gimple last = last_stmt (latch); | |
550 | |
551 if (!bb | |
552 || (last && gimple_code (last) != GIMPLE_LABEL)) | |
553 { | |
554 *bsi = gsi_last_bb (latch); | |
555 *insert_after = true; | |
556 } | |
557 else | |
558 { | |
559 *bsi = gsi_last_bb (bb); | |
560 *insert_after = false; | |
561 } | |
562 } | |
563 | |
564 /* Copies phi node arguments for duplicated blocks. The index of the first | |
565 duplicated block is FIRST_NEW_BLOCK. */ | |
566 | |
567 static void | |
568 copy_phi_node_args (unsigned first_new_block) | |
569 { | |
570 unsigned i; | |
571 | |
572 for (i = first_new_block; i < (unsigned) last_basic_block; i++) | |
573 BASIC_BLOCK (i)->flags |= BB_DUPLICATED; | |
574 | |
575 for (i = first_new_block; i < (unsigned) last_basic_block; i++) | |
576 add_phi_args_after_copy_bb (BASIC_BLOCK (i)); | |
577 | |
578 for (i = first_new_block; i < (unsigned) last_basic_block; i++) | |
579 BASIC_BLOCK (i)->flags &= ~BB_DUPLICATED; | |
580 } | |
581 | |
582 | |
/* The same as cfgloopmanip.c:duplicate_loop_to_header_edge, but also
   updates the PHI nodes at start of the copied region.  In order to
   achieve this, only loops whose exits all lead to the same location
   are handled.

   Notice that we do not completely update the SSA web after
   duplication.  The caller is responsible for calling update_ssa
   after the loop has been duplicated.  */

bool
gimple_duplicate_loop_to_header_edge (struct loop *loop, edge e,
				      unsigned int ndupl, sbitmap wont_exit,
				      edge orig, VEC (edge, heap) **to_remove,
				      int flags)
{
  unsigned first_new_block;

  /* The phi-argument fixup below requires simple latches and
     preheaders; refuse to duplicate otherwise.  */
  if (!loops_state_satisfies_p (LOOPS_HAVE_SIMPLE_LATCHES))
    return false;
  if (!loops_state_satisfies_p (LOOPS_HAVE_PREHEADERS))
    return false;

#ifdef ENABLE_CHECKING
  if (loops_state_satisfies_p (LOOP_CLOSED_SSA))
    verify_loop_closed_ssa ();
#endif

  /* Blocks created by the duplication get indices >= FIRST_NEW_BLOCK.  */
  first_new_block = last_basic_block;
  if (!duplicate_loop_to_header_edge (loop, e, ndupl, wont_exit,
				      orig, to_remove, flags))
    return false;

  /* Readd the removed phi args for e.  */
  flush_pending_stmts (e);

  /* Copy the phi node arguments.  */
  copy_phi_node_args (first_new_block);

  /* Scalar evolution info refers to the old CFG; invalidate it.  */
  scev_reset ();

  return true;
}
625 | |
/* Returns true if we can unroll LOOP FACTOR times.  Number
   of iterations of the loop is returned in NITER.  */

bool
can_unroll_loop_p (struct loop *loop, unsigned factor,
		   struct tree_niter_desc *niter)
{
  edge exit;

  /* Check whether unrolling is possible.  We only want to unroll loops
     for that we are able to determine number of iterations.  We also
     want to split the extra iterations of the loop from its end,
     therefore we require that the loop has precisely one
     exit.  */

  exit = single_dom_exit (loop);
  if (!exit)
    return false;

  if (!number_of_iterations_exit (loop, exit, niter, false)
      || niter->cmp == ERROR_MARK
      /* Scalar evolutions analysis might have copy propagated
	 the abnormal ssa names into these expressions, hence
	 emitting the computations based on them during loop
	 unrolling might create overlapping life ranges for
	 them, and failures in out-of-ssa.  */
      || contains_abnormal_ssa_name_p (niter->may_be_zero)
      || contains_abnormal_ssa_name_p (niter->control.base)
      || contains_abnormal_ssa_name_p (niter->control.step)
      || contains_abnormal_ssa_name_p (niter->bound))
    return false;

  /* And of course, we must be able to duplicate the loop.  */
  if (!can_duplicate_loop_p (loop))
    return false;

  /* The final loop should be small enough.  */
  if (tree_num_loop_insns (loop, &eni_size_weights) * factor
      > (unsigned) PARAM_VALUE (PARAM_MAX_UNROLLED_INSNS))
    return false;

  return true;
}
669 | |
/* Determines the conditions that control execution of LOOP unrolled FACTOR
   times.  DESC is number of iterations of LOOP.  ENTER_COND is set to
   condition that must be true if the main loop can be entered.
   EXIT_BASE, EXIT_STEP, EXIT_CMP and EXIT_BOUND are set to values describing
   how the exit from the unrolled loop should be controlled.  */

static void
determine_exit_conditions (struct loop *loop, struct tree_niter_desc *desc,
			   unsigned factor, tree *enter_cond,
			   tree *exit_base, tree *exit_step,
			   enum tree_code *exit_cmp, tree *exit_bound)
{
  gimple_seq stmts;
  tree base = desc->control.base;
  tree step = desc->control.step;
  tree bound = desc->bound;
  tree type = TREE_TYPE (step);
  tree bigstep, delta;
  tree min = lower_bound_in_type (type, type);
  tree max = upper_bound_in_type (type, type);
  enum tree_code cmp = desc->cmp;
  tree cond = boolean_true_node, assum;

  /* For pointers, do the arithmetics in the type of step (sizetype).  */
  base = fold_convert (type, base);
  bound = fold_convert (type, bound);

  /* Initialize the outputs to conservative defaults ("never enter the
     unrolled loop") before any analysis.  */
  *enter_cond = boolean_false_node;
  *exit_base = NULL_TREE;
  *exit_step = NULL_TREE;
  *exit_cmp = ERROR_MARK;
  *exit_bound = NULL_TREE;
  gcc_assert (cmp != ERROR_MARK);

  /* We only need to be correct when we answer question
     "Do at least FACTOR more iterations remain?" in the unrolled loop.
     Thus, transforming BASE + STEP * i <> BOUND to
     BASE + STEP * i < BOUND is ok.  */
  if (cmp == NE_EXPR)
    {
      if (tree_int_cst_sign_bit (step))
	cmp = GT_EXPR;
      else
	cmp = LT_EXPR;
    }
  else if (cmp == LT_EXPR)
    {
      /* A < comparison only makes sense with a positive step.  */
      gcc_assert (!tree_int_cst_sign_bit (step));
    }
  else if (cmp == GT_EXPR)
    {
      /* A > comparison only makes sense with a negative step.  */
      gcc_assert (tree_int_cst_sign_bit (step));
    }
  else
    gcc_unreachable ();

  /* The main body of the loop may be entered iff:

     1) desc->may_be_zero is false.
     2) it is possible to check that there are at least FACTOR iterations
	of the loop, i.e., BOUND - step * FACTOR does not overflow.
     3) # of iterations is at least FACTOR  */

  if (!integer_zerop (desc->may_be_zero))
    cond = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
			invert_truthvalue (desc->may_be_zero),
			cond);

  /* BIGSTEP is the step of the unrolled loop; DELTA = BIGSTEP - STEP is
     the amount by which BOUND must be shifted so the exit test stays
     inside the value range of TYPE.  */
  bigstep = fold_build2 (MULT_EXPR, type, step,
			 build_int_cst_type (type, factor));
  delta = fold_build2 (MINUS_EXPR, type, bigstep, step);
  /* Condition 2): BOUND - DELTA must not wrap past MIN/MAX of TYPE.  */
  if (cmp == LT_EXPR)
    assum = fold_build2 (GE_EXPR, boolean_type_node,
			 bound,
			 fold_build2 (PLUS_EXPR, type, min, delta));
  else
    assum = fold_build2 (LE_EXPR, boolean_type_node,
			 bound,
			 fold_build2 (PLUS_EXPR, type, max, delta));
  cond = fold_build2 (TRUTH_AND_EXPR, boolean_type_node, assum, cond);

  /* Condition 3): with the shifted bound, BASE cmp BOUND holds exactly
     when at least FACTOR iterations remain.  */
  bound = fold_build2 (MINUS_EXPR, type, bound, delta);
  assum = fold_build2 (cmp, boolean_type_node, base, bound);
  cond = fold_build2 (TRUTH_AND_EXPR, boolean_type_node, assum, cond);

  /* Materialize the entry condition on the preheader edge.  */
  cond = force_gimple_operand (unshare_expr (cond), &stmts, false, NULL_TREE);
  if (stmts)
    gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
  /* cond now may be a gimple comparison, which would be OK, but also any
     other gimple rhs (say a && b).  In this case we need to force it to
     operand.  */
  if (!is_gimple_condexpr (cond))
    {
      cond = force_gimple_operand (cond, &stmts, true, NULL_TREE);
      if (stmts)
	gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
    }
  *enter_cond = cond;

  /* Likewise materialize BASE and the shifted BOUND for the exit test.  */
  base = force_gimple_operand (unshare_expr (base), &stmts, true, NULL_TREE);
  if (stmts)
    gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);
  bound = force_gimple_operand (unshare_expr (bound), &stmts, true, NULL_TREE);
  if (stmts)
    gsi_insert_seq_on_edge_immediate (loop_preheader_edge (loop), stmts);

  *exit_base = base;
  *exit_step = bigstep;
  *exit_cmp = cmp;
  *exit_bound = bound;
}
781 | |
782 /* Scales the frequencies of all basic blocks in LOOP that are strictly | |
783 dominated by BB by NUM/DEN. */ | |
784 | |
785 static void | |
786 scale_dominated_blocks_in_loop (struct loop *loop, basic_block bb, | |
787 int num, int den) | |
788 { | |
789 basic_block son; | |
790 | |
791 if (den == 0) | |
792 return; | |
793 | |
794 for (son = first_dom_son (CDI_DOMINATORS, bb); | |
795 son; | |
796 son = next_dom_son (CDI_DOMINATORS, son)) | |
797 { | |
798 if (!flow_bb_inside_loop_p (loop, son)) | |
799 continue; | |
800 scale_bbs_frequencies_int (&son, 1, num, den); | |
801 scale_dominated_blocks_in_loop (loop, son, num, den); | |
802 } | |
803 } | |
804 | |
805 /* Unroll LOOP FACTOR times. DESC describes number of iterations of LOOP. | |
806 EXIT is the exit of the loop to that DESC corresponds. | |
807 | |
808 If N is number of iterations of the loop and MAY_BE_ZERO is the condition | |
809 under that loop exits in the first iteration even if N != 0, | |
810 | |
811 while (1) | |
812 { | |
813 x = phi (init, next); | |
814 | |
815 pre; | |
816 if (st) | |
817 break; | |
818 post; | |
819 } | |
820 | |
821 becomes (with possibly the exit conditions formulated a bit differently, | |
822 avoiding the need to create a new iv): | |
823 | |
824 if (MAY_BE_ZERO || N < FACTOR) | |
825 goto rest; | |
826 | |
827 do | |
828 { | |
829 x = phi (init, next); | |
830 | |
831 pre; | |
832 post; | |
833 pre; | |
834 post; | |
835 ... | |
836 pre; | |
837 post; | |
838 N -= FACTOR; | |
839 | |
840 } while (N >= FACTOR); | |
841 | |
842 rest: | |
843 init' = phi (init, x); | |
844 | |
845 while (1) | |
846 { | |
847 x = phi (init', next); | |
848 | |
849 pre; | |
850 if (st) | |
851 break; | |
852 post; | |
853 } | |
854 | |
855 Before the loop is unrolled, TRANSFORM is called for it (only for the | |
856 unrolled loop, but not for its versioned copy). DATA is passed to | |
857 TRANSFORM. */ | |
858 | |
859 /* Probability in % that the unrolled loop is entered. Just a guess. */ | |
860 #define PROB_UNROLLED_LOOP_ENTERED 90 | |
861 | |
862 void | |
863 tree_transform_and_unroll_loop (struct loop *loop, unsigned factor, | |
864 edge exit, struct tree_niter_desc *desc, | |
865 transform_callback transform, | |
866 void *data) | |
867 { | |
868 gimple exit_if; | |
869 tree ctr_before, ctr_after; | |
870 tree enter_main_cond, exit_base, exit_step, exit_bound; | |
871 enum tree_code exit_cmp; | |
872 gimple phi_old_loop, phi_new_loop, phi_rest; | |
873 gimple_stmt_iterator psi_old_loop, psi_new_loop; | |
874 tree init, next, new_init, var; | |
875 struct loop *new_loop; | |
876 basic_block rest, exit_bb; | |
877 edge old_entry, new_entry, old_latch, precond_edge, new_exit; | |
878 edge new_nonexit, e; | |
879 gimple_stmt_iterator bsi; | |
880 use_operand_p op; | |
881 bool ok; | |
882 unsigned est_niter, prob_entry, scale_unrolled, scale_rest, freq_e, freq_h; | |
883 unsigned new_est_niter, i, prob; | |
884 unsigned irr = loop_preheader_edge (loop)->flags & EDGE_IRREDUCIBLE_LOOP; | |
885 sbitmap wont_exit; | |
886 VEC (edge, heap) *to_remove = NULL; | |
887 | |
888 est_niter = expected_loop_iterations (loop); | |
889 determine_exit_conditions (loop, desc, factor, | |
890 &enter_main_cond, &exit_base, &exit_step, | |
891 &exit_cmp, &exit_bound); | |
892 | |
893 /* Let us assume that the unrolled loop is quite likely to be entered. */ | |
894 if (integer_nonzerop (enter_main_cond)) | |
895 prob_entry = REG_BR_PROB_BASE; | |
896 else | |
897 prob_entry = PROB_UNROLLED_LOOP_ENTERED * REG_BR_PROB_BASE / 100; | |
898 | |
899 /* The values for scales should keep profile consistent, and somewhat close | |
900 to correct. | |
901 | |
902 TODO: The current value of SCALE_REST makes it appear that the loop that | |
903 is created by splitting the remaining iterations of the unrolled loop is | |
904 executed the same number of times as the original loop, and with the same | |
905 frequencies, which is obviously wrong. This does not appear to cause | |
906 problems, so we do not bother with fixing it for now. To make the profile | |
907 correct, we would need to change the probability of the exit edge of the | |
908 loop, and recompute the distribution of frequencies in its body because | |
909 of this change (scale the frequencies of blocks before and after the exit | |
910 by appropriate factors). */ | |
911 scale_unrolled = prob_entry; | |
912 scale_rest = REG_BR_PROB_BASE; | |
913 | |
914 new_loop = loop_version (loop, enter_main_cond, NULL, | |
915 prob_entry, scale_unrolled, scale_rest, true); | |
916 gcc_assert (new_loop != NULL); | |
917 update_ssa (TODO_update_ssa); | |
918 | |
919 /* Determine the probability of the exit edge of the unrolled loop. */ | |
920 new_est_niter = est_niter / factor; | |
921 | |
922 /* Without profile feedback, loops for that we do not know a better estimate | |
923 are assumed to roll 10 times. When we unroll such loop, it appears to | |
924 roll too little, and it may even seem to be cold. To avoid this, we | |
925 ensure that the created loop appears to roll at least 5 times (but at | |
926 most as many times as before unrolling). */ | |
927 if (new_est_niter < 5) | |
928 { | |
929 if (est_niter < 5) | |
930 new_est_niter = est_niter; | |
931 else | |
932 new_est_niter = 5; | |
933 } | |
934 | |
935 /* Prepare the cfg and update the phi nodes. Move the loop exit to the | |
936 loop latch (and make its condition dummy, for the moment). */ | |
937 rest = loop_preheader_edge (new_loop)->src; | |
938 precond_edge = single_pred_edge (rest); | |
939 split_edge (loop_latch_edge (loop)); | |
940 exit_bb = single_pred (loop->latch); | |
941 | |
942 /* Since the exit edge will be removed, the frequency of all the blocks | |
943 in the loop that are dominated by it must be scaled by | |
944 1 / (1 - exit->probability). */ | |
945 scale_dominated_blocks_in_loop (loop, exit->src, | |
946 REG_BR_PROB_BASE, | |
947 REG_BR_PROB_BASE - exit->probability); | |
948 | |
949 bsi = gsi_last_bb (exit_bb); | |
950 exit_if = gimple_build_cond (EQ_EXPR, integer_zero_node, | |
951 integer_zero_node, | |
952 NULL_TREE, NULL_TREE); | |
953 | |
954 gsi_insert_after (&bsi, exit_if, GSI_NEW_STMT); | |
955 new_exit = make_edge (exit_bb, rest, EDGE_FALSE_VALUE | irr); | |
956 rescan_loop_exit (new_exit, true, false); | |
957 | |
958 /* Set the probability of new exit to the same of the old one. Fix | |
959 the frequency of the latch block, by scaling it back by | |
960 1 - exit->probability. */ | |
961 new_exit->count = exit->count; | |
962 new_exit->probability = exit->probability; | |
963 new_nonexit = single_pred_edge (loop->latch); | |
964 new_nonexit->probability = REG_BR_PROB_BASE - exit->probability; | |
965 new_nonexit->flags = EDGE_TRUE_VALUE; | |
966 new_nonexit->count -= exit->count; | |
967 if (new_nonexit->count < 0) | |
968 new_nonexit->count = 0; | |
969 scale_bbs_frequencies_int (&loop->latch, 1, new_nonexit->probability, | |
970 REG_BR_PROB_BASE); | |
971 | |
972 old_entry = loop_preheader_edge (loop); | |
973 new_entry = loop_preheader_edge (new_loop); | |
974 old_latch = loop_latch_edge (loop); | |
975 for (psi_old_loop = gsi_start_phis (loop->header), | |
976 psi_new_loop = gsi_start_phis (new_loop->header); | |
977 !gsi_end_p (psi_old_loop); | |
978 gsi_next (&psi_old_loop), gsi_next (&psi_new_loop)) | |
979 { | |
980 phi_old_loop = gsi_stmt (psi_old_loop); | |
981 phi_new_loop = gsi_stmt (psi_new_loop); | |
982 | |
983 init = PHI_ARG_DEF_FROM_EDGE (phi_old_loop, old_entry); | |
984 op = PHI_ARG_DEF_PTR_FROM_EDGE (phi_new_loop, new_entry); | |
985 gcc_assert (operand_equal_for_phi_arg_p (init, USE_FROM_PTR (op))); | |
986 next = PHI_ARG_DEF_FROM_EDGE (phi_old_loop, old_latch); | |
987 | |
988 /* Prefer using original variable as a base for the new ssa name. | |
989 This is necessary for virtual ops, and useful in order to avoid | |
990 losing debug info for real ops. */ | |
991 if (TREE_CODE (next) == SSA_NAME) | |
992 var = SSA_NAME_VAR (next); | |
993 else if (TREE_CODE (init) == SSA_NAME) | |
994 var = SSA_NAME_VAR (init); | |
995 else | |
996 { | |
997 var = create_tmp_var (TREE_TYPE (init), "unrinittmp"); | |
998 add_referenced_var (var); | |
999 } | |
1000 | |
1001 new_init = make_ssa_name (var, NULL); | |
1002 phi_rest = create_phi_node (new_init, rest); | |
1003 SSA_NAME_DEF_STMT (new_init) = phi_rest; | |
1004 | |
1005 add_phi_arg (phi_rest, init, precond_edge); | |
1006 add_phi_arg (phi_rest, next, new_exit); | |
1007 SET_USE (op, new_init); | |
1008 } | |
1009 | |
1010 remove_path (exit); | |
1011 | |
1012 /* Transform the loop. */ | |
1013 if (transform) | |
1014 (*transform) (loop, data); | |
1015 | |
1016 /* Unroll the loop and remove the exits in all iterations except for the | |
1017 last one. */ | |
1018 wont_exit = sbitmap_alloc (factor); | |
1019 sbitmap_ones (wont_exit); | |
1020 RESET_BIT (wont_exit, factor - 1); | |
1021 | |
1022 ok = gimple_duplicate_loop_to_header_edge | |
1023 (loop, loop_latch_edge (loop), factor - 1, | |
1024 wont_exit, new_exit, &to_remove, DLTHE_FLAG_UPDATE_FREQ); | |
1025 free (wont_exit); | |
1026 gcc_assert (ok); | |
1027 | |
1028 for (i = 0; VEC_iterate (edge, to_remove, i, e); i++) | |
1029 { | |
1030 ok = remove_path (e); | |
1031 gcc_assert (ok); | |
1032 } | |
1033 VEC_free (edge, heap, to_remove); | |
1034 update_ssa (TODO_update_ssa); | |
1035 | |
1036 /* Ensure that the frequencies in the loop match the new estimated | |
1037 number of iterations, and change the probability of the new | |
1038 exit edge. */ | |
1039 freq_h = loop->header->frequency; | |
1040 freq_e = EDGE_FREQUENCY (loop_preheader_edge (loop)); | |
1041 if (freq_h != 0) | |
1042 scale_loop_frequencies (loop, freq_e * (new_est_niter + 1), freq_h); | |
1043 | |
1044 exit_bb = single_pred (loop->latch); | |
1045 new_exit = find_edge (exit_bb, rest); | |
1046 new_exit->count = loop_preheader_edge (loop)->count; | |
1047 new_exit->probability = REG_BR_PROB_BASE / (new_est_niter + 1); | |
1048 | |
1049 rest->count += new_exit->count; | |
1050 rest->frequency += EDGE_FREQUENCY (new_exit); | |
1051 | |
1052 new_nonexit = single_pred_edge (loop->latch); | |
1053 prob = new_nonexit->probability; | |
1054 new_nonexit->probability = REG_BR_PROB_BASE - new_exit->probability; | |
1055 new_nonexit->count = exit_bb->count - new_exit->count; | |
1056 if (new_nonexit->count < 0) | |
1057 new_nonexit->count = 0; | |
1058 if (prob > 0) | |
1059 scale_bbs_frequencies_int (&loop->latch, 1, new_nonexit->probability, | |
1060 prob); | |
1061 | |
1062 /* Finally create the new counter for number of iterations and add the new | |
1063 exit instruction. */ | |
1064 bsi = gsi_last_bb (exit_bb); | |
1065 exit_if = gsi_stmt (bsi); | |
1066 create_iv (exit_base, exit_step, NULL_TREE, loop, | |
1067 &bsi, false, &ctr_before, &ctr_after); | |
1068 gimple_cond_set_code (exit_if, exit_cmp); | |
1069 gimple_cond_set_lhs (exit_if, ctr_after); | |
1070 gimple_cond_set_rhs (exit_if, exit_bound); | |
1071 update_stmt (exit_if); | |
1072 | |
1073 #ifdef ENABLE_CHECKING | |
1074 verify_flow_info (); | |
1075 verify_dominators (CDI_DOMINATORS); | |
1076 verify_loop_structure (); | |
1077 verify_loop_closed_ssa (); | |
1078 #endif | |
1079 } | |
1080 | |
1081 /* Wrapper over tree_transform_and_unroll_loop for case we do not | |
1082 want to transform the loop before unrolling. The meaning | |
1083 of the arguments is the same as for tree_transform_and_unroll_loop. */ | |
1084 | |
1085 void | |
1086 tree_unroll_loop (struct loop *loop, unsigned factor, | |
1087 edge exit, struct tree_niter_desc *desc) | |
1088 { | |
1089 tree_transform_and_unroll_loop (loop, factor, exit, desc, | |
1090 NULL, NULL); | |
1091 } |