/* Perform doloop optimizations
   Copyright (C) 2004, 2005, 2006, 2007, 2008 Free Software Foundation,
   Inc.
   Based on code by Michael P. Hayes (m.hayes@elec.canterbury.ac.nz)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "flags.h"
#include "expr.h"
#include "hard-reg-set.h"
#include "basic-block.h"
#include "toplev.h"
#include "tm_p.h"
#include "cfgloop.h"
#include "output.h"
#include "params.h"
#include "target.h"

/* This module is used to modify loops with a determinable number of
   iterations to use special low-overhead looping instructions.

   It first validates whether the loop is well behaved and has a
   determinable number of iterations (either at compile or run-time).
   It then modifies the loop to use a low-overhead looping pattern as
   follows:

   1. A pseudo register is allocated as the loop iteration counter.

   2. The number of loop iterations is calculated and is stored
      in the loop counter.

   3. At the end of the loop, the jump insn is replaced by the
      doloop_end pattern.  The compare must remain because it might be
      used elsewhere.  Any loop-variable or condition-register updates
      that become dead will be eliminated by flow.

   4. An optional doloop_begin pattern is inserted at the top of the
      loop.

   TODO: The optimization should only be performed when either the biv used
   for the exit condition is not used at all except for the exit test, or if
   we do not have to change its value, since otherwise we have to add a new
   induction variable, which usually will not pay off (unless the cost of the
   doloop pattern is somehow extremely lower than the cost of compare & jump,
   or unless the bct register cannot be used for anything else but doloop
   -- ??? detect these cases).  */
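
/* As a rough illustration (not a literal transcript of what this pass
   emits), a counted source loop such as

       for (i = 0; i < n; i++)
         body ();

   is conceptually rewritten as

       count = n;               -- step 2: iteration count in a new pseudo
       do
         body ();
       while (--count != 0);    -- step 3: doloop_end replaces compare+jump

   with the decrement-and-branch at the bottom mapped onto the target's
   doloop_end instruction.  */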

#ifdef HAVE_doloop_end

/* Return the loop termination condition for PATTERN or zero
   if it is not a decrement and branch jump insn.  */

rtx
doloop_condition_get (rtx doloop_pat)
{
  rtx cmp;
  rtx inc;
  rtx reg;
  rtx inc_src;
  rtx condition;
  rtx pattern;

  /* The canonical doloop pattern we expect has one of the following
     forms:

     1)  (parallel [(set (pc) (if_then_else (condition)
                                            (label_ref (label))
                                            (pc)))
                    (set (reg) (plus (reg) (const_int -1)))
                    (additional clobbers and uses)])

     The branch must be the first entry of the parallel (also required
     by jump.c), and the second entry of the parallel must be a set of
     the loop counter register.  Some targets (IA-64) wrap the set of
     the loop counter in an if_then_else too.

     2)  (set (reg) (plus (reg) (const_int -1))
         (set (pc) (if_then_else (reg != 0)
                                 (label_ref (label))
                                 (pc))).  */

  pattern = PATTERN (doloop_pat);

  if (GET_CODE (pattern) != PARALLEL)
    {
      rtx cond;

      /* We expect the decrement to immediately precede the branch.  */

      if ((PREV_INSN (doloop_pat) == NULL_RTX)
          || !INSN_P (PREV_INSN (doloop_pat)))
        return 0;

      cmp = pattern;
      inc = PATTERN (PREV_INSN (doloop_pat));
      /* We expect the condition to be of the form (reg != 0)  */
      cond = XEXP (SET_SRC (cmp), 0);
      if (GET_CODE (cond) != NE || XEXP (cond, 1) != const0_rtx)
        return 0;
    }
  else
    {
      cmp = XVECEXP (pattern, 0, 0);
      inc = XVECEXP (pattern, 0, 1);
    }

  /* Check for (set (reg) (something)).  */
  if (GET_CODE (inc) != SET)
    return 0;
  reg = SET_DEST (inc);
  if (! REG_P (reg))
    return 0;

  /* Check if something = (plus (reg) (const_int -1)).
     On IA-64, this decrement is wrapped in an if_then_else.  */
  inc_src = SET_SRC (inc);
  if (GET_CODE (inc_src) == IF_THEN_ELSE)
    inc_src = XEXP (inc_src, 1);
  if (GET_CODE (inc_src) != PLUS
      || XEXP (inc_src, 0) != reg
      || XEXP (inc_src, 1) != constm1_rtx)
    return 0;

  /* Check for (set (pc) (if_then_else (condition)
                                       (label_ref (label))
                                       (pc))).  */
  if (GET_CODE (cmp) != SET
      || SET_DEST (cmp) != pc_rtx
      || GET_CODE (SET_SRC (cmp)) != IF_THEN_ELSE
      || GET_CODE (XEXP (SET_SRC (cmp), 1)) != LABEL_REF
      || XEXP (SET_SRC (cmp), 2) != pc_rtx)
    return 0;

  /* Extract loop termination condition.  */
  condition = XEXP (SET_SRC (cmp), 0);

  /* We expect a GE or NE comparison with 0 or 1.  */
  if ((GET_CODE (condition) != GE
       && GET_CODE (condition) != NE)
      || (XEXP (condition, 1) != const0_rtx
          && XEXP (condition, 1) != const1_rtx))
    return 0;

  if ((XEXP (condition, 0) == reg)
      || (GET_CODE (XEXP (condition, 0)) == PLUS
          && XEXP (XEXP (condition, 0), 0) == reg))
    {
      if (GET_CODE (pattern) != PARALLEL)
        /* The second form we expect:

             (set (reg) (plus (reg) (const_int -1))
             (set (pc) (if_then_else (reg != 0)
                                     (label_ref (label))
                                     (pc))).

           is equivalent to the following:

             (parallel [(set (pc) (if_then_else (reg != 1)
                                                (label_ref (label))
                                                (pc)))
                        (set (reg) (plus (reg) (const_int -1)))
                        (additional clobbers and uses)])

           So we return that form instead.  */
        condition = gen_rtx_fmt_ee (NE, VOIDmode, inc_src, const1_rtx);

      return condition;
    }

  /* ??? If a machine uses a funny comparison, we could return a
     canonicalized form here.  */

  return 0;
}

/* Return nonzero if the loop specified by LOOP is suitable for
   the use of special low-overhead looping instructions.  DESC
   describes the number of iterations of the loop.  */

static bool
doloop_valid_p (struct loop *loop, struct niter_desc *desc)
{
  basic_block *body = get_loop_body (loop), bb;
  rtx insn;
  unsigned i;
  bool result = true;

  /* Check for loops that may not terminate under special conditions.  */
  if (!desc->simple_p
      || desc->assumptions
      || desc->infinite)
    {
      /* There are some cases that would require special attention.
         For example if the comparison is LEU and the comparison value
         is UINT_MAX then the loop will not terminate.  Similarly, if the
         comparison code is GEU and the comparison value is 0, the
         loop will not terminate.

         If the absolute increment is not 1, the loop can be infinite
         even with LTU/GTU, e.g. for (i = 3; i > 0; i -= 2)

         ??? We could compute these conditions at run-time and have an
         additional jump around the loop to ensure an infinite loop.
         However, it is very unlikely that this is the intended
         behavior of the loop and checking for these rare boundary
         conditions would pessimize all other code.

         If the loop is executed only a few times, an extra check to
         restart the loop could use up most of the benefit of using a
         count register loop.  Note, however, that normally this
         restart branch would never execute, so it could be predicted
         well by the CPU.  We should generate the pessimistic code by
         default, and have an option, e.g. -funsafe-loops, that would
         enable count-register loops in this case.  */
      if (dump_file)
        fprintf (dump_file, "Doloop: Possible infinite iteration case.\n");
      result = false;
      goto cleanup;
    }

  for (i = 0; i < loop->num_nodes; i++)
    {
      bb = body[i];

      for (insn = BB_HEAD (bb);
           insn != NEXT_INSN (BB_END (bb));
           insn = NEXT_INSN (insn))
        {
          /* Different targets have different requirements for
             low-overhead looping.  Call the back end for each instruction
             within the loop to let it decide whether the insn prohibits a
             low-overhead loop.  If it does, the hook returns the reason,
             which is emitted to the dump file.  */
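          /* For example, the default version of this hook rejects loops
             that contain function calls.  */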
          const char *invalid = targetm.invalid_within_doloop (insn);
          if (invalid)
            {
              if (dump_file)
                fprintf (dump_file, "Doloop: %s\n", invalid);
              result = false;
              goto cleanup;
            }
        }
    }
  result = true;

 cleanup:
  free (body);

  return result;
}

/* Adds a test of COND jumping to DEST on edge *E and sets *E to the new
   fallthru edge.  If the condition is always false, do not do anything.
   If it is always true, redirect E to DEST and return false.  In all other
   cases, true is returned.  */

static bool
add_test (rtx cond, edge *e, basic_block dest)
{
  rtx seq, jump, label;
  enum machine_mode mode;
  rtx op0 = XEXP (cond, 0), op1 = XEXP (cond, 1);
  enum rtx_code code = GET_CODE (cond);
  basic_block bb;

  mode = GET_MODE (XEXP (cond, 0));
  if (mode == VOIDmode)
    mode = GET_MODE (XEXP (cond, 1));

  start_sequence ();
  op0 = force_operand (op0, NULL_RTX);
  op1 = force_operand (op1, NULL_RTX);
  label = block_label (dest);
  do_compare_rtx_and_jump (op0, op1, code, 0, mode, NULL_RTX, NULL_RTX, label);

  jump = get_last_insn ();
  if (!jump || !JUMP_P (jump))
    {
      /* The condition is always false and the jump was optimized out.  */
      end_sequence ();
      return true;
    }

  seq = get_insns ();
  end_sequence ();

  /* There always is at least the jump insn in the sequence.  */
  gcc_assert (seq != NULL_RTX);

  bb = split_edge_and_insert (*e, seq);
  *e = single_succ_edge (bb);

  if (any_uncondjump_p (jump))
    {
      /* The condition is always true.  */
      delete_insn (jump);
      redirect_edge_and_branch_force (*e, dest);
      return false;
    }

  JUMP_LABEL (jump) = label;

  /* The jump is supposed to handle an unlikely special case.  */
  add_reg_note (jump, REG_BR_PROB, const0_rtx);

  LABEL_NUSES (label)++;

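  /* Record the CFG edge for the new conditional jump to DEST.  */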
  make_edge (bb, dest, (*e)->flags & ~EDGE_FALLTHRU);
  return true;
}

/* Modify the loop to use the low-overhead looping insn where LOOP
   describes the loop, DESC describes the number of iterations of the
   loop, and DOLOOP_SEQ is the low-overhead looping insn to emit at the
   end of the loop.  CONDITION is the condition extracted from
   DOLOOP_SEQ.  COUNT is the number of iterations of the LOOP.
   ZERO_EXTEND_P says to zero-extend COUNT from FROM_MODE to word_mode
   after it has been incremented.  */

static void
doloop_modify (struct loop *loop, struct niter_desc *desc,
               rtx doloop_seq, rtx condition, rtx count,
               bool zero_extend_p, enum machine_mode from_mode)
{
  rtx counter_reg;
  rtx tmp, noloop = NULL_RTX;
  rtx sequence;
  rtx jump_insn;
  rtx jump_label;
  int nonneg = 0;
  bool increment_count;
  basic_block loop_end = desc->out_edge->src;
  enum machine_mode mode;
  rtx true_prob_val;

  jump_insn = BB_END (loop_end);

  if (dump_file)
    {
      fprintf (dump_file, "Doloop: Inserting doloop pattern (");
      if (desc->const_iter)
        fprintf (dump_file, HOST_WIDEST_INT_PRINT_DEC, desc->niter);
      else
        fputs ("runtime", dump_file);
      fputs (" iterations).\n", dump_file);
    }

  /* Get the probability of the original branch.  If it exists we would
     need to update REG_BR_PROB of the new jump_insn.  */
  true_prob_val = find_reg_note (jump_insn, REG_BR_PROB, NULL_RTX);

  /* Discard original jump to continue loop.  The original compare
     result may still be live, so it cannot be discarded explicitly.  */
  delete_insn (jump_insn);

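  /* CONDITION from doloop_condition_get compares either the counter
     register itself or (plus (reg) (const_int -1)) against a constant;
     dig the register out and note its mode.  */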
  counter_reg = XEXP (condition, 0);
  if (GET_CODE (counter_reg) == PLUS)
    counter_reg = XEXP (counter_reg, 0);
  mode = GET_MODE (counter_reg);

  increment_count = false;
  switch (GET_CODE (condition))
    {
    case NE:
      /* Currently only NE tests against zero and one are supported.  */
      noloop = XEXP (condition, 1);
      if (noloop != const0_rtx)
        {
          gcc_assert (noloop == const1_rtx);
          increment_count = true;
        }
      break;

    case GE:
      /* Currently only GE tests against zero are supported.  */
      gcc_assert (XEXP (condition, 1) == const0_rtx);

      noloop = constm1_rtx;

      /* The iteration count does not need incrementing for a GE test.  */
      increment_count = false;

      /* Determine if the iteration counter will be non-negative.
         Note that the maximum value loaded is iterations_max - 1.  */
      if (desc->niter_max
          <= ((unsigned HOST_WIDEST_INT) 1
              << (GET_MODE_BITSIZE (mode) - 1)))
        nonneg = 1;
      break;

      /* Abort if an invalid doloop pattern has been generated.  */
    default:
      gcc_unreachable ();
    }

  if (increment_count)
    count = simplify_gen_binary (PLUS, from_mode, count, const1_rtx);

  if (zero_extend_p)
    count = simplify_gen_unary (ZERO_EXTEND, word_mode,
                                count, from_mode);

  /* Insert initialization of the count register into the loop header.  */
  start_sequence ();
  tmp = force_operand (count, counter_reg);
  convert_move (counter_reg, tmp, 1);
  sequence = get_insns ();
  end_sequence ();
  emit_insn_after (sequence, BB_END (loop_preheader_edge (loop)->src));

  if (desc->noloop_assumptions)
    {
      rtx ass = copy_rtx (desc->noloop_assumptions);
      basic_block preheader = loop_preheader_edge (loop)->src;
      basic_block set_zero
        = split_edge (loop_preheader_edge (loop));
      basic_block new_preheader
        = split_edge (loop_preheader_edge (loop));
      edge te;

      /* Expand the condition testing the assumptions and if it does not pass,
         reset the count register to 0.  */
      redirect_edge_and_branch_force (single_succ_edge (preheader), new_preheader);
      set_immediate_dominator (CDI_DOMINATORS, new_preheader, preheader);

      set_zero->count = 0;
      set_zero->frequency = 0;

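      /* Add one test along the preheader path for each noloop assumption;
         if any of them holds, control transfers to SET_ZERO.  */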
      te = single_succ_edge (preheader);
      for (; ass; ass = XEXP (ass, 1))
        if (!add_test (XEXP (ass, 0), &te, set_zero))
          break;

      if (ass)
        {
          /* We reached a condition that is always true.  This is very hard
             to reproduce (such a loop does not roll, and thus it would most
             likely get optimized out by some of the preceding optimizations).
             In fact, I do not have any testcase for it.  However, it would
             also be very hard to show that it is impossible, so we must
             handle this case.  */
          set_zero->count = preheader->count;
          set_zero->frequency = preheader->frequency;
        }

      if (EDGE_COUNT (set_zero->preds) == 0)
        {
          /* All the conditions were simplified to false, remove the
             unreachable set_zero block.  */
          delete_basic_block (set_zero);
        }
      else
        {
          /* Reset the counter to the value that makes the doloop test fail
             (NOLOOP) in the set_zero block.  */
          start_sequence ();
          convert_move (counter_reg, noloop, 0);
          sequence = get_insns ();
          end_sequence ();
          emit_insn_after (sequence, BB_END (set_zero));

          set_immediate_dominator (CDI_DOMINATORS, set_zero,
                                   recompute_dominator (CDI_DOMINATORS,
                                                        set_zero));
        }

      set_immediate_dominator (CDI_DOMINATORS, new_preheader,
                               recompute_dominator (CDI_DOMINATORS,
                                                    new_preheader));
    }

  /* Some targets (e.g. C4x) need to initialize special looping
     registers.  */
#ifdef HAVE_doloop_begin
  {
    rtx init;
    unsigned level = get_loop_level (loop) + 1;
    init = gen_doloop_begin (counter_reg,
                             desc->const_iter ? desc->niter_expr : const0_rtx,
                             GEN_INT (desc->niter_max),
                             GEN_INT (level));
    if (init)
      {
        start_sequence ();
        emit_insn (init);
        sequence = get_insns ();
        end_sequence ();
        emit_insn_after (sequence, BB_END (loop_preheader_edge (loop)->src));
      }
  }
#endif

  /* Insert the new low-overhead looping insn.  */
  emit_jump_insn_after (doloop_seq, BB_END (loop_end));
  jump_insn = BB_END (loop_end);
  jump_label = block_label (desc->in_edge->dest);
  JUMP_LABEL (jump_insn) = jump_label;
  LABEL_NUSES (jump_label)++;

  /* Ensure the right fallthru edge is marked, in case we have reversed
     the condition.  */
  desc->in_edge->flags &= ~EDGE_FALLTHRU;
  desc->out_edge->flags |= EDGE_FALLTHRU;

  /* Add a REG_NONNEG note if the counter register is known to remain
     non-negative, judging from the actual or estimated maximum number
     of iterations.  */
  if (nonneg)
    add_reg_note (jump_insn, REG_NONNEG, NULL_RTX);

  /* Update the REG_BR_PROB note.  */
  if (true_prob_val)
    {
      /* Seems safer to use the branch probability.  */
      add_reg_note (jump_insn, REG_BR_PROB,
                    GEN_INT (desc->in_edge->probability));
    }
}

/* Process loop described by LOOP validating that the loop is suitable for
   conversion to use a low overhead looping instruction, replacing the jump
   insn where suitable.  Returns true if the loop was successfully
   modified.  */

static bool
doloop_optimize (struct loop *loop)
{
  enum machine_mode mode;
  rtx doloop_seq, doloop_pat, doloop_reg;
  rtx iterations, count;
  rtx iterations_max;
  rtx start_label;
  rtx condition;
  unsigned level, est_niter;
  int max_cost;
  struct niter_desc *desc;
  unsigned word_mode_size;
  unsigned HOST_WIDE_INT word_mode_max;
  bool zero_extend_p = false;

  if (dump_file)
    fprintf (dump_file, "Doloop: Processing loop %d.\n", loop->num);

  iv_analysis_loop_init (loop);

  /* Find the simple exit of a LOOP.  */
  desc = get_simple_loop_desc (loop);

  /* Check that loop is a candidate for a low-overhead looping insn.  */
  if (!doloop_valid_p (loop, desc))
    {
      if (dump_file)
        fprintf (dump_file,
                 "Doloop: The loop is not suitable.\n");
      return false;
    }
  mode = desc->mode;

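  /* Estimate the number of iterations; assume the loop runs often enough
     to be worthwhile unless a constant count or profile data says
     otherwise.  */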
  est_niter = 3;
  if (desc->const_iter)
    est_niter = desc->niter;
  /* If the estimate on the number of iterations is reliable (it comes from
     profile feedback), use it.  Do not use it normally, since the expected
     number of iterations of an unrolled loop is 2.  */
  if (loop->header->count)
    est_niter = expected_loop_iterations (loop);

  if (est_niter < 3)
    {
      if (dump_file)
        fprintf (dump_file,
                 "Doloop: Too few iterations (%u) to be profitable.\n",
                 est_niter);
      return false;
    }

  max_cost
    = COSTS_N_INSNS (PARAM_VALUE (PARAM_MAX_ITERATIONS_COMPUTATION_COST));
  if (rtx_cost (desc->niter_expr, SET, optimize_loop_for_speed_p (loop))
      > max_cost)
    {
      if (dump_file)
        fprintf (dump_file,
                 "Doloop: number of iterations too costly to compute.\n");
      return false;
    }

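  /* Gather the operands the doloop_end expander expects: the runtime
     iteration count, the constant iteration count (or 0 if unknown), the
     maximum iteration count, and the loop nesting level.  */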
  count = copy_rtx (desc->niter_expr);
  iterations = desc->const_iter ? desc->niter_expr : const0_rtx;
  iterations_max = GEN_INT (desc->niter_max);
  level = get_loop_level (loop) + 1;

  /* Generate looping insn.  If the pattern FAILs then give up trying
     to modify the loop since there is some aspect the back-end does
     not like.  */
  start_label = block_label (desc->in_edge->dest);
  doloop_reg = gen_reg_rtx (mode);
  doloop_seq = gen_doloop_end (doloop_reg, iterations, iterations_max,
                               GEN_INT (level), start_label);

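  /* Precompute the size of word_mode and the largest value it can hold,
     for the retry in word_mode below.  */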
  word_mode_size = GET_MODE_BITSIZE (word_mode);
  word_mode_max
    = ((unsigned HOST_WIDE_INT) 1 << (word_mode_size - 1) << 1) - 1;
  if (! doloop_seq
      && mode != word_mode
      /* Before trying a mode different from the one in which the number of
         iterations is computed, we must be sure that the number of
         iterations fits into the new mode.  */
      && (word_mode_size >= GET_MODE_BITSIZE (mode)
          || desc->niter_max <= word_mode_max))
    {
      if (word_mode_size > GET_MODE_BITSIZE (mode))
        {
          zero_extend_p = true;
          iterations = simplify_gen_unary (ZERO_EXTEND, word_mode,
                                           iterations, mode);
          iterations_max = simplify_gen_unary (ZERO_EXTEND, word_mode,
                                               iterations_max, mode);
        }
      else
        {
          count = lowpart_subreg (word_mode, count, mode);
          iterations = lowpart_subreg (word_mode, iterations, mode);
          iterations_max = lowpart_subreg (word_mode, iterations_max, mode);
        }
      PUT_MODE (doloop_reg, word_mode);
      doloop_seq = gen_doloop_end (doloop_reg, iterations, iterations_max,
                                   GEN_INT (level), start_label);
    }
  if (! doloop_seq)
    {
      if (dump_file)
        fprintf (dump_file,
                 "Doloop: Target unwilling to use doloop pattern!\n");
      return false;
    }

  /* If multiple instructions were created, the last must be the
     jump instruction.  Also, a raw define_insn may yield a plain
     pattern.  */
  doloop_pat = doloop_seq;
  if (INSN_P (doloop_pat))
    {
      while (NEXT_INSN (doloop_pat) != NULL_RTX)
        doloop_pat = NEXT_INSN (doloop_pat);
      if (!JUMP_P (doloop_pat))
        doloop_pat = NULL_RTX;
    }

  if (! doloop_pat
      || ! (condition = doloop_condition_get (doloop_pat)))
    {
      if (dump_file)
        fprintf (dump_file, "Doloop: Unrecognizable doloop pattern!\n");
      return false;
    }

  doloop_modify (loop, desc, doloop_seq, condition, count,
                 zero_extend_p, mode);
  return true;
}

/* This is the main entry point.  Process all loops using doloop_optimize.  */

void
doloop_optimize_loops (void)
{
  loop_iterator li;
  struct loop *loop;

  FOR_EACH_LOOP (li, loop, 0)
    {
      doloop_optimize (loop);
    }

  iv_analysis_done ();

#ifdef ENABLE_CHECKING
  verify_dominators (CDI_DOMINATORS);
  verify_loop_structure ();
#endif
}
#endif /* HAVE_doloop_end */