gcc/sched-ebb.c @ 158:494b0b89df80 (default tip)
author:  Shinji KONO <kono@ie.u-ryukyu.ac.jp>
date:    Mon, 25 May 2020 18:13:55 +0900
parents: 1830386684a0
/* Instruction scheduling pass.
   Copyright (C) 1992-2020 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "cfghooks.h"
#include "df.h"
#include "profile.h"
#include "insn-attr.h"
#include "cfgrtl.h"
#include "cfgbuild.h"
#include "sched-int.h"


#ifdef INSN_SCHEDULING

/* The number of insns to be scheduled in total.  */
static int rgn_n_insns;

/* The number of insns scheduled so far.  */
static int sched_rgn_n_insns;

/* Set of blocks that already have their dependencies calculated.  */
static bitmap_head dont_calc_deps;

/* Last basic block in current ebb.  */
static basic_block last_bb;

/* Implementations of the sched_info functions for ebb scheduling.  */
static void init_ready_list (void);
static void begin_schedule_ready (rtx_insn *);
static int schedule_more_p (void);
static const char *ebb_print_insn (const rtx_insn *, int);
static int rank (rtx_insn *, rtx_insn *);
static int ebb_contributes_to_priority (rtx_insn *, rtx_insn *);
static basic_block earliest_block_with_similiar_load (basic_block, rtx);
static void add_deps_for_risky_insns (rtx_insn *, rtx_insn *);
static void debug_ebb_dependencies (rtx_insn *, rtx_insn *);

static void ebb_add_remove_insn (rtx_insn *, int);
static void ebb_add_block (basic_block, basic_block);
static basic_block advance_target_bb (basic_block, rtx_insn *);
static void ebb_fix_recovery_cfg (int, int, int);

/* Allocate memory and store the state of the frontend.  Return the allocated
   memory.  */
static void *
save_ebb_state (void)
{
  int *p = XNEW (int);
  *p = sched_rgn_n_insns;
  return p;
}

/* Restore the state of the frontend from P_, then free it.  */
static void
restore_ebb_state (void *p_)
{
  int *p = (int *)p_;
  sched_rgn_n_insns = *p;
  free (p_);
}

/* Return nonzero if there are more insns that should be scheduled.  */

static int
schedule_more_p (void)
{
  return sched_rgn_n_insns < rgn_n_insns;
}

/* Print dependency information about ebb between HEAD and TAIL.  */
static void
debug_ebb_dependencies (rtx_insn *head, rtx_insn *tail)
{
  fprintf (sched_dump,
           ";; --------------- forward dependences: ------------ \n");

  fprintf (sched_dump, "\n;; --- EBB Dependences --- from bb%d to bb%d \n",
           BLOCK_NUM (head), BLOCK_NUM (tail));

  debug_dependencies (head, tail);
}

/* Add all insns that are initially ready to the ready list READY.  Called
   once before scheduling a set of insns.  */

static void
init_ready_list (void)
{
  int n = 0;
  rtx_insn *prev_head = current_sched_info->prev_head;
  rtx_insn *next_tail = current_sched_info->next_tail;
  rtx_insn *insn;

  sched_rgn_n_insns = 0;

  /* Print debugging information.  */
  if (sched_verbose >= 5)
    debug_ebb_dependencies (NEXT_INSN (prev_head), PREV_INSN (next_tail));

  /* Initialize ready list with all 'ready' insns in target block.
     Count number of insns in the target block being scheduled.  */
  for (insn = NEXT_INSN (prev_head); insn != next_tail; insn = NEXT_INSN (insn))
    {
      try_ready (insn);
      n++;
    }

  gcc_assert (n == rgn_n_insns);
}

/* INSN is being scheduled after LAST.  Update counters.  */
static void
begin_schedule_ready (rtx_insn *insn ATTRIBUTE_UNUSED)
{
  sched_rgn_n_insns++;
}

/* INSN is being moved to its place in the schedule, after LAST.  */
static void
begin_move_insn (rtx_insn *insn, rtx_insn *last)
{
  if (BLOCK_FOR_INSN (insn) == last_bb
      /* INSN is a jump in the last block, ...  */
      && control_flow_insn_p (insn)
      /* that is going to be moved over some instructions.  */
      && last != PREV_INSN (insn))
    {
      edge e;
      basic_block bb;

      /* An obscure special case, where we have a partially dead
         instruction scheduled after the last control flow instruction.
         In this case we can create a new basic block.  It is
         always exactly one basic block, last in the sequence.  */

      e = find_fallthru_edge (last_bb->succs);

      gcc_checking_assert (!e || !(e->flags & EDGE_COMPLEX));

      gcc_checking_assert (BLOCK_FOR_INSN (insn) == last_bb
                           && !IS_SPECULATION_CHECK_P (insn)
                           && BB_HEAD (last_bb) != insn
                           && BB_END (last_bb) == insn);

      {
        rtx_insn *x = NEXT_INSN (insn);
        if (e)
          gcc_checking_assert (NOTE_P (x) || LABEL_P (x));
        else
          gcc_checking_assert (BARRIER_P (x));
      }

      if (e)
        {
          bb = split_edge (e);
          gcc_assert (NOTE_INSN_BASIC_BLOCK_P (BB_END (bb)));
        }
      else
        {
          /* Create an empty unreachable block after the INSN.  */
          rtx_insn *next = NEXT_INSN (insn);
          if (next && BARRIER_P (next))
            next = NEXT_INSN (next);
          bb = create_basic_block (next, NULL_RTX, last_bb);
        }

      /* split_edge () creates BB before E->DEST.  Keep in mind, that
         this operation extends scheduling region till the end of BB.
         Hence, we need to shift NEXT_TAIL, so haifa-sched.c won't go out
         of the scheduling region.  */
      current_sched_info->next_tail = NEXT_INSN (BB_END (bb));
      gcc_assert (current_sched_info->next_tail);

      /* Append new basic block to the end of the ebb.  */
      sched_init_only_bb (bb, last_bb);
      gcc_assert (last_bb == bb);
    }
}

/* Return a string that contains the insn uid and optionally anything else
   necessary to identify this insn in an output.  It's valid to use a
   static buffer for this.  The ALIGNED parameter should cause the string
   to be formatted so that multiple output lines will line up nicely.  */

static const char *
ebb_print_insn (const rtx_insn *insn, int aligned ATTRIBUTE_UNUSED)
{
  static char tmp[80];

  /* '+' before insn means it is a new cycle start.  */
  if (GET_MODE (insn) == TImode)
    sprintf (tmp, "+ %4d", INSN_UID (insn));
  else
    sprintf (tmp, "  %4d", INSN_UID (insn));

  return tmp;
}

/* Compare priority of two insns.  Return a positive number if the second
   insn is to be preferred for scheduling, and a negative one if the first
   is to be preferred.  Zero if they are equally good.  */

static int
rank (rtx_insn *insn1, rtx_insn *insn2)
{
  basic_block bb1 = BLOCK_FOR_INSN (insn1);
  basic_block bb2 = BLOCK_FOR_INSN (insn2);

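  /* Prefer the insn from the more frequently executed basic block.  */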
  if (bb1->count > bb2->count)
    return -1;
  if (bb1->count < bb2->count)
    return 1;
  return 0;
}

/* NEXT is an instruction that depends on INSN (a backward dependence);
   return nonzero if we should include this dependence in priority
   calculations.  */

static int
ebb_contributes_to_priority (rtx_insn *next ATTRIBUTE_UNUSED,
                             rtx_insn *insn ATTRIBUTE_UNUSED)
{
  return 1;
}

/* INSN is a JUMP_INSN.  Store the set of registers that
   must be considered as used by this jump in USED.  */

void
ebb_compute_jump_reg_dependencies (rtx insn, regset used)
{
  basic_block b = BLOCK_FOR_INSN (insn);
  edge e;
  edge_iterator ei;

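  /* Registers live on entry to any successor reached through a
     non-fallthrough edge are considered used by the jump itself.  */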
  FOR_EACH_EDGE (e, ei, b->succs)
    if ((e->flags & EDGE_FALLTHRU) == 0)
      bitmap_ior_into (used, df_get_live_in (e->dest));
}

/* Used in schedule_insns to initialize current_sched_info for scheduling
   regions (or single basic blocks).  */

static struct common_sched_info_def ebb_common_sched_info;

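/* Dependence-analysis callbacks for ebb scheduling.  Hooks that ebb
   scheduling does not need are left NULL; the positional initializers
   follow the field order of struct sched_deps_info_def in sched-int.h.  */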
static struct sched_deps_info_def ebb_sched_deps_info =
  {
    ebb_compute_jump_reg_dependencies,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    NULL,
    1, 0, 0
  };

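/* Callbacks used by the Haifa scheduler when scheduling ebbs; unused hooks
   are NULL.  The positional initializers follow the field order of
   struct haifa_sched_info in sched-int.h.  */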
static struct haifa_sched_info ebb_sched_info =
{
  init_ready_list,
  NULL,
  schedule_more_p,
  NULL,
  rank,
  ebb_print_insn,
  ebb_contributes_to_priority,
  NULL, /* insn_finishes_block_p */

  NULL, NULL,
  NULL, NULL,
  1, 0,

  ebb_add_remove_insn,
  begin_schedule_ready,
  begin_move_insn,
  advance_target_bb,

  save_ebb_state,
  restore_ebb_state,

  SCHED_EBB
  /* We can create new blocks in begin_schedule_ready ().  */
  | NEW_BBS
};

/* Returns the earliest block in EBB currently being processed where a
   "similar load" 'insn2' is found, and hence LOAD_INSN can move
   speculatively into the found block.  All the following must hold:

   (1) both loads have 1 base register (PFREE_CANDIDATEs).
   (2) load_insn and load2 have a def-use dependence upon
   the same insn 'insn1'.

   From all these we can conclude that the two loads access memory
   addresses that differ at most by a constant, and hence if moving
   load_insn would cause an exception, it would have been caused by
   load2 anyhow.

   The function uses list (given by LAST_BLOCK) of already processed
   blocks in EBB.  The list is formed in `add_deps_for_risky_insns'.  */

static basic_block
earliest_block_with_similiar_load (basic_block last_block, rtx load_insn)
{
  sd_iterator_def back_sd_it;
  dep_t back_dep;
  basic_block bb, earliest_block = NULL;

  FOR_EACH_DEP (load_insn, SD_LIST_BACK, back_sd_it, back_dep)
    {
      rtx_insn *insn1 = DEP_PRO (back_dep);

      if (DEP_TYPE (back_dep) == REG_DEP_TRUE)
        /* Found a DEF-USE dependence (insn1, load_insn).  */
        {
          sd_iterator_def fore_sd_it;
          dep_t fore_dep;

          FOR_EACH_DEP (insn1, SD_LIST_FORW, fore_sd_it, fore_dep)
            {
              rtx_insn *insn2 = DEP_CON (fore_dep);
              basic_block insn2_block = BLOCK_FOR_INSN (insn2);

              if (DEP_TYPE (fore_dep) == REG_DEP_TRUE)
                {
                  if (earliest_block != NULL
                      && earliest_block->index < insn2_block->index)
                    continue;

                  /* Found a DEF-USE dependence (insn1, insn2).  */
                  if (haifa_classify_insn (insn2) != PFREE_CANDIDATE)
                    /* insn2 not guaranteed to be a 1 base reg load.  */
                    continue;

                  for (bb = last_block; bb; bb = (basic_block) bb->aux)
                    if (insn2_block == bb)
                      break;

                  if (!bb)
                    /* insn2 is the similar load.  */
                    earliest_block = insn2_block;
                }
            }
        }
    }

  return earliest_block;
}

/* The following function adds dependencies between jumps and risky
   insns in given ebb.  */

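/* A "risky" insn here is one that haifa_classify_insn reports as possibly
   trapping.  Making such an insn depend on the last jump seen keeps it from
   being hoisted above that jump, unless control speculation is enabled and
   the dependence can be made speculative instead.  */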
static void
add_deps_for_risky_insns (rtx_insn *head, rtx_insn *tail)
{
  rtx_insn *insn, *prev;
  int classification;
  rtx_insn *last_jump = NULL;
  rtx_insn *next_tail = NEXT_INSN (tail);
  basic_block last_block = NULL, bb;

  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    {
      add_delay_dependencies (insn);
      if (control_flow_insn_p (insn))
        {
          bb = BLOCK_FOR_INSN (insn);
          bb->aux = last_block;
          last_block = bb;
          /* Ensure blocks stay in the same order.  */
          if (last_jump)
            add_dependence (insn, last_jump, REG_DEP_ANTI);
          last_jump = insn;
        }
      else if (INSN_P (insn) && last_jump != NULL_RTX)
        {
          classification = haifa_classify_insn (insn);
          prev = last_jump;

          switch (classification)
            {
            case PFREE_CANDIDATE:
              if (flag_schedule_speculative_load)
                {
                  bb = earliest_block_with_similiar_load (last_block, insn);
                  if (bb)
                    {
                      bb = (basic_block) bb->aux;
                      if (!bb)
                        break;
                      prev = BB_END (bb);
                    }
                }
              /* Fall through.  */
            case TRAP_RISKY:
            case IRISKY:
            case PRISKY_CANDIDATE:
              /* ??? We could implement better checking PRISKY_CANDIDATEs
                 analogous to sched-rgn.c.  */
              /* We cannot change the mode of the backward
                 dependency because REG_DEP_ANTI has the lowest
                 rank.  */
              if (! sched_insns_conditions_mutex_p (insn, prev))
                {
                  if ((current_sched_info->flags & DO_SPECULATION)
                      && (spec_info->mask & BEGIN_CONTROL))
                    {
                      dep_def _dep, *dep = &_dep;

                      init_dep (dep, prev, insn, REG_DEP_ANTI);

                      if (current_sched_info->flags & USE_DEPS_LIST)
                        {
                          DEP_STATUS (dep) = set_dep_weak (DEP_ANTI, BEGIN_CONTROL,
                                                           MAX_DEP_WEAK);

                        }
                      sd_add_or_update_dep (dep, false);
                    }
                  else
                    add_dependence (insn, prev, REG_DEP_CONTROL);
                }

              break;

            default:
              break;
            }
        }
    }
  /* Maintain the invariant that bb->aux is clear after use.  */
  while (last_block)
    {
      bb = (basic_block) last_block->aux;
      last_block->aux = NULL;
      last_block = bb;
    }
}

/* Schedule a single extended basic block, defined by the boundaries
   HEAD and TAIL.

   We change our expectations about scheduler behavior depending on
   whether MODULO_SCHEDULING is true.  If it is, we expect that the
   caller has already called set_modulo_params and created delay pairs
   as appropriate.  If the modulo schedule failed, we return
   NULL_RTX.  */

basic_block
schedule_ebb (rtx_insn *head, rtx_insn *tail, bool modulo_scheduling)
{
  basic_block first_bb, target_bb;
  class deps_desc tmp_deps;
  bool success;

  /* Blah.  We should fix the rest of the code not to get confused by
     a note or two.  */
  while (head != tail)
    {
      if (NOTE_P (head) || DEBUG_INSN_P (head))
        head = NEXT_INSN (head);
      else if (NOTE_P (tail) || DEBUG_INSN_P (tail))
        tail = PREV_INSN (tail);
      else if (LABEL_P (head))
        head = NEXT_INSN (head);
      else
        break;
    }

  first_bb = BLOCK_FOR_INSN (head);
  last_bb = BLOCK_FOR_INSN (tail);

  if (no_real_insns_p (head, tail))
    return BLOCK_FOR_INSN (tail);

  gcc_assert (INSN_P (head) && INSN_P (tail));

  if (!bitmap_bit_p (&dont_calc_deps, first_bb->index))
    {
      init_deps_global ();

      /* Compute dependencies.  */
      init_deps (&tmp_deps, false);
      sched_analyze (&tmp_deps, head, tail);
      free_deps (&tmp_deps);

      add_deps_for_risky_insns (head, tail);

      if (targetm.sched.dependencies_evaluation_hook)
        targetm.sched.dependencies_evaluation_hook (head, tail);

      finish_deps_global ();
    }
  else
    /* Only recovery blocks can have their dependencies already calculated,
       and they always are single block ebbs.  */
    gcc_assert (first_bb == last_bb);

  /* Set priorities.  */
  current_sched_info->sched_max_insns_priority = 0;
  rgn_n_insns = set_priorities (head, tail);
  current_sched_info->sched_max_insns_priority++;

  current_sched_info->prev_head = PREV_INSN (head);
  current_sched_info->next_tail = NEXT_INSN (tail);

  remove_notes (head, tail);

  unlink_bb_notes (first_bb, last_bb);

  target_bb = first_bb;

  /* Make ready list big enough to hold all the instructions from the ebb.  */
  sched_extend_ready_list (rgn_n_insns);
  success = schedule_block (&target_bb, NULL);
  gcc_assert (success || modulo_scheduling);

  /* Free ready list.  */
  sched_finish_ready_list ();

  /* We might pack all instructions into fewer blocks,
     so we may have made some of them empty.  Can't assert (b == last_bb).  */

  /* Sanity check: verify that all region insns were scheduled.  */
  gcc_assert (modulo_scheduling || sched_rgn_n_insns == rgn_n_insns);

  /* Free dependencies.  */
  sched_free_deps (current_sched_info->head, current_sched_info->tail, true);

  gcc_assert (haifa_recovery_bb_ever_added_p
              || deps_pools_are_empty_p ());

  if (EDGE_COUNT (last_bb->preds) == 0)
    /* LAST_BB is unreachable.  */
    {
      gcc_assert (first_bb != last_bb
                  && EDGE_COUNT (last_bb->succs) == 0);
      last_bb = last_bb->prev_bb;
      delete_basic_block (last_bb->next_bb);
    }

  return success ? last_bb : NULL;
}

/* Perform initializations before running schedule_ebbs or a single
   schedule_ebb.  */
void
schedule_ebbs_init (void)
{
  /* Setup infos.  */
  {
    memcpy (&ebb_common_sched_info, &haifa_common_sched_info,
            sizeof (ebb_common_sched_info));

    ebb_common_sched_info.fix_recovery_cfg = ebb_fix_recovery_cfg;
    ebb_common_sched_info.add_block = ebb_add_block;
    ebb_common_sched_info.sched_pass_id = SCHED_EBB_PASS;

    common_sched_info = &ebb_common_sched_info;
    sched_deps_info = &ebb_sched_deps_info;
    current_sched_info = &ebb_sched_info;
  }

  haifa_sched_init ();

  compute_bb_for_insn ();

  /* Initialize DONT_CALC_DEPS and ebb-{start, end} markers.  */
  bitmap_initialize (&dont_calc_deps, &bitmap_default_obstack);
}

/* Perform cleanups after scheduling using schedule_ebbs or schedule_ebb.  */
void
schedule_ebbs_finish (void)
{
  bitmap_release (&dont_calc_deps);

  /* Reposition the prologue and epilogue notes in case we moved the
     prologue/epilogue insns.  */
  if (reload_completed)
    reposition_prologue_and_epilogue_notes ();

  haifa_sched_finish ();
}

/* The main entry point in this file.  */

void
schedule_ebbs (void)
{
  basic_block bb;
  int probability_cutoff;
  rtx_insn *tail;

  /* Taking care of this degenerate case makes the rest of
     this code simpler.  */
  if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS)
    return;

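  /* The tracer's minimum-branch-probability parameters are given in percent;
     convert the chosen cutoff to REG_BR_PROB_BASE units so it can be compared
     directly against edge probabilities below.  */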
  if (profile_info && profile_status_for_fn (cfun) == PROFILE_READ)
    probability_cutoff = param_tracer_min_branch_probability_feedback;
  else
    probability_cutoff = param_tracer_min_branch_probability;
  probability_cutoff = REG_BR_PROB_BASE / 100 * probability_cutoff;

  schedule_ebbs_init ();

  /* Schedule every region in the subroutine.  */
  FOR_EACH_BB_FN (bb, cfun)
    {
      rtx_insn *head = BB_HEAD (bb);

      if (bb->flags & BB_DISABLE_SCHEDULE)
        continue;

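      /* Grow the ebb forward across fallthrough edges for as long as the
         fallthrough is likely (probability above the cutoff), the successor
         does not start with a label, and scheduling is not disabled for it.  */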
      for (;;)
        {
          edge e;
          tail = BB_END (bb);
          if (bb->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
              || LABEL_P (BB_HEAD (bb->next_bb)))
            break;
          e = find_fallthru_edge (bb->succs);
          if (! e)
            break;
          if (e->probability.initialized_p ()
              && e->probability.to_reg_br_prob_base () <= probability_cutoff)
            break;
          if (e->dest->flags & BB_DISABLE_SCHEDULE)
            break;
          bb = bb->next_bb;
        }

      bb = schedule_ebb (head, tail, false);
    }
  schedule_ebbs_finish ();
}

/* INSN has been added to/removed from current ebb.  */
static void
ebb_add_remove_insn (rtx_insn *insn ATTRIBUTE_UNUSED, int remove_p)
{
  if (!remove_p)
    rgn_n_insns++;
  else
    rgn_n_insns--;
}

/* BB was added to ebb after AFTER.  */
static void
ebb_add_block (basic_block bb, basic_block after)
{
  /* Recovery blocks are always bounded by BARRIERS,
     therefore, they always form single block EBB,
     therefore, we can use rec->index to identify such EBBs.  */
  if (after == EXIT_BLOCK_PTR_FOR_FN (cfun))
    bitmap_set_bit (&dont_calc_deps, bb->index);
  else if (after == last_bb)
    last_bb = bb;
}

/* Return next block in ebb chain.  For parameter meaning please refer to
   sched-int.h: struct sched_info: advance_target_bb.  */
static basic_block
advance_target_bb (basic_block bb, rtx_insn *insn)
{
  if (insn)
    {
      if (BLOCK_FOR_INSN (insn) != bb
          && control_flow_insn_p (insn)
          /* We handle interblock movement of the speculation check
             or over a speculation check in
             haifa-sched.c: move_block_after_check ().  */
          && !IS_SPECULATION_BRANCHY_CHECK_P (insn)
          && !IS_SPECULATION_BRANCHY_CHECK_P (BB_END (bb)))
        {
          /* Assert that we don't move jumps across blocks.  */
          gcc_assert (!control_flow_insn_p (BB_END (bb))
                      && NOTE_INSN_BASIC_BLOCK_P (BB_HEAD (bb->next_bb)));
          return bb;
        }
      else
        return 0;
    }
  else
    /* Return next non empty block.  */
    {
      do
        {
          gcc_assert (bb != last_bb);

          bb = bb->next_bb;
        }
      while (bb_note (bb) == BB_END (bb));

      return bb;
    }
}

/* Fix internal data after interblock movement of jump instruction.
   For parameter meaning please refer to
   sched-int.h: struct sched_info: fix_recovery_cfg.  */
static void
ebb_fix_recovery_cfg (int bbi ATTRIBUTE_UNUSED, int jump_bbi,
                      int jump_bb_nexti)
{
  gcc_assert (last_bb->index != bbi);

  if (jump_bb_nexti == last_bb->index)
    last_bb = BASIC_BLOCK_FOR_FN (cfun, jump_bbi);
}

#endif /* INSN_SCHEDULING */