gcc/df-scan.c @ 0:a06113de4d67 (first commit)

author:   kent <kent@cr.ie.u-ryukyu.ac.jp>
date:     Fri, 17 Jul 2009 14:47:48 +0900
parents:  (none)
children: 77e2b8dfacca
/* Scanning of rtl for dataflow analysis.
   Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007,
   2008, 2009 Free Software Foundation, Inc.
   Originally contributed by Michael P. Hayes
             (m.hayes@elec.canterbury.ac.nz, mhayes@redhat.com)
   Major rewrite contributed by Danny Berlin (dberlin@dberlin.org)
   and Kenneth Zadeck (zadeck@naturalbridge.com).

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tm_p.h"
#include "insn-config.h"
#include "recog.h"
#include "function.h"
#include "regs.h"
#include "output.h"
#include "alloc-pool.h"
#include "flags.h"
#include "hard-reg-set.h"
#include "basic-block.h"
#include "sbitmap.h"
#include "bitmap.h"
#include "timevar.h"
#include "tree.h"
#include "target.h"
#include "target-def.h"
#include "df.h"
#include "tree-pass.h"

#ifndef HAVE_epilogue
#define HAVE_epilogue 0
#endif
#ifndef HAVE_prologue
#define HAVE_prologue 0
#endif
#ifndef HAVE_sibcall_epilogue
#define HAVE_sibcall_epilogue 0
#endif

#ifndef EPILOGUE_USES
#define EPILOGUE_USES(REGNO)  0
#endif
/* The following two macros free the vecs that hold either the refs or
   the mw refs.  They are a little tricky because a vec that holds 0
   elements is the special shared one and is not to be freed.  */
#define df_scan_free_ref_vec(V) \
  do { \
    if (V && *V) \
      free (V); \
  } while (0)

#define df_scan_free_mws_vec(V) \
  do { \
    if (V && *V) \
      free (V); \
  } while (0)
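
/* Illustrative sketch (not part of the original source): these macros
   rely on the convention that an empty vec is the statically allocated
   sentinel (df_null_ref_rec / df_null_mw_rec below), whose first
   element is NULL, so testing *V distinguishes a heap vec from the
   shared one:

     df_ref *v1 = df_null_ref_rec;      the shared empty vec: *v1 == NULL
     df_ref *v2 = XNEWVEC (df_ref, 2);  a real one-element vec
     v2[0] = some_ref;                  some_ref stands for any df_ref
     v2[1] = NULL;
     df_scan_free_ref_vec (v1);         no-op, sentinel kept
     df_scan_free_ref_vec (v2);         calls free (v2)  */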

/* The set of hard registers in eliminables[i].from. */

static HARD_REG_SET elim_reg_set;

/* Holds the refs and mw hardregs collected while scanning an insn or
   basic block, before they are installed into the df chains.  */

struct df_collection_rec
{
  df_ref *def_vec;
  df_ref *use_vec;
  unsigned int next_def;
  unsigned int next_use;
  df_ref *eq_use_vec;
  struct df_mw_hardreg **mw_vec;
  unsigned int next_eq_use;
  unsigned int next_mw;
};

static df_ref df_null_ref_rec[1];
static struct df_mw_hardreg *df_null_mw_rec[1];

static void df_ref_record (enum df_ref_class, struct df_collection_rec *,
                           rtx, rtx *,
                           basic_block, struct df_insn_info *,
                           enum df_ref_type, enum df_ref_flags,
                           int, int, enum machine_mode);
static void df_def_record_1 (struct df_collection_rec *, rtx,
                             basic_block, struct df_insn_info *,
                             enum df_ref_flags);
static void df_defs_record (struct df_collection_rec *, rtx,
                            basic_block, struct df_insn_info *,
                            enum df_ref_flags);
static void df_uses_record (enum df_ref_class, struct df_collection_rec *,
                            rtx *, enum df_ref_type,
                            basic_block, struct df_insn_info *,
                            enum df_ref_flags,
                            int, int, enum machine_mode);

static df_ref df_ref_create_structure (enum df_ref_class,
                                       struct df_collection_rec *, rtx, rtx *,
                                       basic_block, struct df_insn_info *,
                                       enum df_ref_type, enum df_ref_flags,
                                       int, int, enum machine_mode);

static void df_insn_refs_collect (struct df_collection_rec *,
                                  basic_block, struct df_insn_info *);
static void df_canonize_collection_rec (struct df_collection_rec *);

static void df_get_regular_block_artificial_uses (bitmap);
static void df_get_eh_block_artificial_uses (bitmap);

static void df_record_entry_block_defs (bitmap);
static void df_record_exit_block_uses (bitmap);
static void df_get_exit_block_use_set (bitmap);
static void df_get_entry_block_def_set (bitmap);
static void df_grow_ref_info (struct df_ref_info *, unsigned int);
static void df_ref_chain_delete_du_chain (df_ref *);
static void df_ref_chain_delete (df_ref *);

static void df_refs_add_to_chains (struct df_collection_rec *,
                                   basic_block, rtx);

static bool df_insn_refs_verify (struct df_collection_rec *, basic_block, rtx, bool);
static void df_entry_block_defs_collect (struct df_collection_rec *, bitmap);
static void df_exit_block_uses_collect (struct df_collection_rec *, bitmap);
static void df_install_ref (df_ref, struct df_reg_info *,
                            struct df_ref_info *, bool);

static int df_ref_compare (const void *, const void *);
static int df_mw_compare (const void *, const void *);

/* Indexed by hardware reg number, is true if that register is ever
   used in the current function.

   In df-scan.c, this is set up to record the hard regs used
   explicitly.  Reload adds in the hard regs used for holding pseudo
   regs.  Final uses it to generate the code in the function prologue
   and epilogue to save and restore registers as needed.  */

static bool regs_ever_live[FIRST_PSEUDO_REGISTER];

/*----------------------------------------------------------------------------
   SCANNING DATAFLOW PROBLEM

   There are several ways in which scanning looks just like the other
   dataflow problems.  It shares all the mechanisms for local info
   as well as basic block info.  Where it differs is when and how often
   it gets run.  It also has no need for the iterative solver.
----------------------------------------------------------------------------*/

/* Problem data for the scanning dataflow function.  */
struct df_scan_problem_data
{
  alloc_pool ref_base_pool;
  alloc_pool ref_artificial_pool;
  alloc_pool ref_regular_pool;
  alloc_pool ref_extract_pool;
  alloc_pool insn_pool;
  alloc_pool reg_pool;
  alloc_pool mw_reg_pool;
  bitmap_obstack reg_bitmaps;
  bitmap_obstack insn_bitmaps;
};

typedef struct df_scan_bb_info *df_scan_bb_info_t;


/* Internal function to shut down the scanning problem.  */
static void
df_scan_free_internal (void)
{
  struct df_scan_problem_data *problem_data
    = (struct df_scan_problem_data *) df_scan->problem_data;
  unsigned int i;
  basic_block bb;

  /* The vectors that hold the refs are not pool allocated because
     they come in many sizes.  This makes them impossible to delete
     all at once.  */
  for (i = 0; i < DF_INSN_SIZE(); i++)
    {
      struct df_insn_info *insn_info = DF_INSN_UID_GET(i);
      /* Skip the insns that have no insn_info or have been
         deleted.  */
      if (insn_info)
        {
          df_scan_free_ref_vec (insn_info->defs);
          df_scan_free_ref_vec (insn_info->uses);
          df_scan_free_ref_vec (insn_info->eq_uses);
          df_scan_free_mws_vec (insn_info->mw_hardregs);
        }
    }

  FOR_ALL_BB (bb)
    {
      unsigned int bb_index = bb->index;
      struct df_scan_bb_info *bb_info = df_scan_get_bb_info (bb_index);
      if (bb_info)
        {
          df_scan_free_ref_vec (bb_info->artificial_defs);
          df_scan_free_ref_vec (bb_info->artificial_uses);
        }
    }

  free (df->def_info.refs);
  free (df->def_info.begin);
  free (df->def_info.count);
  memset (&df->def_info, 0, (sizeof (struct df_ref_info)));

  free (df->use_info.refs);
  free (df->use_info.begin);
  free (df->use_info.count);
  memset (&df->use_info, 0, (sizeof (struct df_ref_info)));

  free (df->def_regs);
  df->def_regs = NULL;
  free (df->use_regs);
  df->use_regs = NULL;
  free (df->eq_use_regs);
  df->eq_use_regs = NULL;
  df->regs_size = 0;
  DF_REG_SIZE(df) = 0;

  free (df->insns);
  df->insns = NULL;
  DF_INSN_SIZE () = 0;

  free (df_scan->block_info);
  df_scan->block_info = NULL;
  df_scan->block_info_size = 0;

  BITMAP_FREE (df->hardware_regs_used);
  BITMAP_FREE (df->regular_block_artificial_uses);
  BITMAP_FREE (df->eh_block_artificial_uses);
  BITMAP_FREE (df->entry_block_defs);
  BITMAP_FREE (df->exit_block_uses);
  BITMAP_FREE (df->insns_to_delete);
  BITMAP_FREE (df->insns_to_rescan);
  BITMAP_FREE (df->insns_to_notes_rescan);

  free_alloc_pool (df_scan->block_pool);
  free_alloc_pool (problem_data->ref_base_pool);
  free_alloc_pool (problem_data->ref_artificial_pool);
  free_alloc_pool (problem_data->ref_regular_pool);
  free_alloc_pool (problem_data->ref_extract_pool);
  free_alloc_pool (problem_data->insn_pool);
  free_alloc_pool (problem_data->reg_pool);
  free_alloc_pool (problem_data->mw_reg_pool);
  bitmap_obstack_release (&problem_data->reg_bitmaps);
  bitmap_obstack_release (&problem_data->insn_bitmaps);
  free (df_scan->problem_data);
}


/* Set basic block info.  */

static void
df_scan_set_bb_info (unsigned int index,
                     struct df_scan_bb_info *bb_info)
{
  gcc_assert (df_scan);
  df_grow_bb_info (df_scan);
  df_scan->block_info[index] = (void *) bb_info;
}


/* Free basic block info.  */

static void
df_scan_free_bb_info (basic_block bb, void *vbb_info)
{
  struct df_scan_bb_info *bb_info = (struct df_scan_bb_info *) vbb_info;
  unsigned int bb_index = bb->index;
  if (bb_info)
    {
      rtx insn;
      FOR_BB_INSNS (bb, insn)
        {
          if (INSN_P (insn))
            /* Delete the df information for INSN.  */
            df_insn_delete (bb, INSN_UID (insn));
        }

      if (bb_index < df_scan->block_info_size)
        bb_info = df_scan_get_bb_info (bb_index);

      /* Get rid of any artificial uses or defs.  */
      df_ref_chain_delete_du_chain (bb_info->artificial_defs);
      df_ref_chain_delete_du_chain (bb_info->artificial_uses);
      df_ref_chain_delete (bb_info->artificial_defs);
      df_ref_chain_delete (bb_info->artificial_uses);
      bb_info->artificial_defs = NULL;
      bb_info->artificial_uses = NULL;
      pool_free (df_scan->block_pool, bb_info);
    }
}


/* Allocate the problem data for the scanning problem.  This should be
   called when the problem is created or when the entire function is to
   be rescanned.  */
void
df_scan_alloc (bitmap all_blocks ATTRIBUTE_UNUSED)
{
  struct df_scan_problem_data *problem_data;
  unsigned int insn_num = get_max_uid () + 1;
  unsigned int block_size = 400;
  basic_block bb;

  /* Given the number of pools, this is really faster than tearing
     everything apart.  */
  if (df_scan->problem_data)
    df_scan_free_internal ();

  df_scan->block_pool
    = create_alloc_pool ("df_scan_block pool",
                         sizeof (struct df_scan_bb_info),
                         block_size);

  problem_data = XNEW (struct df_scan_problem_data);
  df_scan->problem_data = problem_data;
  df_scan->computed = true;

  problem_data->ref_base_pool
    = create_alloc_pool ("df_scan ref base",
                         sizeof (struct df_base_ref), block_size);
  problem_data->ref_artificial_pool
    = create_alloc_pool ("df_scan ref artificial",
                         sizeof (struct df_artificial_ref), block_size);
  problem_data->ref_regular_pool
    = create_alloc_pool ("df_scan ref regular",
                         sizeof (struct df_regular_ref), block_size);
  problem_data->ref_extract_pool
    = create_alloc_pool ("df_scan ref extract",
                         sizeof (struct df_extract_ref), block_size);
  problem_data->insn_pool
    = create_alloc_pool ("df_scan insn",
                         sizeof (struct df_insn_info), block_size);
  problem_data->reg_pool
    = create_alloc_pool ("df_scan reg",
                         sizeof (struct df_reg_info), block_size);
  problem_data->mw_reg_pool
    = create_alloc_pool ("df_scan mw_reg",
                         sizeof (struct df_mw_hardreg), block_size);

  bitmap_obstack_initialize (&problem_data->reg_bitmaps);
  bitmap_obstack_initialize (&problem_data->insn_bitmaps);

  insn_num += insn_num / 4;
  df_grow_reg_info ();

  df_grow_insn_info ();
  df_grow_bb_info (df_scan);

  FOR_ALL_BB (bb)
    {
      unsigned int bb_index = bb->index;
      struct df_scan_bb_info *bb_info = df_scan_get_bb_info (bb_index);
      if (!bb_info)
        {
          bb_info = (struct df_scan_bb_info *) pool_alloc (df_scan->block_pool);
          df_scan_set_bb_info (bb_index, bb_info);
        }
      bb_info->artificial_defs = NULL;
      bb_info->artificial_uses = NULL;
    }

  df->hardware_regs_used = BITMAP_ALLOC (&problem_data->reg_bitmaps);
  df->regular_block_artificial_uses = BITMAP_ALLOC (&problem_data->reg_bitmaps);
  df->eh_block_artificial_uses = BITMAP_ALLOC (&problem_data->reg_bitmaps);
  df->entry_block_defs = BITMAP_ALLOC (&problem_data->reg_bitmaps);
  df->exit_block_uses = BITMAP_ALLOC (&problem_data->reg_bitmaps);
  df->insns_to_delete = BITMAP_ALLOC (&problem_data->insn_bitmaps);
  df->insns_to_rescan = BITMAP_ALLOC (&problem_data->insn_bitmaps);
  df->insns_to_notes_rescan = BITMAP_ALLOC (&problem_data->insn_bitmaps);
  df_scan->optional_p = false;
}


/* Free all of the data associated with the scan problem.  */

static void
df_scan_free (void)
{
  if (df_scan->problem_data)
    df_scan_free_internal ();

  if (df->blocks_to_analyze)
    {
      BITMAP_FREE (df->blocks_to_analyze);
      df->blocks_to_analyze = NULL;
    }

  free (df_scan);
}

/* Dump the preamble for DF_SCAN dump.  */
static void
df_scan_start_dump (FILE *file ATTRIBUTE_UNUSED)
{
  int i;
  int dcount = 0;
  int ucount = 0;
  int ecount = 0;
  int icount = 0;
  int ccount = 0;
  basic_block bb;
  rtx insn;

  fprintf (file, ";; invalidated by call \t");
  df_print_regset (file, regs_invalidated_by_call_regset);
  fprintf (file, ";; hardware regs used \t");
  df_print_regset (file, df->hardware_regs_used);
  fprintf (file, ";; regular block artificial uses \t");
  df_print_regset (file, df->regular_block_artificial_uses);
  fprintf (file, ";; eh block artificial uses \t");
  df_print_regset (file, df->eh_block_artificial_uses);
  fprintf (file, ";; entry block defs \t");
  df_print_regset (file, df->entry_block_defs);
  fprintf (file, ";; exit block uses \t");
  df_print_regset (file, df->exit_block_uses);
  fprintf (file, ";; regs ever live \t");
  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    if (df_regs_ever_live_p (i))
      fprintf (file, " %d[%s]", i, reg_names[i]);
  fprintf (file, "\n;; ref usage \t");

  for (i = 0; i < (int)df->regs_inited; i++)
    if (DF_REG_DEF_COUNT (i) || DF_REG_USE_COUNT (i) || DF_REG_EQ_USE_COUNT (i))
      {
        const char *sep = "";

        fprintf (file, "r%d={", i);
        if (DF_REG_DEF_COUNT (i))
          {
            fprintf (file, "%dd", DF_REG_DEF_COUNT (i));
            sep = ",";
            dcount += DF_REG_DEF_COUNT (i);
          }
        if (DF_REG_USE_COUNT (i))
          {
            fprintf (file, "%s%du", sep, DF_REG_USE_COUNT (i));
            sep = ",";
            ucount += DF_REG_USE_COUNT (i);
          }
        if (DF_REG_EQ_USE_COUNT (i))
          {
            fprintf (file, "%s%de", sep, DF_REG_EQ_USE_COUNT (i));
            ecount += DF_REG_EQ_USE_COUNT (i);
          }
        fprintf (file, "} ");
      }

  FOR_EACH_BB (bb)
    FOR_BB_INSNS (bb, insn)
      if (INSN_P (insn))
        {
          if (CALL_P (insn))
            ccount++;
          else
            icount++;
        }

  fprintf (file, "\n;; total ref usage %d{%dd,%du,%de} in %d{%d regular + %d call} insns.\n",
           dcount + ucount + ecount, dcount, ucount, ecount, icount + ccount, icount, ccount);
}

/* Dump the bb_info for a given basic block.  */
static void
df_scan_start_block (basic_block bb, FILE *file)
{
  struct df_scan_bb_info *bb_info
    = df_scan_get_bb_info (bb->index);

  if (bb_info)
    {
      fprintf (file, ";; bb %d artificial_defs: ", bb->index);
      df_refs_chain_dump (bb_info->artificial_defs, true, file);
      fprintf (file, "\n;; bb %d artificial_uses: ", bb->index);
      df_refs_chain_dump (bb_info->artificial_uses, true, file);
      fprintf (file, "\n");
    }
#if 0
  {
    rtx insn;
    FOR_BB_INSNS (bb, insn)
      if (INSN_P (insn))
        df_insn_debug (insn, false, file);
  }
#endif
}

static struct df_problem problem_SCAN =
{
  DF_SCAN,                    /* Problem id.  */
  DF_NONE,                    /* Direction.  */
  df_scan_alloc,              /* Allocate the problem specific data.  */
  NULL,                       /* Reset global information.  */
  df_scan_free_bb_info,       /* Free basic block info.  */
  NULL,                       /* Local compute function.  */
  NULL,                       /* Init the solution specific data.  */
  NULL,                       /* Iterative solver.  */
  NULL,                       /* Confluence operator 0.  */
  NULL,                       /* Confluence operator n.  */
  NULL,                       /* Transfer function.  */
  NULL,                       /* Finalize function.  */
  df_scan_free,               /* Free all of the problem information.  */
  NULL,                       /* Remove this problem from the stack of dataflow problems.  */
  df_scan_start_dump,         /* Debugging.  */
  df_scan_start_block,        /* Debugging start block.  */
  NULL,                       /* Debugging end block.  */
  NULL,                       /* Incremental solution verify start.  */
  NULL,                       /* Incremental solution verify end.  */
  NULL,                       /* Dependent problem.  */
  TV_DF_SCAN,                 /* Timing variable.  */
  false                       /* Reset blocks on dropping out of blocks_to_analyze.  */
};


/* Create a new DATAFLOW instance and add it to an existing instance
   of DF.  The returned structure is what is used to get at the
   solution.  */

void
df_scan_add_problem (void)
{
  df_add_problem (&problem_SCAN);
}
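
/* Illustrative usage sketch (not part of the original file): a pass
   that wants df information typically registers the scan problem,
   optionally adds dependent problems, and then runs the analysis:

     df_scan_add_problem ();
     df_chain_add_problem (DF_DU_CHAIN);
     df_analyze ();

   df_chain_add_problem and df_analyze are assumed here from
   df-core.c / df-problems.c; the exact sequence a pass uses may
   differ.  */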


/*----------------------------------------------------------------------------
   Storage Allocation Utilities
----------------------------------------------------------------------------*/


/* First, grow the reg_info information.  If the current size is less than
   the number of pseudos, grow to 25% more than the number of
   pseudos.

   Second, assure that all of the slots up to max_reg_num have been
   filled with reg_info structures.  */

void
df_grow_reg_info (void)
{
  unsigned int max_reg = max_reg_num ();
  unsigned int new_size = max_reg;
  struct df_scan_problem_data *problem_data
    = (struct df_scan_problem_data *) df_scan->problem_data;
  unsigned int i;

  if (df->regs_size < new_size)
    {
      new_size += new_size / 4;
      df->def_regs = XRESIZEVEC (struct df_reg_info *, df->def_regs, new_size);
      df->use_regs = XRESIZEVEC (struct df_reg_info *, df->use_regs, new_size);
      df->eq_use_regs = XRESIZEVEC (struct df_reg_info *, df->eq_use_regs,
                                    new_size);
      df->def_info.begin = XRESIZEVEC (unsigned, df->def_info.begin, new_size);
      df->def_info.count = XRESIZEVEC (unsigned, df->def_info.count, new_size);
      df->use_info.begin = XRESIZEVEC (unsigned, df->use_info.begin, new_size);
      df->use_info.count = XRESIZEVEC (unsigned, df->use_info.count, new_size);
      df->regs_size = new_size;
    }

  for (i = df->regs_inited; i < max_reg; i++)
    {
      struct df_reg_info *reg_info;

      reg_info = (struct df_reg_info *) pool_alloc (problem_data->reg_pool);
      memset (reg_info, 0, sizeof (struct df_reg_info));
      df->def_regs[i] = reg_info;
      reg_info = (struct df_reg_info *) pool_alloc (problem_data->reg_pool);
      memset (reg_info, 0, sizeof (struct df_reg_info));
      df->use_regs[i] = reg_info;
      reg_info = (struct df_reg_info *) pool_alloc (problem_data->reg_pool);
      memset (reg_info, 0, sizeof (struct df_reg_info));
      df->eq_use_regs[i] = reg_info;
      df->def_info.begin[i] = 0;
      df->def_info.count[i] = 0;
      df->use_info.begin[i] = 0;
      df->use_info.count[i] = 0;
    }

  df->regs_inited = max_reg;
}
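
/* Worked example (illustrative, not from the original source): with
   max_reg_num () == 100 and df->regs_size == 80, new_size starts at
   100; since 80 < 100 the vectors are resized to 100 + 100/4 == 125
   slots, leaving headroom so the next few new pseudos do not force
   another reallocation.  */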


/* Grow the ref information.  */

static void
df_grow_ref_info (struct df_ref_info *ref_info, unsigned int new_size)
{
  if (ref_info->refs_size < new_size)
    {
      ref_info->refs = XRESIZEVEC (df_ref, ref_info->refs, new_size);
      memset (ref_info->refs + ref_info->refs_size, 0,
              (new_size - ref_info->refs_size) * sizeof (df_ref));
      ref_info->refs_size = new_size;
    }
}
/* Check and grow the ref information if necessary.  This routine
   guarantees that the refs array has at least total_size +
   BITMAP_ADDEND entries.  It updates ref_info->refs_size only and
   does not change ref_info->total_size.  */

static void
df_check_and_grow_ref_info (struct df_ref_info *ref_info,
                            unsigned bitmap_addend)
{
  if (ref_info->refs_size < ref_info->total_size + bitmap_addend)
    {
      int new_size = ref_info->total_size + bitmap_addend;
      new_size += ref_info->total_size / 4;
      df_grow_ref_info (ref_info, new_size);
    }
}
/* Grow the insn info.  If the current size is less than the
   number of instructions, grow to 25% more than the number of
   instructions.  */

void
df_grow_insn_info (void)
{
  unsigned int new_size = get_max_uid () + 1;
  if (DF_INSN_SIZE () < new_size)
    {
      new_size += new_size / 4;
      df->insns = XRESIZEVEC (struct df_insn_info *, df->insns, new_size);
      memset (df->insns + df->insns_size, 0,
              (new_size - DF_INSN_SIZE ()) * sizeof (struct df_insn_info *));
      DF_INSN_SIZE () = new_size;
    }
}




/*----------------------------------------------------------------------------
   PUBLIC INTERFACES FOR SMALL GRAIN CHANGES TO SCANNING.
----------------------------------------------------------------------------*/

/* Rescan all of the blocks_to_analyze, or all of the blocks in the
   function if blocks_to_analyze is NULL.  */

void
df_scan_blocks (void)
{
  basic_block bb;

  df->def_info.ref_order = DF_REF_ORDER_NO_TABLE;
  df->use_info.ref_order = DF_REF_ORDER_NO_TABLE;

  df_get_regular_block_artificial_uses (df->regular_block_artificial_uses);
  df_get_eh_block_artificial_uses (df->eh_block_artificial_uses);

  bitmap_ior_into (df->eh_block_artificial_uses,
                   df->regular_block_artificial_uses);

  /* ENTRY and EXIT blocks have special defs/uses.  */
  df_get_entry_block_def_set (df->entry_block_defs);
  df_record_entry_block_defs (df->entry_block_defs);
  df_get_exit_block_use_set (df->exit_block_uses);
  df_record_exit_block_uses (df->exit_block_uses);
  df_set_bb_dirty (BASIC_BLOCK (ENTRY_BLOCK));
  df_set_bb_dirty (BASIC_BLOCK (EXIT_BLOCK));

  /* Regular blocks */
  FOR_EACH_BB (bb)
    {
      unsigned int bb_index = bb->index;
      df_bb_refs_record (bb_index, true);
    }
}
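
/* Illustrative sketch (not part of the original file): restricting the
   analysis to a subset of blocks is done through df_set_blocks from
   df-core.c before the analysis runs, e.g.

     bitmap blocks = BITMAP_ALLOC (&df_bitmap_obstack);
     bitmap_set_bit (blocks, some_bb->index);
     df_set_blocks (blocks);
     df_analyze ();
     BITMAP_FREE (blocks);

   some_bb here is a hypothetical basic_block supplied by the pass.  */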


/* Create a new ref of type DF_REF_TYPE for register REG at address
   LOC within INSN of BB.  This function is only used externally.

   If the REF_FLAGS field contains DF_REF_SIGN_EXTRACT or
   DF_REF_ZERO_EXTRACT, WIDTH, OFFSET and MODE are used to access the
   extract fields if they were constants; otherwise they should be -1
   when those flags are set.  */

df_ref
df_ref_create (rtx reg, rtx *loc, rtx insn,
               basic_block bb,
               enum df_ref_type ref_type,
               enum df_ref_flags ref_flags,
               int width, int offset, enum machine_mode mode)
{
  df_ref ref;
  struct df_reg_info **reg_info;
  struct df_ref_info *ref_info;
  df_ref *ref_rec;
  df_ref **ref_rec_ptr;
  unsigned int count = 0;
  bool add_to_table;
  enum df_ref_class cl;

  df_grow_reg_info ();

  /* You cannot hack artificial refs.  */
  gcc_assert (insn);

  if (width != -1 || offset != -1)
    cl = DF_REF_EXTRACT;
  else if (loc)
    cl = DF_REF_REGULAR;
  else
    cl = DF_REF_BASE;
  ref = df_ref_create_structure (cl, NULL, reg, loc, bb, DF_INSN_INFO_GET (insn),
                                 ref_type, ref_flags,
                                 width, offset, mode);

  if (DF_REF_REG_DEF_P (ref))
    {
      reg_info = df->def_regs;
      ref_info = &df->def_info;
      ref_rec_ptr = &DF_INSN_DEFS (insn);
      add_to_table = ref_info->ref_order != DF_REF_ORDER_NO_TABLE;
    }
  else if (DF_REF_FLAGS (ref) & DF_REF_IN_NOTE)
    {
      reg_info = df->eq_use_regs;
      ref_info = &df->use_info;
      ref_rec_ptr = &DF_INSN_EQ_USES (insn);
      switch (ref_info->ref_order)
        {
        case DF_REF_ORDER_UNORDERED_WITH_NOTES:
        case DF_REF_ORDER_BY_REG_WITH_NOTES:
        case DF_REF_ORDER_BY_INSN_WITH_NOTES:
          add_to_table = true;
          break;
        default:
          add_to_table = false;
          break;
        }
    }
  else
    {
      reg_info = df->use_regs;
      ref_info = &df->use_info;
      ref_rec_ptr = &DF_INSN_USES (insn);
      add_to_table = ref_info->ref_order != DF_REF_ORDER_NO_TABLE;
    }

  /* Do not add if ref is not in the right blocks.  */
  if (add_to_table && df->analyze_subset)
    add_to_table = bitmap_bit_p (df->blocks_to_analyze, bb->index);

  df_install_ref (ref, reg_info[DF_REF_REGNO (ref)], ref_info, add_to_table);

  if (add_to_table)
    switch (ref_info->ref_order)
      {
      case DF_REF_ORDER_UNORDERED_WITH_NOTES:
      case DF_REF_ORDER_BY_REG_WITH_NOTES:
      case DF_REF_ORDER_BY_INSN_WITH_NOTES:
        ref_info->ref_order = DF_REF_ORDER_UNORDERED_WITH_NOTES;
        break;
      default:
        ref_info->ref_order = DF_REF_ORDER_UNORDERED;
        break;
      }

  ref_rec = *ref_rec_ptr;
  while (*ref_rec)
    {
      count++;
      ref_rec++;
    }

  ref_rec = *ref_rec_ptr;
  if (count)
    {
      ref_rec = XRESIZEVEC (df_ref, ref_rec, count+2);
      *ref_rec_ptr = ref_rec;
      ref_rec[count] = ref;
      ref_rec[count+1] = NULL;
      qsort (ref_rec, count + 1, sizeof (df_ref), df_ref_compare);
    }
  else
    {
      df_ref *ref_rec = XNEWVEC (df_ref, 2);
      ref_rec[0] = ref;
      ref_rec[1] = NULL;
      *ref_rec_ptr = ref_rec;
    }

#if 0
  if (dump_file)
    {
      fprintf (dump_file, "adding ref ");
      df_ref_debug (ref, dump_file);
    }
#endif
  /* By adding the ref directly, df_insn_rescan may not find any
     differences even though the block will have changed.  So we need
     to mark the block dirty ourselves.  */
  df_set_bb_dirty (bb);

  return ref;
}
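
/* Illustrative call (not from the original source): a pass that adds a
   plain use of register REG at *LOC inside INSN might create the ref
   with

     df_ref_create (reg, loc, insn, BLOCK_FOR_INSN (insn),
                    DF_REF_REG_USE, 0, -1, -1, VOIDmode);

   where width == offset == -1 and mode == VOIDmode because no
   sign/zero extract flag is passed; reg, loc and insn are assumed to
   be set up by the caller.  */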


/*----------------------------------------------------------------------------
   UTILITIES TO CREATE AND DESTROY REFS AND CHAINS.
----------------------------------------------------------------------------*/

static void
df_free_ref (df_ref ref)
{
  struct df_scan_problem_data *problem_data
    = (struct df_scan_problem_data *) df_scan->problem_data;

  switch (DF_REF_CLASS (ref))
    {
    case DF_REF_BASE:
      pool_free (problem_data->ref_base_pool, ref);
      break;

    case DF_REF_ARTIFICIAL:
      pool_free (problem_data->ref_artificial_pool, ref);
      break;

    case DF_REF_REGULAR:
      pool_free (problem_data->ref_regular_pool, ref);
      break;

    case DF_REF_EXTRACT:
      pool_free (problem_data->ref_extract_pool, ref);
      break;
    }
}

/* Unlink and delete REF from the reg_use, reg_eq_use or reg_def chain.
   Also delete the def-use or use-def chain if it exists.  */

static void
df_reg_chain_unlink (df_ref ref)
{
  df_ref next = DF_REF_NEXT_REG (ref);
  df_ref prev = DF_REF_PREV_REG (ref);
  int id = DF_REF_ID (ref);
  struct df_reg_info *reg_info;
  df_ref *refs = NULL;

  if (DF_REF_REG_DEF_P (ref))
    {
      int regno = DF_REF_REGNO (ref);
      reg_info = DF_REG_DEF_GET (regno);
      refs = df->def_info.refs;
    }
  else
    {
      if (DF_REF_FLAGS (ref) & DF_REF_IN_NOTE)
        {
          reg_info = DF_REG_EQ_USE_GET (DF_REF_REGNO (ref));
          switch (df->use_info.ref_order)
            {
            case DF_REF_ORDER_UNORDERED_WITH_NOTES:
            case DF_REF_ORDER_BY_REG_WITH_NOTES:
            case DF_REF_ORDER_BY_INSN_WITH_NOTES:
              refs = df->use_info.refs;
              break;
            default:
              break;
            }
        }
      else
        {
          reg_info = DF_REG_USE_GET (DF_REF_REGNO (ref));
          refs = df->use_info.refs;
        }
    }

  if (refs)
    {
      if (df->analyze_subset)
        {
          if (bitmap_bit_p (df->blocks_to_analyze, DF_REF_BBNO (ref)))
            refs[id] = NULL;
        }
      else
        refs[id] = NULL;
    }

  /* Delete any def-use or use-def chains that start here.  It is
     possible that there is trash in this field.  This happens for
     insns that have been deleted when rescanning has been deferred
     and the chain problem has also been deleted.  The chain tear down
     code skips deleted insns.  */
  if (df_chain && DF_REF_CHAIN (ref))
    df_chain_unlink (ref);

  reg_info->n_refs--;
  if (DF_REF_FLAGS_IS_SET (ref, DF_HARD_REG_LIVE))
    {
      gcc_assert (DF_REF_REGNO (ref) < FIRST_PSEUDO_REGISTER);
      df->hard_regs_live_count[DF_REF_REGNO (ref)]--;
    }

  /* Unlink from the reg chain.  If there is no prev, this is the
     first of the list.  If not, just join the next and prev.  */
  if (prev)
    DF_REF_NEXT_REG (prev) = next;
  else
    {
      gcc_assert (reg_info->reg_chain == ref);
      reg_info->reg_chain = next;
    }
  if (next)
    DF_REF_PREV_REG (next) = prev;

  df_free_ref (ref);
}


/* Remove REF from VEC.  */

static void
df_ref_compress_rec (df_ref **vec_ptr, df_ref ref)
{
  df_ref *vec = *vec_ptr;

  if (vec[1])
    {
      while (*vec && *vec != ref)
        vec++;

      while (*vec)
        {
          *vec = *(vec+1);
          vec++;
        }
    }
  else
    {
      free (vec);
      *vec_ptr = df_null_ref_rec;
    }
}
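
/* Worked example (illustrative, not from the original source): given a
   NULL-terminated vec {A, B, C, NULL} and ref == B, the first loop
   stops at B's slot and the second shifts the tail left, leaving
   {A, C, NULL, NULL}.  A one-element vec {A, NULL} has vec[1] == NULL,
   so it is freed and replaced by the shared df_null_ref_rec
   sentinel.  */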


/* Unlink REF from all def-use/use-def chains, etc.  */

void
df_ref_remove (df_ref ref)
{
#if 0
  if (dump_file)
    {
      fprintf (dump_file, "removing ref ");
      df_ref_debug (ref, dump_file);
    }
#endif

  if (DF_REF_REG_DEF_P (ref))
    {
      if (DF_REF_IS_ARTIFICIAL (ref))
        {
          struct df_scan_bb_info *bb_info
            = df_scan_get_bb_info (DF_REF_BBNO (ref));
          df_ref_compress_rec (&bb_info->artificial_defs, ref);
        }
      else
        {
          unsigned int uid = DF_REF_INSN_UID (ref);
          struct df_insn_info *insn_rec = DF_INSN_UID_GET (uid);
          df_ref_compress_rec (&insn_rec->defs, ref);
        }
    }
  else
    {
      if (DF_REF_IS_ARTIFICIAL (ref))
        {
          struct df_scan_bb_info *bb_info
            = df_scan_get_bb_info (DF_REF_BBNO (ref));
          df_ref_compress_rec (&bb_info->artificial_uses, ref);
        }
      else
        {
          unsigned int uid = DF_REF_INSN_UID (ref);
          struct df_insn_info *insn_rec = DF_INSN_UID_GET (uid);

          if (DF_REF_FLAGS (ref) & DF_REF_IN_NOTE)
            df_ref_compress_rec (&insn_rec->eq_uses, ref);
          else
            df_ref_compress_rec (&insn_rec->uses, ref);
        }
    }

  /* By deleting the ref directly, df_insn_rescan may not find any
     differences even though the block will have changed.  So we need
     to mark the block dirty ourselves.  */
  df_set_bb_dirty (DF_REF_BB (ref));
  df_reg_chain_unlink (ref);
}


/* Create the insn record for INSN.  If there was one there, zero it
   out.  */

struct df_insn_info *
df_insn_create_insn_record (rtx insn)
{
  struct df_scan_problem_data *problem_data
    = (struct df_scan_problem_data *) df_scan->problem_data;
  struct df_insn_info *insn_rec;

  df_grow_insn_info ();
  insn_rec = DF_INSN_INFO_GET (insn);
  if (!insn_rec)
    {
      insn_rec = (struct df_insn_info *) pool_alloc (problem_data->insn_pool);
      DF_INSN_INFO_SET (insn, insn_rec);
    }
  memset (insn_rec, 0, sizeof (struct df_insn_info));
  insn_rec->insn = insn;
  return insn_rec;
}

/* Delete the du chains (DF_REF_CHAIN()) of all refs in the ref chain.  */

static void
df_ref_chain_delete_du_chain (df_ref *ref_rec)
{
  while (*ref_rec)
    {
      df_ref ref = *ref_rec;
      /* CHAIN is allocated by DF_CHAIN.  So make sure to
         pass df_scan instance for the problem.  */
      if (DF_REF_CHAIN (ref))
        df_chain_unlink (ref);
      ref_rec++;
    }
}


/* Delete all refs in the ref chain.  */

static void
df_ref_chain_delete (df_ref *ref_rec)
{
  df_ref *start = ref_rec;
  while (*ref_rec)
    {
      df_reg_chain_unlink (*ref_rec);
      ref_rec++;
    }

  /* If the list is empty, it has a special shared element that is not
     to be deleted.  */
  if (*start)
    free (start);
}


/* Delete the hardreg chain.  */

static void
df_mw_hardreg_chain_delete (struct df_mw_hardreg **hardregs)
{
  struct df_scan_problem_data *problem_data;

  if (!hardregs)
    return;

  problem_data = (struct df_scan_problem_data *) df_scan->problem_data;

  while (*hardregs)
    {
      pool_free (problem_data->mw_reg_pool, *hardregs);
      hardregs++;
    }
}

/* Delete all of the refs information from INSN.  BB, which is used to
   mark the block as dirty, must be passed in except when this is
   called from df_process_deferred_rescans.  */

void
df_insn_delete (basic_block bb, unsigned int uid)
{
  struct df_insn_info *insn_info = NULL;
  if (!df)
    return;

  df_grow_bb_info (df_scan);
  df_grow_reg_info ();

  /* The block must be marked as dirty now, rather than later as in
     df_insn_rescan and df_notes_rescan because it may not be there at
     rescanning time and the mark would blow up.  */
  if (bb)
    df_set_bb_dirty (bb);

  insn_info = DF_INSN_UID_SAFE_GET (uid);

  /* The client has deferred rescanning.  */
  if (df->changeable_flags & DF_DEFER_INSN_RESCAN)
    {
      if (insn_info)
        {
          bitmap_clear_bit (df->insns_to_rescan, uid);
          bitmap_clear_bit (df->insns_to_notes_rescan, uid);
          bitmap_set_bit (df->insns_to_delete, uid);
        }
      if (dump_file)
        fprintf (dump_file, "deferring deletion of insn with uid = %d.\n", uid);
      return;
    }

  if (dump_file)
    fprintf (dump_file, "deleting insn with uid = %d.\n", uid);

  bitmap_clear_bit (df->insns_to_delete, uid);
  bitmap_clear_bit (df->insns_to_rescan, uid);
  bitmap_clear_bit (df->insns_to_notes_rescan, uid);
  if (insn_info)
    {
      struct df_scan_problem_data *problem_data
        = (struct df_scan_problem_data *) df_scan->problem_data;

      /* In general, notes do not have the insn_info fields
         initialized.  However, combine deletes insns by changing them
         to notes.  How clever.  So we cannot just check if it is a
         valid insn before short circuiting this code, we need to see
         if we actually initialized it.  */
      if (insn_info->defs)
        {
          df_mw_hardreg_chain_delete (insn_info->mw_hardregs);

          if (df_chain)
            {
              df_ref_chain_delete_du_chain (insn_info->defs);
              df_ref_chain_delete_du_chain (insn_info->uses);
              df_ref_chain_delete_du_chain (insn_info->eq_uses);
            }

          df_ref_chain_delete (insn_info->defs);
          df_ref_chain_delete (insn_info->uses);
          df_ref_chain_delete (insn_info->eq_uses);
        }
      pool_free (problem_data->insn_pool, insn_info);
      DF_INSN_UID_SET (uid, NULL);
    }
}
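
/* Illustrative sketch (not from the original source): passes normally
   let delete_insn and the df hooks invoke this, but a direct call
   looks like

     df_insn_delete (BLOCK_FOR_INSN (insn), INSN_UID (insn));

   Under DF_DEFER_INSN_RESCAN the uid is only queued in
   df->insns_to_delete; the real teardown happens later in
   df_process_deferred_rescans.  */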


/* Free all of the refs and the mw_hardregs in COLLECTION_REC.  */

static void
df_free_collection_rec (struct df_collection_rec *collection_rec)
{
  struct df_scan_problem_data *problem_data
    = (struct df_scan_problem_data *) df_scan->problem_data;
  df_ref *ref;
  struct df_mw_hardreg **mw;

  if (collection_rec->def_vec)
    for (ref = collection_rec->def_vec; *ref; ref++)
      df_free_ref (*ref);
  if (collection_rec->use_vec)
    for (ref = collection_rec->use_vec; *ref; ref++)
      df_free_ref (*ref);
  if (collection_rec->eq_use_vec)
    for (ref = collection_rec->eq_use_vec; *ref; ref++)
      df_free_ref (*ref);
  if (collection_rec->mw_vec)
    for (mw = collection_rec->mw_vec; *mw; mw++)
      pool_free (problem_data->mw_reg_pool, *mw);
}


/* Rescan INSN.  Return TRUE if the rescanning produced any changes.  */

bool
df_insn_rescan (rtx insn)
{
  unsigned int uid = INSN_UID (insn);
  struct df_insn_info *insn_info = NULL;
  basic_block bb = BLOCK_FOR_INSN (insn);
  struct df_collection_rec collection_rec;
  collection_rec.def_vec = XALLOCAVEC (df_ref, 1000);
  collection_rec.use_vec = XALLOCAVEC (df_ref, 1000);
  collection_rec.eq_use_vec = XALLOCAVEC (df_ref, 1000);
  collection_rec.mw_vec = XALLOCAVEC (struct df_mw_hardreg *, 100);

  if ((!df) || (!INSN_P (insn)))
    return false;

  if (!bb)
    {
      if (dump_file)
        fprintf (dump_file, "no bb for insn with uid = %d.\n", uid);
      return false;
    }

  /* The client has disabled rescanning and plans to do it itself.  */
  if (df->changeable_flags & DF_NO_INSN_RESCAN)
    return false;

  df_grow_bb_info (df_scan);
  df_grow_reg_info ();

  insn_info = DF_INSN_UID_SAFE_GET (uid);

  /* The client has deferred rescanning.  */
  if (df->changeable_flags & DF_DEFER_INSN_RESCAN)
    {
      if (!insn_info)
        {
          insn_info = df_insn_create_insn_record (insn);
          insn_info->defs = df_null_ref_rec;
          insn_info->uses = df_null_ref_rec;
          insn_info->eq_uses = df_null_ref_rec;
          insn_info->mw_hardregs = df_null_mw_rec;
        }
      if (dump_file)
        fprintf (dump_file, "deferring rescan insn with uid = %d.\n", uid);

      bitmap_clear_bit (df->insns_to_delete, uid);
      bitmap_clear_bit (df->insns_to_notes_rescan, uid);
      bitmap_set_bit (df->insns_to_rescan, INSN_UID (insn));
      return false;
    }

  bitmap_clear_bit (df->insns_to_delete, uid);
  bitmap_clear_bit (df->insns_to_rescan, uid);
  bitmap_clear_bit (df->insns_to_notes_rescan, uid);
  if (insn_info)
    {
      bool the_same = df_insn_refs_verify (&collection_rec, bb, insn, false);
      /* If there's no change, return false.  */
      if (the_same)
        {
          df_free_collection_rec (&collection_rec);
          if (dump_file)
            fprintf (dump_file, "verify found no changes in insn with uid = %d.\n", uid);
          return false;
        }
      if (dump_file)
        fprintf (dump_file, "rescanning insn with uid = %d.\n", uid);

      /* There's change - we need to delete the existing info.  */
      df_insn_delete (NULL, uid);
      df_insn_create_insn_record (insn);
    }
  else
    {
      struct df_insn_info *insn_info = df_insn_create_insn_record (insn);
      df_insn_refs_collect (&collection_rec, bb, insn_info);
      if (dump_file)
        fprintf (dump_file, "scanning new insn with uid = %d.\n", uid);
    }

  df_refs_add_to_chains (&collection_rec, bb, insn);
  df_set_bb_dirty (bb);
  return true;
}
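
/* Illustrative sketch (not from the original source): a pass that
   edits an insn pattern keeps the df information current like this:

     validate_change (insn, &SET_SRC (PATTERN (insn)), new_src, false);
     df_insn_rescan (insn);

   new_src is a hypothetical replacement rtx; under
   DF_DEFER_INSN_RESCAN the call merely queues the uid in
   df->insns_to_rescan and returns false.  */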


/* Rescan all of the insns in the function.  Note that the artificial
   uses and defs are not touched.  This function will destroy def-use
   or use-def chains.  */

void
df_insn_rescan_all (void)
{
  bool no_insn_rescan = false;
  bool defer_insn_rescan = false;
  basic_block bb;
  bitmap_iterator bi;
  unsigned int uid;
  bitmap tmp = BITMAP_ALLOC (&df_bitmap_obstack);

  if (df->changeable_flags & DF_NO_INSN_RESCAN)
    {
      df_clear_flags (DF_NO_INSN_RESCAN);
      no_insn_rescan = true;
    }

  if (df->changeable_flags & DF_DEFER_INSN_RESCAN)
    {
      df_clear_flags (DF_DEFER_INSN_RESCAN);
      defer_insn_rescan = true;
    }

  bitmap_copy (tmp, df->insns_to_delete);
  EXECUTE_IF_SET_IN_BITMAP (tmp, 0, uid, bi)
    {
      struct df_insn_info *insn_info = DF_INSN_UID_SAFE_GET (uid);
      if (insn_info)
        df_insn_delete (NULL, uid);
    }

  BITMAP_FREE (tmp);
  bitmap_clear (df->insns_to_delete);
  bitmap_clear (df->insns_to_rescan);
  bitmap_clear (df->insns_to_notes_rescan);

  FOR_EACH_BB (bb)
    {
      rtx insn;
      FOR_BB_INSNS (bb, insn)
        {
          df_insn_rescan (insn);
        }
    }

  if (no_insn_rescan)
    df_set_flags (DF_NO_INSN_RESCAN);
  if (defer_insn_rescan)
    df_set_flags (DF_DEFER_INSN_RESCAN);
}


/* Process all of the deferred rescans or deletions.  */

void
df_process_deferred_rescans (void)
{
  bool no_insn_rescan = false;
  bool defer_insn_rescan = false;
  bitmap_iterator bi;
  unsigned int uid;
  bitmap tmp = BITMAP_ALLOC (&df_bitmap_obstack);

  if (df->changeable_flags & DF_NO_INSN_RESCAN)
    {
      df_clear_flags (DF_NO_INSN_RESCAN);
      no_insn_rescan = true;
    }

  if (df->changeable_flags & DF_DEFER_INSN_RESCAN)
    {
      df_clear_flags (DF_DEFER_INSN_RESCAN);
      defer_insn_rescan = true;
    }

  if (dump_file)
    fprintf (dump_file, "starting the processing of deferred insns\n");

  bitmap_copy (tmp, df->insns_to_delete);
  EXECUTE_IF_SET_IN_BITMAP (tmp, 0, uid, bi)
    {
      struct df_insn_info *insn_info = DF_INSN_UID_SAFE_GET (uid);
      if (insn_info)
        df_insn_delete (NULL, uid);
    }

  bitmap_copy (tmp, df->insns_to_rescan);
  EXECUTE_IF_SET_IN_BITMAP (tmp, 0, uid, bi)
    {
      struct df_insn_info *insn_info = DF_INSN_UID_SAFE_GET (uid);
      if (insn_info)
        df_insn_rescan (insn_info->insn);
    }

  bitmap_copy (tmp, df->insns_to_notes_rescan);
  EXECUTE_IF_SET_IN_BITMAP (tmp, 0, uid, bi)
    {
      struct df_insn_info *insn_info = DF_INSN_UID_SAFE_GET (uid);
      if (insn_info)
        df_notes_rescan (insn_info->insn);
    }

  if (dump_file)
    fprintf (dump_file, "ending the processing of deferred insns\n");

  BITMAP_FREE (tmp);
  bitmap_clear (df->insns_to_delete);
  bitmap_clear (df->insns_to_rescan);
  bitmap_clear (df->insns_to_notes_rescan);

  if (no_insn_rescan)
    df_set_flags (DF_NO_INSN_RESCAN);
  if (defer_insn_rescan)
    df_set_flags (DF_DEFER_INSN_RESCAN);

  /* If someone changed regs_ever_live during this pass, fix up the
     entry and exit blocks.  */
  if (df->redo_entry_and_exit)
    {
      df_update_entry_exit_and_calls ();
      df->redo_entry_and_exit = false;
    }
}
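
/* Illustrative workflow (not from the original source): a pass that
   makes many changes before it needs up-to-date df info can batch the
   work:

     df_set_flags (DF_DEFER_INSN_RESCAN);
     ... edit, add and delete insns; each df_insn_rescan or
         df_insn_delete call above only queues the uid ...
     df_process_deferred_rescans ();

   df_set_flags and df_clear_flags are assumed here from df-core.c.  */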


/* Count the number of refs.  Include the defs if INCLUDE_DEFS.  Include
   the uses if INCLUDE_USES.  Include the eq_uses if
   INCLUDE_EQ_USES.  */

static unsigned int
df_count_refs (bool include_defs, bool include_uses,
               bool include_eq_uses)
{
  unsigned int regno;
  int size = 0;
  unsigned int m = df->regs_inited;

  for (regno = 0; regno < m; regno++)
    {
      if (include_defs)
        size += DF_REG_DEF_COUNT (regno);
      if (include_uses)
        size += DF_REG_USE_COUNT (regno);
      if (include_eq_uses)
        size += DF_REG_EQ_USE_COUNT (regno);
    }
  return size;
}


/* Build the ref table for either the uses or defs from the reg-use
   or reg-def chains.  This version processes the refs in reg order,
   which is likely to be best if processing the whole function.  */

static void
df_reorganize_refs_by_reg_by_reg (struct df_ref_info *ref_info,
                                  bool include_defs,
                                  bool include_uses,
                                  bool include_eq_uses)
{
  unsigned int m = df->regs_inited;
  unsigned int regno;
  unsigned int offset = 0;
  unsigned int start;

  if (df->changeable_flags & DF_NO_HARD_REGS)
    {
      start = FIRST_PSEUDO_REGISTER;
      memset (ref_info->begin, 0, sizeof (int) * FIRST_PSEUDO_REGISTER);
      memset (ref_info->count, 0, sizeof (int) * FIRST_PSEUDO_REGISTER);
    }
  else
    start = 0;

  ref_info->total_size
    = df_count_refs (include_defs, include_uses, include_eq_uses);

  df_check_and_grow_ref_info (ref_info, 1);

  for (regno = start; regno < m; regno++)
    {
      int count = 0;
      ref_info->begin[regno] = offset;
      if (include_defs)
        {
          df_ref ref = DF_REG_DEF_CHAIN (regno);
          while (ref)
            {
              ref_info->refs[offset] = ref;
              DF_REF_ID (ref) = offset++;
              count++;
              ref = DF_REF_NEXT_REG (ref);
              gcc_assert (offset < ref_info->refs_size);
            }
        }
      if (include_uses)
        {
          df_ref ref = DF_REG_USE_CHAIN (regno);
          while (ref)
            {
              ref_info->refs[offset] = ref;
              DF_REF_ID (ref) = offset++;
              count++;
              ref = DF_REF_NEXT_REG (ref);
              gcc_assert (offset < ref_info->refs_size);
            }
        }
      if (include_eq_uses)
        {
          df_ref ref = DF_REG_EQ_USE_CHAIN (regno);
          while (ref)
            {
              ref_info->refs[offset] = ref;
              DF_REF_ID (ref) = offset++;
              count++;
              ref = DF_REF_NEXT_REG (ref);
              gcc_assert (offset < ref_info->refs_size);
            }
        }
      ref_info->count[regno] = count;
    }

  /* The bitmap size is not decremented when refs are deleted.  So
     reset it now that we have squished out all of the empty
     slots.  */
  ref_info->table_size = offset;
}
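
/* Worked example (illustrative, not from the original source): with
   include_defs and include_uses set, a reg r5 with 2 defs and 1 use
   gets begin[5] set to the offset on entry, its 3 refs laid out
   consecutively in ref_info->refs, and count[5] == 3.  The next
   scanned reg starts where r5's refs end, so begin[r] + count[r]
   equals the begin of the next scanned reg.  */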


/* Build the ref table for either the uses or defs from the reg-use
   or reg-def chains.  This version processes the refs in insn order,
   which is likely to be best if processing some segment of the
   function.  */

static void
df_reorganize_refs_by_reg_by_insn (struct df_ref_info *ref_info,
                                   bool include_defs,
                                   bool include_uses,
                                   bool include_eq_uses)
{
  bitmap_iterator bi;
  unsigned int bb_index;
  unsigned int m = df->regs_inited;
  unsigned int offset = 0;
  unsigned int r;
  unsigned int start
    = (df->changeable_flags & DF_NO_HARD_REGS) ? FIRST_PSEUDO_REGISTER : 0;

  memset (ref_info->begin, 0, sizeof (int) * df->regs_inited);
  memset (ref_info->count, 0, sizeof (int) * df->regs_inited);

  ref_info->total_size = df_count_refs (include_defs, include_uses, include_eq_uses);
  df_check_and_grow_ref_info (ref_info, 1);

  EXECUTE_IF_SET_IN_BITMAP (df->blocks_to_analyze, 0, bb_index, bi)
    {
      basic_block bb = BASIC_BLOCK (bb_index);
      rtx insn;
      df_ref *ref_rec;

      if (include_defs)
        for (ref_rec = df_get_artificial_defs (bb_index); *ref_rec; ref_rec++)
          {
            unsigned int regno = DF_REF_REGNO (*ref_rec);
            ref_info->count[regno]++;
          }
      if (include_uses)
        for (ref_rec = df_get_artificial_uses (bb_index); *ref_rec; ref_rec++)
          {
            unsigned int regno = DF_REF_REGNO (*ref_rec);
            ref_info->count[regno]++;
          }

      FOR_BB_INSNS (bb, insn)
        {
          if (INSN_P (insn))
            {
              unsigned int uid = INSN_UID (insn);

              if (include_defs)
                for (ref_rec = DF_INSN_UID_DEFS (uid); *ref_rec; ref_rec++)
                  {
                    unsigned int regno = DF_REF_REGNO (*ref_rec);
                    ref_info->count[regno]++;
                  }
              if (include_uses)
                for (ref_rec = DF_INSN_UID_USES (uid); *ref_rec; ref_rec++)
                  {
                    unsigned int regno = DF_REF_REGNO (*ref_rec);
                    ref_info->count[regno]++;
                  }
              if (include_eq_uses)
                for (ref_rec = DF_INSN_UID_EQ_USES (uid); *ref_rec; ref_rec++)
                  {
                    unsigned int regno = DF_REF_REGNO (*ref_rec);
                    ref_info->count[regno]++;
                  }
            }
        }
    }

  for (r = start; r < m; r++)
    {
      ref_info->begin[r] = offset;
      offset += ref_info->count[r];
      ref_info->count[r] = 0;
    }

  EXECUTE_IF_SET_IN_BITMAP (df->blocks_to_analyze, 0, bb_index, bi)
    {
      basic_block bb = BASIC_BLOCK (bb_index);
      rtx insn;
      df_ref *ref_rec;

      if (include_defs)
        for (ref_rec = df_get_artificial_defs (bb_index); *ref_rec; ref_rec++)
          {
            df_ref ref = *ref_rec;
            unsigned int regno = DF_REF_REGNO (ref);
            if (regno >= start)
              {
                unsigned int id
                  = ref_info->begin[regno] + ref_info->count[regno]++;
                DF_REF_ID (ref) = id;
                ref_info->refs[id] = ref;
              }
          }
      if (include_uses)
        for (ref_rec = df_get_artificial_uses (bb_index); *ref_rec; ref_rec++)
          {
            df_ref ref = *ref_rec;
            unsigned int regno = DF_REF_REGNO (ref);
            if (regno >= start)
              {
                unsigned int id
                  = ref_info->begin[regno] + ref_info->count[regno]++;
                DF_REF_ID (ref) = id;
                ref_info->refs[id] = ref;
              }
          }

      FOR_BB_INSNS (bb, insn)
        {
          if (INSN_P (insn))
            {
              unsigned int uid = INSN_UID (insn);

              if (include_defs)
                for (ref_rec = DF_INSN_UID_DEFS (uid); *ref_rec; ref_rec++)
                  {
                    df_ref ref = *ref_rec;
                    unsigned int regno = DF_REF_REGNO (ref);
                    if (regno >= start)
                      {
                        unsigned int id
                          = ref_info->begin[regno] + ref_info->count[regno]++;
                        DF_REF_ID (ref) = id;
                        ref_info->refs[id] = ref;
                      }
                  }
              if (include_uses)
                for (ref_rec = DF_INSN_UID_USES (uid); *ref_rec; ref_rec++)
                  {
                    df_ref ref = *ref_rec;
                    unsigned int regno = DF_REF_REGNO (ref);
                    if (regno >= start)
                      {
                        unsigned int id
                          = ref_info->begin[regno] + ref_info->count[regno]++;
                        DF_REF_ID (ref) = id;
                        ref_info->refs[id] = ref;
                      }
                  }
              if (include_eq_uses)
                for (ref_rec = DF_INSN_UID_EQ_USES (uid); *ref_rec; ref_rec++)
                  {
                    df_ref ref = *ref_rec;
                    unsigned int regno = DF_REF_REGNO (ref);
                    if (regno >= start)
                      {
                        unsigned int id
                          = ref_info->begin[regno] + ref_info->count[regno]++;
                        DF_REF_ID (ref) = id;
                        ref_info->refs[id] = ref;
                      }
                  }
            }
        }
    }

  /* The bitmap size is not decremented when refs are deleted.  So
     reset it now that we have squished out all of the empty
     slots.  */

  ref_info->table_size = offset;
}

/* Build the ref table for either the uses or defs from the reg-use
   or reg-def chains.  */
1693 | |
1694 static void | |
1695 df_reorganize_refs_by_reg (struct df_ref_info *ref_info, | |
1696 bool include_defs, | |
1697 bool include_uses, | |
1698 bool include_eq_uses) | |
1699 { | |
1700 if (df->analyze_subset) | |
1701 df_reorganize_refs_by_reg_by_insn (ref_info, include_defs, | |
1702 include_uses, include_eq_uses); | |
1703 else | |
1704 df_reorganize_refs_by_reg_by_reg (ref_info, include_defs, | |
1705 include_uses, include_eq_uses); | |
1706 } | |
1707 | |
1708 | |
1709 /* Add the refs in REF_VEC to the table in REF_INFO starting at OFFSET. */ | |
1710 static unsigned int | |
1711 df_add_refs_to_table (unsigned int offset, | |
1712 struct df_ref_info *ref_info, | |
1713 df_ref *ref_vec) | |
1714 { | |
1715 while (*ref_vec) | |
1716 { | |
1717 df_ref ref = *ref_vec; | |
1718 if ((!(df->changeable_flags & DF_NO_HARD_REGS)) | |
1719 || (DF_REF_REGNO (ref) >= FIRST_PSEUDO_REGISTER)) | |
1720 { | |
1721 ref_info->refs[offset] = ref; | |
1722 DF_REF_ID (*ref_vec) = offset++; | |
1723 } | |
1724 ref_vec++; | |
1725 } | |
1726 return offset; | |
1727 } | |
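
/* Aside (illustrative only, not original code): df ref vectors store
   no explicit length; they are NULL-terminated arrays of pointers, so
   walking one looks like the loop above.  A generic length helper,
   with a hypothetical name:  */

static unsigned int ATTRIBUTE_UNUSED
df_example_vec_length (void **vec)
{
  unsigned int len = 0;
  /* The terminating NULL is not counted.  */
  while (*vec++)
    len++;
  return len;
}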


/* Count the number of refs in all of the insns of BB.  Include the
   defs if INCLUDE_DEFS.  Include the uses if INCLUDE_USES.  Include the
   eq_uses if INCLUDE_EQ_USES.  */

static unsigned int
df_reorganize_refs_by_insn_bb (basic_block bb, unsigned int offset,
			       struct df_ref_info *ref_info,
			       bool include_defs, bool include_uses,
			       bool include_eq_uses)
{
  rtx insn;

  if (include_defs)
    offset = df_add_refs_to_table (offset, ref_info,
				   df_get_artificial_defs (bb->index));
  if (include_uses)
    offset = df_add_refs_to_table (offset, ref_info,
				   df_get_artificial_uses (bb->index));

  FOR_BB_INSNS (bb, insn)
    if (INSN_P (insn))
      {
	unsigned int uid = INSN_UID (insn);
	if (include_defs)
	  offset = df_add_refs_to_table (offset, ref_info,
					 DF_INSN_UID_DEFS (uid));
	if (include_uses)
	  offset = df_add_refs_to_table (offset, ref_info,
					 DF_INSN_UID_USES (uid));
	if (include_eq_uses)
	  offset = df_add_refs_to_table (offset, ref_info,
					 DF_INSN_UID_EQ_USES (uid));
      }
  return offset;
}


/* Organize the refs by insn into the table in REF_INFO.  If
   blocks_to_analyze is defined, use that set, otherwise the entire
   program.  Include the defs if INCLUDE_DEFS.  Include the uses if
   INCLUDE_USES.  Include the eq_uses if INCLUDE_EQ_USES.  */

static void
df_reorganize_refs_by_insn (struct df_ref_info *ref_info,
			    bool include_defs, bool include_uses,
			    bool include_eq_uses)
{
  basic_block bb;
  unsigned int offset = 0;

  ref_info->total_size = df_count_refs (include_defs, include_uses, include_eq_uses);
  df_check_and_grow_ref_info (ref_info, 1);
  if (df->blocks_to_analyze)
    {
      bitmap_iterator bi;
      unsigned int index;

      EXECUTE_IF_SET_IN_BITMAP (df->blocks_to_analyze, 0, index, bi)
	{
	  offset = df_reorganize_refs_by_insn_bb (BASIC_BLOCK (index), offset, ref_info,
						  include_defs, include_uses,
						  include_eq_uses);
	}

      ref_info->table_size = offset;
    }
  else
    {
      FOR_ALL_BB (bb)
	offset = df_reorganize_refs_by_insn_bb (bb, offset, ref_info,
						include_defs, include_uses,
						include_eq_uses);
      ref_info->table_size = offset;
    }
}


/* If the use refs in DF are not organized, reorganize them.  */

void
df_maybe_reorganize_use_refs (enum df_ref_order order)
{
  if (order == df->use_info.ref_order)
    return;

  switch (order)
    {
    case DF_REF_ORDER_BY_REG:
      df_reorganize_refs_by_reg (&df->use_info, false, true, false);
      break;

    case DF_REF_ORDER_BY_REG_WITH_NOTES:
      df_reorganize_refs_by_reg (&df->use_info, false, true, true);
      break;

    case DF_REF_ORDER_BY_INSN:
      df_reorganize_refs_by_insn (&df->use_info, false, true, false);
      break;

    case DF_REF_ORDER_BY_INSN_WITH_NOTES:
      df_reorganize_refs_by_insn (&df->use_info, false, true, true);
      break;

    case DF_REF_ORDER_NO_TABLE:
      free (df->use_info.refs);
      df->use_info.refs = NULL;
      df->use_info.refs_size = 0;
      break;

    case DF_REF_ORDER_UNORDERED:
    case DF_REF_ORDER_UNORDERED_WITH_NOTES:
      gcc_unreachable ();
      break;
    }

  df->use_info.ref_order = order;
}


/* If the def refs in DF are not organized, reorganize them.  */

void
df_maybe_reorganize_def_refs (enum df_ref_order order)
{
  if (order == df->def_info.ref_order)
    return;

  switch (order)
    {
    case DF_REF_ORDER_BY_REG:
      df_reorganize_refs_by_reg (&df->def_info, true, false, false);
      break;

    case DF_REF_ORDER_BY_INSN:
      df_reorganize_refs_by_insn (&df->def_info, true, false, false);
      break;

    case DF_REF_ORDER_NO_TABLE:
      free (df->def_info.refs);
      df->def_info.refs = NULL;
      df->def_info.refs_size = 0;
      break;

    case DF_REF_ORDER_BY_INSN_WITH_NOTES:
    case DF_REF_ORDER_BY_REG_WITH_NOTES:
    case DF_REF_ORDER_UNORDERED:
    case DF_REF_ORDER_UNORDERED_WITH_NOTES:
      gcc_unreachable ();
      break;
    }

  df->def_info.ref_order = order;
}


/* Change all of the basic block references in INSN to use the insn's
   current basic block.  This function is called from routines that move
   instructions from one block to another.  */

void
df_insn_change_bb (rtx insn, basic_block new_bb)
{
  basic_block old_bb = BLOCK_FOR_INSN (insn);
  struct df_insn_info *insn_info;
  unsigned int uid = INSN_UID (insn);

  if (old_bb == new_bb)
    return;

  set_block_for_insn (insn, new_bb);

  if (!df)
    return;

  if (dump_file)
    fprintf (dump_file, "changing bb of uid %d\n", uid);

  insn_info = DF_INSN_UID_SAFE_GET (uid);
  if (insn_info == NULL)
    {
      if (dump_file)
	fprintf (dump_file, " unscanned insn\n");
      df_insn_rescan (insn);
      return;
    }

  if (!INSN_P (insn))
    return;

  df_set_bb_dirty (new_bb);
  if (old_bb)
    {
      if (dump_file)
	fprintf (dump_file, " from %d to %d\n",
		 old_bb->index, new_bb->index);
      df_set_bb_dirty (old_bb);
    }
  else
    if (dump_file)
      fprintf (dump_file, " to %d\n", new_bb->index);
}


/* Helper function for df_ref_change_reg_with_loc.  */

static void
df_ref_change_reg_with_loc_1 (struct df_reg_info *old_df,
			      struct df_reg_info *new_df,
			      int new_regno, rtx loc)
{
  df_ref the_ref = old_df->reg_chain;

  while (the_ref)
    {
      if ((!DF_REF_IS_ARTIFICIAL (the_ref))
	  && (DF_REF_LOC (the_ref))
	  && (*DF_REF_LOC (the_ref) == loc))
	{
	  df_ref next_ref = DF_REF_NEXT_REG (the_ref);
	  df_ref prev_ref = DF_REF_PREV_REG (the_ref);
	  df_ref *ref_vec, *ref_vec_t;
	  struct df_insn_info *insn_info = DF_REF_INSN_INFO (the_ref);
	  unsigned int count = 0;

	  DF_REF_REGNO (the_ref) = new_regno;
	  DF_REF_REG (the_ref) = regno_reg_rtx[new_regno];

	  /* Pull the_ref out of the old regno chain.  */
	  if (prev_ref)
	    DF_REF_NEXT_REG (prev_ref) = next_ref;
	  else
	    old_df->reg_chain = next_ref;
	  if (next_ref)
	    DF_REF_PREV_REG (next_ref) = prev_ref;
	  old_df->n_refs--;

	  /* Put the ref into the new regno chain.  */
	  DF_REF_PREV_REG (the_ref) = NULL;
	  DF_REF_NEXT_REG (the_ref) = new_df->reg_chain;
	  if (new_df->reg_chain)
	    DF_REF_PREV_REG (new_df->reg_chain) = the_ref;
	  new_df->reg_chain = the_ref;
	  new_df->n_refs++;
	  if (DF_REF_BB (the_ref))
	    df_set_bb_dirty (DF_REF_BB (the_ref));

	  /* The record that the ref was in needs to be re-sorted,
	     because the regno is a sorting key.  First, find the
	     right record.  */
	  if (DF_REF_FLAGS (the_ref) & DF_REF_IN_NOTE)
	    ref_vec = insn_info->eq_uses;
	  else
	    ref_vec = insn_info->uses;
	  if (dump_file)
	    fprintf (dump_file, "changing reg in insn %d\n",
		     DF_REF_INSN_UID (the_ref));

	  ref_vec_t = ref_vec;

	  /* Find the length.  */
	  while (*ref_vec_t)
	    {
	      count++;
	      ref_vec_t++;
	    }
	  qsort (ref_vec, count, sizeof (df_ref), df_ref_compare);

	  the_ref = next_ref;
	}
      else
	the_ref = DF_REF_NEXT_REG (the_ref);
    }
}
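
/* Illustrative sketch (added commentary, not part of the original
   code): moving THE_REF between regno chains above is a standard
   doubly-linked-list unlink followed by a push onto the new chain's
   head.  The same two steps on a hypothetical generic node type:  */

struct df_example_node
{
  struct df_example_node *prev, *next;
};

static void ATTRIBUTE_UNUSED
df_example_move_node (struct df_example_node *node,
		      struct df_example_node **old_head,
		      struct df_example_node **new_head)
{
  /* Unlink from the old chain, fixing up the head if NODE was
     first.  */
  if (node->prev)
    node->prev->next = node->next;
  else
    *old_head = node->next;
  if (node->next)
    node->next->prev = node->prev;

  /* Push onto the front of the new chain.  */
  node->prev = NULL;
  node->next = *new_head;
  if (*new_head)
    (*new_head)->prev = node;
  *new_head = node;
}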


/* Change the regno of all refs that contained LOC from OLD_REGNO to
   NEW_REGNO.  Refs that do not match LOC are not changed, which means
   that artificial refs are not changed, since they have no loc.  This
   call is to support the SET_REGNO macro.  */

void
df_ref_change_reg_with_loc (int old_regno, int new_regno, rtx loc)
{
  if ((!df) || (old_regno == -1) || (old_regno == new_regno))
    return;

  df_grow_reg_info ();

  df_ref_change_reg_with_loc_1 (DF_REG_DEF_GET (old_regno),
				DF_REG_DEF_GET (new_regno), new_regno, loc);
  df_ref_change_reg_with_loc_1 (DF_REG_USE_GET (old_regno),
				DF_REG_USE_GET (new_regno), new_regno, loc);
  df_ref_change_reg_with_loc_1 (DF_REG_EQ_USE_GET (old_regno),
				DF_REG_EQ_USE_GET (new_regno), new_regno, loc);
}


/* Delete the mw_hardregs that point into the eq_notes.  */

static unsigned int
df_mw_hardreg_chain_delete_eq_uses (struct df_insn_info *insn_info)
{
  struct df_mw_hardreg **mw_vec = insn_info->mw_hardregs;
  unsigned int deleted = 0;
  unsigned int count = 0;
  struct df_scan_problem_data *problem_data
    = (struct df_scan_problem_data *) df_scan->problem_data;

  if (!*mw_vec)
    return 0;

  while (*mw_vec)
    {
      if ((*mw_vec)->flags & DF_REF_IN_NOTE)
	{
	  struct df_mw_hardreg **temp_vec = mw_vec;

	  pool_free (problem_data->mw_reg_pool, *mw_vec);
	  /* Shove the remaining ones down one to fill the gap.  While
	     this looks n**2, it is highly unusual to have any mw regs
	     in eq_notes and the chances of more than one are almost
	     nonexistent.  */
	  while (*temp_vec)
	    {
	      *temp_vec = *(temp_vec + 1);
	      temp_vec++;
	    }
	  deleted++;
	}
      else
	{
	  mw_vec++;
	  count++;
	}
    }

  if (count == 0)
    {
      df_scan_free_mws_vec (insn_info->mw_hardregs);
      insn_info->mw_hardregs = df_null_mw_rec;
      return 0;
    }
  return deleted;
}
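
/* Illustrative sketch (not original code): the deletion above removes
   one slot from a NULL-terminated pointer vector by sliding the tail,
   including the terminating NULL, down one position.  On a generic
   vector, with VEC pointing at the doomed slot:  */

static void ATTRIBUTE_UNUSED
df_example_remove_entry (void **vec)
{
  /* The copied NULL terminator eventually stops the loop.  */
  while (*vec)
    {
      *vec = *(vec + 1);
      vec++;
    }
}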


/* Rescan only the REG_EQUIV/REG_EQUAL notes part of INSN.  */

void
df_notes_rescan (rtx insn)
{
  struct df_insn_info *insn_info;
  unsigned int uid = INSN_UID (insn);

  if (!df)
    return;

  /* The client has disabled rescanning and plans to do it itself.  */
  if (df->changeable_flags & DF_NO_INSN_RESCAN)
    return;

  /* Do nothing if the insn hasn't been emitted yet.  */
  if (!BLOCK_FOR_INSN (insn))
    return;

  df_grow_bb_info (df_scan);
  df_grow_reg_info ();

  insn_info = DF_INSN_UID_SAFE_GET (INSN_UID (insn));

  /* The client has deferred rescanning.  */
  if (df->changeable_flags & DF_DEFER_INSN_RESCAN)
    {
      if (!insn_info)
	{
	  insn_info = df_insn_create_insn_record (insn);
	  insn_info->defs = df_null_ref_rec;
	  insn_info->uses = df_null_ref_rec;
	  insn_info->eq_uses = df_null_ref_rec;
	  insn_info->mw_hardregs = df_null_mw_rec;
	}

      bitmap_clear_bit (df->insns_to_delete, uid);
      /* If the insn is already slated for a full rescan, there is no
	 need to also rescan its notes.  */
      if (!bitmap_bit_p (df->insns_to_rescan, uid))
	bitmap_set_bit (df->insns_to_notes_rescan, INSN_UID (insn));
      return;
    }

  bitmap_clear_bit (df->insns_to_delete, uid);
  bitmap_clear_bit (df->insns_to_notes_rescan, uid);

  if (insn_info)
    {
      basic_block bb = BLOCK_FOR_INSN (insn);
      rtx note;
      struct df_collection_rec collection_rec;
      unsigned int num_deleted;

      memset (&collection_rec, 0, sizeof (struct df_collection_rec));
      collection_rec.eq_use_vec = XALLOCAVEC (df_ref, 1000);
      collection_rec.mw_vec = XALLOCAVEC (struct df_mw_hardreg *, 1000);

      num_deleted = df_mw_hardreg_chain_delete_eq_uses (insn_info);
      df_ref_chain_delete (insn_info->eq_uses);
      insn_info->eq_uses = NULL;

      /* Process REG_EQUIV/REG_EQUAL notes.  */
      for (note = REG_NOTES (insn); note;
	   note = XEXP (note, 1))
	{
	  switch (REG_NOTE_KIND (note))
	    {
	    case REG_EQUIV:
	    case REG_EQUAL:
	      df_uses_record (DF_REF_REGULAR, &collection_rec,
			      &XEXP (note, 0), DF_REF_REG_USE,
			      bb, insn_info, DF_REF_IN_NOTE, -1, -1, 0);
	    default:
	      break;
	    }
	}

      /* Find some place to put any new mw_hardregs.  */
      df_canonize_collection_rec (&collection_rec);
      if (collection_rec.next_mw)
	{
	  unsigned int count = 0;
	  struct df_mw_hardreg **mw_rec = insn_info->mw_hardregs;
	  while (*mw_rec)
	    {
	      count++;
	      mw_rec++;
	    }

	  if (count)
	    {
	      /* Append to the end of the existing record after
		 expanding it if necessary.  */
	      if (collection_rec.next_mw > num_deleted)
		{
		  insn_info->mw_hardregs =
		    XRESIZEVEC (struct df_mw_hardreg *,
				insn_info->mw_hardregs,
				count + 1 + collection_rec.next_mw);
		}
	      memcpy (&insn_info->mw_hardregs[count], collection_rec.mw_vec,
		      (collection_rec.next_mw + 1) * sizeof (struct df_mw_hardreg *));
	      qsort (insn_info->mw_hardregs, count + collection_rec.next_mw,
		     sizeof (struct df_mw_hardreg *), df_mw_compare);
	    }
	  else
	    {
	      /* No vector there.  */
	      insn_info->mw_hardregs
		= XNEWVEC (struct df_mw_hardreg *,
			   count + 1 + collection_rec.next_mw);
	      memcpy (insn_info->mw_hardregs, collection_rec.mw_vec,
		      (collection_rec.next_mw + 1) * sizeof (struct df_mw_hardreg *));
	    }
	}
      /* Get rid of the mw_vec so that df_refs_add_to_chains will
	 ignore it.  */
      collection_rec.mw_vec = NULL;
      collection_rec.next_mw = 0;
      df_refs_add_to_chains (&collection_rec, bb, insn);
    }
  else
    df_insn_rescan (insn);
}


/*----------------------------------------------------------------------------
   Hard core instruction scanning code.  No external interfaces here,
   just a lot of routines that look inside insns.
----------------------------------------------------------------------------*/


/* Return true if the contents of two df_refs are identical.
   It ignores DF_REF_REG_MARKER.  */

static bool
df_ref_equal_p (df_ref ref1, df_ref ref2)
{
  if (!ref2)
    return false;

  if (ref1 == ref2)
    return true;

  if (DF_REF_CLASS (ref1) != DF_REF_CLASS (ref2)
      || DF_REF_REGNO (ref1) != DF_REF_REGNO (ref2)
      || DF_REF_REG (ref1) != DF_REF_REG (ref2)
      || DF_REF_TYPE (ref1) != DF_REF_TYPE (ref2)
      || ((DF_REF_FLAGS (ref1) & ~(DF_REF_REG_MARKER + DF_REF_MW_HARDREG))
	  != (DF_REF_FLAGS (ref2) & ~(DF_REF_REG_MARKER + DF_REF_MW_HARDREG)))
      || DF_REF_BB (ref1) != DF_REF_BB (ref2)
      || DF_REF_INSN_INFO (ref1) != DF_REF_INSN_INFO (ref2))
    return false;

  switch (DF_REF_CLASS (ref1))
    {
    case DF_REF_ARTIFICIAL:
    case DF_REF_BASE:
      return true;

    case DF_REF_EXTRACT:
      if ((DF_REF_EXTRACT_OFFSET (ref1) != DF_REF_EXTRACT_OFFSET (ref2))
	  || (DF_REF_EXTRACT_WIDTH (ref1) != DF_REF_EXTRACT_WIDTH (ref2))
	  || (DF_REF_EXTRACT_MODE (ref1) != DF_REF_EXTRACT_MODE (ref2)))
	return false;
      /* fallthru.  */

    case DF_REF_REGULAR:
      return DF_REF_LOC (ref1) == DF_REF_LOC (ref2);

    default:
      gcc_unreachable ();
    }
  return false;
}
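
/* Aside (illustrative only, not original code): the flags test above
   uses the common "equal modulo bookkeeping bits" idiom - mask off the
   bits that are allowed to differ on both sides before comparing.
   Reduced to its essence (hypothetical helper):  */

static bool ATTRIBUTE_UNUSED
df_example_flags_equal_p (int flags1, int flags2, int ignore_mask)
{
  return (flags1 & ~ignore_mask) == (flags2 & ~ignore_mask);
}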


/* Compare REF1 and REF2 for sorting.  This is only called from places
   where all of the refs are of the same type, in the same insn, and
   have the same bb.  So these fields are not checked.  */

static int
df_ref_compare (const void *r1, const void *r2)
{
  const df_ref ref1 = *(const df_ref *) r1;
  const df_ref ref2 = *(const df_ref *) r2;

  if (ref1 == ref2)
    return 0;

  if (DF_REF_CLASS (ref1) != DF_REF_CLASS (ref2))
    return (int) DF_REF_CLASS (ref1) - (int) DF_REF_CLASS (ref2);

  if (DF_REF_REGNO (ref1) != DF_REF_REGNO (ref2))
    return (int) DF_REF_REGNO (ref1) - (int) DF_REF_REGNO (ref2);

  if (DF_REF_TYPE (ref1) != DF_REF_TYPE (ref2))
    return (int) DF_REF_TYPE (ref1) - (int) DF_REF_TYPE (ref2);

  if (DF_REF_REG (ref1) != DF_REF_REG (ref2))
    return (int) DF_REF_ORDER (ref1) - (int) DF_REF_ORDER (ref2);

  /* Cannot look at the LOC field on artificial refs.  */
  if (DF_REF_CLASS (ref1) != DF_REF_ARTIFICIAL
      && DF_REF_LOC (ref1) != DF_REF_LOC (ref2))
    return (int) DF_REF_ORDER (ref1) - (int) DF_REF_ORDER (ref2);

  if (DF_REF_FLAGS (ref1) != DF_REF_FLAGS (ref2))
    {
      /* If two refs are identical except that one of them is from a
	 mw and one is not, we need to have the one with the mw
	 first.  */
      if (DF_REF_FLAGS_IS_SET (ref1, DF_REF_MW_HARDREG) ==
	  DF_REF_FLAGS_IS_SET (ref2, DF_REF_MW_HARDREG))
	return DF_REF_FLAGS (ref1) - DF_REF_FLAGS (ref2);
      else if (DF_REF_FLAGS_IS_SET (ref1, DF_REF_MW_HARDREG))
	return -1;
      else
	return 1;
    }

  /* The classes are the same at this point so it is safe to only look
     at ref1.  */
  if (DF_REF_CLASS (ref1) == DF_REF_EXTRACT)
    {
      if (DF_REF_EXTRACT_OFFSET (ref1) != DF_REF_EXTRACT_OFFSET (ref2))
	return DF_REF_EXTRACT_OFFSET (ref1) - DF_REF_EXTRACT_OFFSET (ref2);
      if (DF_REF_EXTRACT_WIDTH (ref1) != DF_REF_EXTRACT_WIDTH (ref2))
	return DF_REF_EXTRACT_WIDTH (ref1) - DF_REF_EXTRACT_WIDTH (ref2);
      if (DF_REF_EXTRACT_MODE (ref1) != DF_REF_EXTRACT_MODE (ref2))
	return DF_REF_EXTRACT_MODE (ref1) - DF_REF_EXTRACT_MODE (ref2);
    }
  return 0;
}
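
/* Illustrative sketch (added, not original code): df_ref_compare is a
   qsort comparator built as a cascade of keys, each consulted only
   when all earlier keys tie.  The same shape on a hypothetical plain
   struct; it would be used as
   qsort (v, n, sizeof (struct df_example_key), df_example_key_compare).  */

struct df_example_key
{
  int primary;
  int secondary;
};

static int ATTRIBUTE_UNUSED
df_example_key_compare (const void *p1, const void *p2)
{
  const struct df_example_key *k1 = (const struct df_example_key *) p1;
  const struct df_example_key *k2 = (const struct df_example_key *) p2;

  if (k1->primary != k2->primary)
    return k1->primary - k2->primary;
  /* Fall back to the secondary key only on a tie.  */
  return k1->secondary - k2->secondary;
}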

static void
df_swap_refs (df_ref *ref_vec, int i, int j)
{
  df_ref tmp = ref_vec[i];
  ref_vec[i] = ref_vec[j];
  ref_vec[j] = tmp;
}

/* Sort and compress a set of refs.  */

static unsigned int
df_sort_and_compress_refs (df_ref *ref_vec, unsigned int count)
{
  unsigned int i;
  unsigned int dist = 0;

  ref_vec[count] = NULL;
  /* If there are 1 or 0 elements, there is nothing to do.  */
  if (count < 2)
    return count;
  else if (count == 2)
    {
      if (df_ref_compare (&ref_vec[0], &ref_vec[1]) > 0)
	df_swap_refs (ref_vec, 0, 1);
    }
  else
    {
      for (i = 0; i < count - 1; i++)
	if (df_ref_compare (&ref_vec[i], &ref_vec[i + 1]) >= 0)
	  break;
      /* If the array is already strictly ordered, which is the most
	 common case when COUNT is large (as it is for CALL insns),
	 there is no need to sort and filter out duplicates.  Simply
	 return the count.  This relies on refs being added in
	 increasing order of df_ref_compare.  */
      if (i == count - 1)
	return count;
      qsort (ref_vec, count, sizeof (df_ref), df_ref_compare);
    }

  for (i = 0; i < count - dist; i++)
    {
      /* Find the next ref that is not equal to the current ref.  */
      while (df_ref_equal_p (ref_vec[i], ref_vec[i + dist + 1]))
	{
	  df_free_ref (ref_vec[i + dist + 1]);
	  dist++;
	}
      /* Copy it down to the next position.  */
      if (dist)
	ref_vec[i + 1] = ref_vec[i + dist + 1];
    }

  count -= dist;
  ref_vec[count] = NULL;
  return count;
}
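
/* Aside (illustrative only, not original code): the compression loop
   above keeps a running gap DIST between the read and write positions
   and frees duplicates as the gap widens, relying on the NULL sentinel
   at REF_VEC[COUNT] to stop the inner scan.  The classic form of the
   same single-pass dedup on a sorted int array, with a hypothetical
   name:  */

static unsigned int ATTRIBUTE_UNUSED
df_example_unique (int *a, unsigned int count)
{
  unsigned int read, write = 0;

  for (read = 0; read < count; read++)
    /* Keep A[READ] only if it differs from the last element kept.  */
    if (write == 0 || a[write - 1] != a[read])
      a[write++] = a[read];
  return write;
}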


/* Return true if the contents of two df_mw_hardregs are identical.  */

static bool
df_mw_equal_p (struct df_mw_hardreg *mw1, struct df_mw_hardreg *mw2)
{
  if (!mw2)
    return false;
  return (mw1 == mw2) ||
    (mw1->mw_reg == mw2->mw_reg
     && mw1->type == mw2->type
     && mw1->flags == mw2->flags
     && mw1->start_regno == mw2->start_regno
     && mw1->end_regno == mw2->end_regno);
}


/* Compare MW1 and MW2 for sorting.  */

static int
df_mw_compare (const void *m1, const void *m2)
{
  const struct df_mw_hardreg *const mw1 = *(const struct df_mw_hardreg *const *) m1;
  const struct df_mw_hardreg *const mw2 = *(const struct df_mw_hardreg *const *) m2;

  if (mw1 == mw2)
    return 0;

  if (mw1->type != mw2->type)
    return mw1->type - mw2->type;

  if (mw1->flags != mw2->flags)
    return mw1->flags - mw2->flags;

  if (mw1->start_regno != mw2->start_regno)
    return mw1->start_regno - mw2->start_regno;

  if (mw1->end_regno != mw2->end_regno)
    return mw1->end_regno - mw2->end_regno;

  if (mw1->mw_reg != mw2->mw_reg)
    return mw1->mw_order - mw2->mw_order;

  return 0;
}


/* Sort and compress a set of mws.  */

static unsigned int
df_sort_and_compress_mws (struct df_mw_hardreg **mw_vec, unsigned int count)
{
  struct df_scan_problem_data *problem_data
    = (struct df_scan_problem_data *) df_scan->problem_data;
  unsigned int i;
  unsigned int dist = 0;
  mw_vec[count] = NULL;

  if (count < 2)
    return count;
  else if (count == 2)
    {
      if (df_mw_compare (&mw_vec[0], &mw_vec[1]) > 0)
	{
	  struct df_mw_hardreg *tmp = mw_vec[0];
	  mw_vec[0] = mw_vec[1];
	  mw_vec[1] = tmp;
	}
    }
  else
    qsort (mw_vec, count, sizeof (struct df_mw_hardreg *), df_mw_compare);

  for (i = 0; i < count - dist; i++)
    {
      /* Find the next mw that is not equal to the current one.  */
      while (df_mw_equal_p (mw_vec[i], mw_vec[i + dist + 1]))
	{
	  pool_free (problem_data->mw_reg_pool, mw_vec[i + dist + 1]);
	  dist++;
	}
      /* Copy it down to the next position.  */
      if (dist)
	mw_vec[i + 1] = mw_vec[i + dist + 1];
    }

  count -= dist;
  mw_vec[count] = NULL;
  return count;
}


/* Sort and remove duplicates from the COLLECTION_REC.  */

static void
df_canonize_collection_rec (struct df_collection_rec *collection_rec)
{
  if (collection_rec->def_vec)
    collection_rec->next_def
      = df_sort_and_compress_refs (collection_rec->def_vec,
				   collection_rec->next_def);
  if (collection_rec->use_vec)
    collection_rec->next_use
      = df_sort_and_compress_refs (collection_rec->use_vec,
				   collection_rec->next_use);
  if (collection_rec->eq_use_vec)
    collection_rec->next_eq_use
      = df_sort_and_compress_refs (collection_rec->eq_use_vec,
				   collection_rec->next_eq_use);
  if (collection_rec->mw_vec)
    collection_rec->next_mw
      = df_sort_and_compress_mws (collection_rec->mw_vec,
				  collection_rec->next_mw);
}


/* Add the new df_ref to appropriate reg_info/ref_info chains.  */

static void
df_install_ref (df_ref this_ref,
		struct df_reg_info *reg_info,
		struct df_ref_info *ref_info,
		bool add_to_table)
{
  unsigned int regno = DF_REF_REGNO (this_ref);
  /* Add the ref to the reg_{def,use,eq_use} chain.  */
  df_ref head = reg_info->reg_chain;

  reg_info->reg_chain = this_ref;
  reg_info->n_refs++;

  if (DF_REF_FLAGS_IS_SET (this_ref, DF_HARD_REG_LIVE))
    {
      gcc_assert (regno < FIRST_PSEUDO_REGISTER);
      df->hard_regs_live_count[regno]++;
    }

  gcc_assert (DF_REF_NEXT_REG (this_ref) == NULL);
  gcc_assert (DF_REF_PREV_REG (this_ref) == NULL);

  DF_REF_NEXT_REG (this_ref) = head;

  /* We cannot actually link to the head of the chain.  */
  DF_REF_PREV_REG (this_ref) = NULL;

  if (head)
    DF_REF_PREV_REG (head) = this_ref;

  if (add_to_table)
    {
      gcc_assert (ref_info->ref_order != DF_REF_ORDER_NO_TABLE);
      df_check_and_grow_ref_info (ref_info, 1);
      DF_REF_ID (this_ref) = ref_info->table_size;
      /* Add the ref to the big array of defs.  */
      ref_info->refs[ref_info->table_size] = this_ref;
      ref_info->table_size++;
    }
  else
    DF_REF_ID (this_ref) = -1;

  ref_info->total_size++;
}
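
/* Illustrative sketch (added commentary, not original code): appending
   to the flat ref table above is a plain array append in which the
   entry remembers its own index (its DF_REF_ID), so it can later be
   found without searching.  A minimal growable table in the same
   spirit; the type and growth policy here are hypothetical:  */

struct df_example_table
{
  void **slots;
  unsigned int size;		/* Slots in use.  */
  unsigned int alloc;		/* Slots allocated.  */
};

static unsigned int ATTRIBUTE_UNUSED
df_example_table_append (struct df_example_table *table, void *entry)
{
  /* Double the allocation when full so appends stay amortized O(1).  */
  if (table->size == table->alloc)
    {
      table->alloc = table->alloc ? 2 * table->alloc : 16;
      table->slots = XRESIZEVEC (void *, table->slots, table->alloc);
    }
  table->slots[table->size] = entry;
  /* The entry's id is simply its index at insertion time.  */
  return table->size++;
}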


/* This function takes one of the groups of refs (defs, uses or
   eq_uses) and installs the entire group into the insn.  It also adds
   each of these refs into the appropriate chains.  */

static df_ref *
df_install_refs (basic_block bb,
		 df_ref *old_vec, unsigned int count,
		 struct df_reg_info **reg_info,
		 struct df_ref_info *ref_info,
		 bool is_notes)
{
  if (count)
    {
      unsigned int i;
      df_ref *new_vec = XNEWVEC (df_ref, count + 1);
      bool add_to_table;

      switch (ref_info->ref_order)
	{
	case DF_REF_ORDER_UNORDERED_WITH_NOTES:
	case DF_REF_ORDER_BY_REG_WITH_NOTES:
	case DF_REF_ORDER_BY_INSN_WITH_NOTES:
	  ref_info->ref_order = DF_REF_ORDER_UNORDERED_WITH_NOTES;
	  add_to_table = true;
	  break;
	case DF_REF_ORDER_UNORDERED:
	case DF_REF_ORDER_BY_REG:
	case DF_REF_ORDER_BY_INSN:
	  ref_info->ref_order = DF_REF_ORDER_UNORDERED;
	  add_to_table = !is_notes;
	  break;
	default:
	  add_to_table = false;
	  break;
	}

      /* Do not add if ref is not in the right blocks.  */
      if (add_to_table && df->analyze_subset)
	add_to_table = bitmap_bit_p (df->blocks_to_analyze, bb->index);

      for (i = 0; i < count; i++)
	{
	  df_ref this_ref = old_vec[i];
	  new_vec[i] = this_ref;
	  df_install_ref (this_ref, reg_info[DF_REF_REGNO (this_ref)],
			  ref_info, add_to_table);
	}

      new_vec[count] = NULL;
      return new_vec;
    }
  else
    return df_null_ref_rec;
}


/* This function takes the mws and installs the entire group into the
   insn.  */

static struct df_mw_hardreg **
df_install_mws (struct df_mw_hardreg **old_vec, unsigned int count)
{
  if (count)
    {
      struct df_mw_hardreg **new_vec
	= XNEWVEC (struct df_mw_hardreg *, count + 1);
      memcpy (new_vec, old_vec,
	      sizeof (struct df_mw_hardreg *) * (count + 1));
      return new_vec;
    }
  else
    return df_null_mw_rec;
}


/* Add a chain of df_refs to appropriate ref chain/reg_info/ref_info
   chains and update other necessary information.  */

static void
df_refs_add_to_chains (struct df_collection_rec *collection_rec,
		       basic_block bb, rtx insn)
{
  if (insn)
    {
      struct df_insn_info *insn_rec = DF_INSN_INFO_GET (insn);
      /* If there is a vector in the collection rec, add it to the
	 insn.  A null rec is a signal that the caller will handle the
	 chain specially.  */
      if (collection_rec->def_vec)
	{
	  df_scan_free_ref_vec (insn_rec->defs);
	  insn_rec->defs
	    = df_install_refs (bb, collection_rec->def_vec,
			       collection_rec->next_def,
			       df->def_regs,
			       &df->def_info, false);
	}
      if (collection_rec->use_vec)
	{
	  df_scan_free_ref_vec (insn_rec->uses);
	  insn_rec->uses
	    = df_install_refs (bb, collection_rec->use_vec,
			       collection_rec->next_use,
			       df->use_regs,
			       &df->use_info, false);
	}
      if (collection_rec->eq_use_vec)
	{
	  df_scan_free_ref_vec (insn_rec->eq_uses);
	  insn_rec->eq_uses
	    = df_install_refs (bb, collection_rec->eq_use_vec,
			       collection_rec->next_eq_use,
			       df->eq_use_regs,
			       &df->use_info, true);
	}
      if (collection_rec->mw_vec)
	{
	  df_scan_free_mws_vec (insn_rec->mw_hardregs);
	  insn_rec->mw_hardregs
	    = df_install_mws (collection_rec->mw_vec,
			      collection_rec->next_mw);
	}
    }
  else
    {
      struct df_scan_bb_info *bb_info = df_scan_get_bb_info (bb->index);

      df_scan_free_ref_vec (bb_info->artificial_defs);
      bb_info->artificial_defs
	= df_install_refs (bb, collection_rec->def_vec,
			   collection_rec->next_def,
			   df->def_regs,
			   &df->def_info, false);
      df_scan_free_ref_vec (bb_info->artificial_uses);
      bb_info->artificial_uses
	= df_install_refs (bb, collection_rec->use_vec,
			   collection_rec->next_use,
			   df->use_regs,
			   &df->use_info, false);
    }
}


/* Allocate a ref and initialize its fields.

   If the REF_FLAGS field contains DF_REF_SIGN_EXTRACT or
   DF_REF_ZERO_EXTRACT, then WIDTH, OFFSET and MODE describe the
   extraction, provided its operands were constants.  Otherwise WIDTH
   and OFFSET should be -1 and MODE should be 0, even if those flags
   were set.  */

static df_ref
df_ref_create_structure (enum df_ref_class cl,
			 struct df_collection_rec *collection_rec,
			 rtx reg, rtx *loc,
			 basic_block bb, struct df_insn_info *info,
			 enum df_ref_type ref_type,
			 enum df_ref_flags ref_flags,
			 int width, int offset, enum machine_mode mode)
{
  df_ref this_ref = NULL;
  int regno = REGNO (GET_CODE (reg) == SUBREG ? SUBREG_REG (reg) : reg);
  struct df_scan_problem_data *problem_data
    = (struct df_scan_problem_data *) df_scan->problem_data;

  switch (cl)
    {
    case DF_REF_BASE:
      this_ref = (df_ref) pool_alloc (problem_data->ref_base_pool);
      gcc_assert (loc == NULL);
      break;

    case DF_REF_ARTIFICIAL:
      this_ref = (df_ref) pool_alloc (problem_data->ref_artificial_pool);
      this_ref->artificial_ref.bb = bb;
      gcc_assert (loc == NULL);
      break;

    case DF_REF_REGULAR:
      this_ref = (df_ref) pool_alloc (problem_data->ref_regular_pool);
      this_ref->regular_ref.loc = loc;
      gcc_assert (loc);
      break;

    case DF_REF_EXTRACT:
      this_ref = (df_ref) pool_alloc (problem_data->ref_extract_pool);
      DF_REF_EXTRACT_WIDTH (this_ref) = width;
      DF_REF_EXTRACT_OFFSET (this_ref) = offset;
      DF_REF_EXTRACT_MODE (this_ref) = mode;
      this_ref->regular_ref.loc = loc;
      gcc_assert (loc);
      break;
    }

  DF_REF_CLASS (this_ref) = cl;
  DF_REF_ID (this_ref) = -1;
  DF_REF_REG (this_ref) = reg;
  DF_REF_REGNO (this_ref) = regno;
  DF_REF_TYPE (this_ref) = ref_type;
  DF_REF_INSN_INFO (this_ref) = info;
  DF_REF_CHAIN (this_ref) = NULL;
  DF_REF_FLAGS (this_ref) = ref_flags;
  DF_REF_NEXT_REG (this_ref) = NULL;
  DF_REF_PREV_REG (this_ref) = NULL;
  DF_REF_ORDER (this_ref) = df->ref_order++;

  /* We need to clear this bit because fwprop, and in the future
     possibly other optimizations, sometimes create new refs using old
     refs as the model.  */
  DF_REF_FLAGS_CLEAR (this_ref, DF_HARD_REG_LIVE);

  /* See if this ref needs to have DF_HARD_REG_LIVE bit set.  */
  if ((regno < FIRST_PSEUDO_REGISTER)
      && (!DF_REF_IS_ARTIFICIAL (this_ref)))
    {
      if (DF_REF_REG_DEF_P (this_ref))
	{
	  if (!DF_REF_FLAGS_IS_SET (this_ref, DF_REF_MAY_CLOBBER))
	    DF_REF_FLAGS_SET (this_ref, DF_HARD_REG_LIVE);
	}
      else if (!(TEST_HARD_REG_BIT (elim_reg_set, regno)
		 && (regno == FRAME_POINTER_REGNUM
		     || regno == ARG_POINTER_REGNUM)))
	DF_REF_FLAGS_SET (this_ref, DF_HARD_REG_LIVE);
    }

  if (collection_rec)
    {
      if (DF_REF_REG_DEF_P (this_ref))
	collection_rec->def_vec[collection_rec->next_def++] = this_ref;
      else if (DF_REF_FLAGS (this_ref) & DF_REF_IN_NOTE)
	collection_rec->eq_use_vec[collection_rec->next_eq_use++] = this_ref;
      else
	collection_rec->use_vec[collection_rec->next_use++] = this_ref;
    }

  return this_ref;
}


/* Create new references of type DF_REF_TYPE for each part of register REG
   at address LOC within INSN of BB.

   If the REF_FLAGS field contains DF_REF_SIGN_EXTRACT or
   DF_REF_ZERO_EXTRACT, then WIDTH, OFFSET and MODE describe the
   extraction, provided its operands were constants.  Otherwise WIDTH
   and OFFSET should be -1 and MODE should be 0, even if those flags
   were set.  */


static void
df_ref_record (enum df_ref_class cl,
	       struct df_collection_rec *collection_rec,
	       rtx reg, rtx *loc,
	       basic_block bb, struct df_insn_info *insn_info,
	       enum df_ref_type ref_type,
	       enum df_ref_flags ref_flags,
	       int width, int offset, enum machine_mode mode)
{
  unsigned int regno;

  gcc_assert (REG_P (reg) || GET_CODE (reg) == SUBREG);

  regno = REGNO (GET_CODE (reg) == SUBREG ? SUBREG_REG (reg) : reg);
  if (regno < FIRST_PSEUDO_REGISTER)
    {
      struct df_mw_hardreg *hardreg = NULL;
      struct df_scan_problem_data *problem_data
	= (struct df_scan_problem_data *) df_scan->problem_data;
      unsigned int i;
      unsigned int endregno;
      df_ref ref;

      if (GET_CODE (reg) == SUBREG)
	{
	  regno += subreg_regno_offset (regno, GET_MODE (SUBREG_REG (reg)),
					SUBREG_BYTE (reg), GET_MODE (reg));
	  endregno = regno + subreg_nregs (reg);
	}
      else
	endregno = END_HARD_REGNO (reg);

      /* If this is a multiword hardreg, we create some extra
	 datastructures that will enable us to easily build REG_DEAD
	 and REG_UNUSED notes.  */
      if ((endregno != regno + 1) && insn_info)
	{
	  /* Sets to a subreg of a multiword register are partial.
	     Sets to a non-subreg of a multiword register are not.  */
	  if (GET_CODE (reg) == SUBREG)
	    ref_flags |= DF_REF_PARTIAL;
	  ref_flags |= DF_REF_MW_HARDREG;

	  hardreg = (struct df_mw_hardreg *) pool_alloc (problem_data->mw_reg_pool);
	  hardreg->type = ref_type;
	  hardreg->flags = ref_flags;
	  hardreg->mw_reg = reg;
	  hardreg->start_regno = regno;
	  hardreg->end_regno = endregno - 1;
	  hardreg->mw_order = df->ref_order++;
	  collection_rec->mw_vec[collection_rec->next_mw++] = hardreg;
	}

      for (i = regno; i < endregno; i++)
	{
	  ref = df_ref_create_structure (cl, collection_rec, regno_reg_rtx[i], loc,
					 bb, insn_info, ref_type, ref_flags,
					 width, offset, mode);

	  gcc_assert (ORIGINAL_REGNO (DF_REF_REG (ref)) == i);
	}
    }
  else
    {
      df_ref_create_structure (cl, collection_rec, reg, loc, bb, insn_info,
			       ref_type, ref_flags, width, offset, mode);
    }
}


/* A set to a non-paradoxical SUBREG for which the number of word_mode units
   covered by the outer mode is smaller than that covered by the inner mode
   is a read-modify-write operation.
   This function returns true iff the SUBREG X is such a SUBREG.  */

bool
df_read_modify_subreg_p (rtx x)
{
  unsigned int isize, osize;
  if (GET_CODE (x) != SUBREG)
    return false;
  isize = GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)));
  osize = GET_MODE_SIZE (GET_MODE (x));
  return isize > osize
	 && isize > REGMODE_NATURAL_SIZE (GET_MODE (SUBREG_REG (x)));
}
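
/* Worked example (added for illustration; the target assumed here is
   hypothetical): on a 32-bit target where word_mode is SImode and
   REGMODE_NATURAL_SIZE is 4, writing (subreg:SI (reg:DI d) 0) stores
   only the low word of a two-word register and leaves the high word
   untouched, so the store must be modeled as a read-modify-write:
   isize = 8 > osize = 4 and isize = 8 > 4.  A paradoxical SUBREG
   (osize > isize) fails the first test and is not read-modify-write.  */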


/* Process all the registers defined in the rtx, X.
   Autoincrement/decrement definitions will be picked up by
   df_uses_record.  */

static void
df_def_record_1 (struct df_collection_rec *collection_rec,
		 rtx x, basic_block bb, struct df_insn_info *insn_info,
		 enum df_ref_flags flags)
{
  rtx *loc;
  rtx dst;
  int offset = -1;
  int width = -1;
  enum machine_mode mode = 0;
  enum df_ref_class cl = DF_REF_REGULAR;

  /* We may recursively call ourselves on EXPR_LIST when dealing with PARALLEL
     construct.  */
  if (GET_CODE (x) == EXPR_LIST || GET_CODE (x) == CLOBBER)
    loc = &XEXP (x, 0);
  else
    loc = &SET_DEST (x);
  dst = *loc;

  /* It is legal to have a set destination be a parallel.  */
  if (GET_CODE (dst) == PARALLEL)
    {
      int i;

      for (i = XVECLEN (dst, 0) - 1; i >= 0; i--)
	{
	  rtx temp = XVECEXP (dst, 0, i);
	  if (GET_CODE (temp) == EXPR_LIST || GET_CODE (temp) == CLOBBER
	      || GET_CODE (temp) == SET)
	    df_def_record_1 (collection_rec,
			     temp, bb, insn_info,
			     GET_CODE (temp) == CLOBBER
			     ? flags | DF_REF_MUST_CLOBBER : flags);
	}
      return;
    }

  if (GET_CODE (dst) == STRICT_LOW_PART)
    {
      flags |= DF_REF_READ_WRITE | DF_REF_PARTIAL | DF_REF_STRICT_LOW_PART;

      loc = &XEXP (dst, 0);
      dst = *loc;
    }

  if (GET_CODE (dst) == ZERO_EXTRACT)
    {
      flags |= DF_REF_READ_WRITE | DF_REF_PARTIAL | DF_REF_ZERO_EXTRACT;

      if (GET_CODE (XEXP (dst, 1)) == CONST_INT
	  && GET_CODE (XEXP (dst, 2)) == CONST_INT)
	{
	  width = INTVAL (XEXP (dst, 1));
	  offset = INTVAL (XEXP (dst, 2));
	  mode = GET_MODE (dst);
	  cl = DF_REF_EXTRACT;
	}

      loc = &XEXP (dst, 0);
      dst = *loc;
    }

  /* At this point if we do not have a reg or a subreg, just return.  */
  if (REG_P (dst))
    {
      df_ref_record (cl, collection_rec,
		     dst, loc, bb, insn_info, DF_REF_REG_DEF, flags,
		     width, offset, mode);

      /* We want to keep sp alive everywhere - by making all
	 writes to sp also be uses of sp.  */
      if (REGNO (dst) == STACK_POINTER_REGNUM)
	df_ref_record (DF_REF_BASE, collection_rec,
		       dst, NULL, bb, insn_info, DF_REF_REG_USE, flags,
		       width, offset, mode);
    }
  else if (GET_CODE (dst) == SUBREG && REG_P (SUBREG_REG (dst)))
    {
      if (df_read_modify_subreg_p (dst))
	flags |= DF_REF_READ_WRITE | DF_REF_PARTIAL;

      flags |= DF_REF_SUBREG;

      df_ref_record (cl, collection_rec,
		     dst, loc, bb, insn_info, DF_REF_REG_DEF, flags,
		     width, offset, mode);
    }
}


/* Process all the registers defined in the pattern rtx, X.  */

static void
df_defs_record (struct df_collection_rec *collection_rec,
		rtx x, basic_block bb, struct df_insn_info *insn_info,
		enum df_ref_flags flags)
{
  RTX_CODE code = GET_CODE (x);

  if (code == SET || code == CLOBBER)
    {
      /* Mark the single def within the pattern.  */
      enum df_ref_flags clobber_flags = flags;
      clobber_flags |= (code == CLOBBER) ? DF_REF_MUST_CLOBBER : 0;
      df_def_record_1 (collection_rec, x, bb, insn_info, clobber_flags);
    }
  else if (code == COND_EXEC)
    {
      df_defs_record (collection_rec, COND_EXEC_CODE (x),
		      bb, insn_info, DF_REF_CONDITIONAL);
    }
  else if (code == PARALLEL)
    {
      int i;

      /* Mark the multiple defs within the pattern.  */
      for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
	df_defs_record (collection_rec, XVECEXP (x, 0, i), bb, insn_info, flags);
    }
}


/* Process all the registers used in the rtx at address LOC.

   If the REF_FLAGS field contains DF_REF_SIGN_EXTRACT or
   DF_REF_ZERO_EXTRACT, then WIDTH, OFFSET and MODE describe the
   extraction, provided its operands were constants.  Otherwise WIDTH
   and OFFSET should be -1 and MODE should be 0, even if those flags
   were set.  */

static void
df_uses_record (enum df_ref_class cl, struct df_collection_rec *collection_rec,
		rtx *loc, enum df_ref_type ref_type,
		basic_block bb, struct df_insn_info *insn_info,
		enum df_ref_flags flags,
		int width, int offset, enum machine_mode mode)
{
  RTX_CODE code;
  rtx x;

 retry:
  x = *loc;
  if (!x)
    return;
  code = GET_CODE (x);
  switch (code)
    {
    case LABEL_REF:
    case SYMBOL_REF:
    case CONST_INT:
    case CONST:
    case CONST_DOUBLE:
    case CONST_FIXED:
    case CONST_VECTOR:
    case PC:
    case CC0:
    case ADDR_VEC:
    case ADDR_DIFF_VEC:
      return;

    case CLOBBER:
      /* If we are clobbering a MEM, mark any registers inside the address
	 as being used.  */
      if (MEM_P (XEXP (x, 0)))
	df_uses_record (cl, collection_rec,
			&XEXP (XEXP (x, 0), 0),
			DF_REF_REG_MEM_STORE,
			bb, insn_info,
			flags, width, offset, mode);

      /* If we're clobbering a REG then we have a def so ignore.  */
      return;

    case MEM:
      df_uses_record (cl, collection_rec,
		      &XEXP (x, 0), DF_REF_REG_MEM_LOAD,
		      bb, insn_info, flags & DF_REF_IN_NOTE,
		      width, offset, mode);
      return;

    case SUBREG:
      /* While we're here, optimize this case.  */
      flags |= DF_REF_PARTIAL;
      /* In case the SUBREG is not of a REG, do not optimize.  */
      if (!REG_P (SUBREG_REG (x)))
	{
	  loc = &SUBREG_REG (x);
	  df_uses_record (cl, collection_rec, loc, ref_type, bb, insn_info, flags,
			  width, offset, mode);
	  return;
	}
      /* ... Fall through ...  */

    case REG:
      df_ref_record (cl, collection_rec,
		     x, loc, bb, insn_info,
		     ref_type, flags,
		     width, offset, mode);
      return;

    case SIGN_EXTRACT:
    case ZERO_EXTRACT:
      {
	/* If the parameters to the zero or sign extract are
	   constants, strip them off and recurse, otherwise there is
	   no information that we can gain from this operation.  */
	if (GET_CODE (XEXP (x, 1)) == CONST_INT
	    && GET_CODE (XEXP (x, 2)) == CONST_INT)
	  {
	    width = INTVAL (XEXP (x, 1));
	    offset = INTVAL (XEXP (x, 2));
	    mode = GET_MODE (x);

	    if (code == ZERO_EXTRACT)
	      flags |= DF_REF_ZERO_EXTRACT;
	    else
	      flags |= DF_REF_SIGN_EXTRACT;

	    df_uses_record (DF_REF_EXTRACT, collection_rec,
			    &XEXP (x, 0), ref_type, bb, insn_info, flags,
			    width, offset, mode);
	    return;
	  }
      }
      break;

    case SET:
      {
	rtx dst = SET_DEST (x);
	gcc_assert (!(flags & DF_REF_IN_NOTE));
	df_uses_record (cl, collection_rec,
			&SET_SRC (x), DF_REF_REG_USE, bb, insn_info, flags,
			width, offset, mode);

	switch (GET_CODE (dst))
	  {
	  case SUBREG:
	    if (df_read_modify_subreg_p (dst))
	      {
		df_uses_record (cl, collection_rec, &SUBREG_REG (dst),
				DF_REF_REG_USE, bb, insn_info,
				flags | DF_REF_READ_WRITE | DF_REF_SUBREG,
				width, offset, mode);
		break;
	      }
	    /* Fall through.  */
	  case REG:
	  case PARALLEL:
	  case SCRATCH:
	  case PC:
	  case CC0:
	    break;
	  case MEM:
	    df_uses_record (cl, collection_rec, &XEXP (dst, 0),
			    DF_REF_REG_MEM_STORE, bb, insn_info, flags,
			    width, offset, mode);
	    break;
	  case STRICT_LOW_PART:
	    {
	      rtx *temp = &XEXP (dst, 0);
	      /* A strict_low_part uses the whole REG and not just the
		 SUBREG.  */
	      dst = XEXP (dst, 0);
	      df_uses_record (cl, collection_rec,
			      (GET_CODE (dst) == SUBREG) ? &SUBREG_REG (dst) : temp,
			      DF_REF_REG_USE, bb, insn_info,
			      DF_REF_READ_WRITE | DF_REF_STRICT_LOW_PART,
			      width, offset, mode);
	    }
	    break;
	  case ZERO_EXTRACT:
	    {
	      if (GET_CODE (XEXP (dst, 1)) == CONST_INT
		  && GET_CODE (XEXP (dst, 2)) == CONST_INT)
		{
		  width = INTVAL (XEXP (dst, 1));
		  offset = INTVAL (XEXP (dst, 2));
		  mode = GET_MODE (dst);
		  df_uses_record (DF_REF_EXTRACT, collection_rec, &XEXP (dst, 0),
				  DF_REF_REG_USE, bb, insn_info,
				  DF_REF_READ_WRITE | DF_REF_ZERO_EXTRACT,
				  width, offset, mode);
		}
	      else
		{
		  df_uses_record (cl, collection_rec, &XEXP (dst, 1),
				  DF_REF_REG_USE, bb, insn_info, flags,
				  width, offset, mode);
		  df_uses_record (cl, collection_rec, &XEXP (dst, 2),
				  DF_REF_REG_USE, bb, insn_info, flags,
				  width, offset, mode);
		  df_uses_record (cl, collection_rec, &XEXP (dst, 0),
				  DF_REF_REG_USE, bb, insn_info,
				  DF_REF_READ_WRITE | DF_REF_ZERO_EXTRACT,
				  width, offset, mode);
		}
	    }
	    break;

	  default:
	    gcc_unreachable ();
	  }
	return;
      }

    case RETURN:
      break;

    case ASM_OPERANDS:
    case UNSPEC_VOLATILE:
    case TRAP_IF:
    case ASM_INPUT:
      {
	/* Traditional and volatile asm instructions must be
	   considered to use and clobber all hard registers, all
	   pseudo-registers and all of memory.  So must TRAP_IF and
	   UNSPEC_VOLATILE operations.

	   Consider for instance a volatile asm that changes the fpu
	   rounding mode.  An insn should not be moved across this
	   even if it only uses pseudo-regs because it might give an
	   incorrectly rounded result.

	   However, flow.c's liveness computation did *not* do this,
	   giving the reasoning as " ?!? Unfortunately, marking all
	   hard registers as live causes massive problems for the
	   register allocator and marking all pseudos as live creates
	   mountains of uninitialized variable warnings."

	   In order to maintain the status quo with regard to liveness
	   and uses, we do what flow.c did and just mark any regs we
	   can find in ASM_OPERANDS as used.  Global asm insns are
	   scanned elsewhere and regs_asm_clobbered is filled out
	   there.

	   For all ASM_OPERANDS, we must traverse the vector of input
	   operands.  We cannot just fall through here since then we
	   would be confused by the ASM_INPUT rtx inside ASM_OPERANDS,
	   which do not indicate traditional asms unlike their normal
	   usage.  */
	if (code == ASM_OPERANDS)
	  {
	    int j;

	    for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
	      df_uses_record (cl, collection_rec, &ASM_OPERANDS_INPUT (x, j),
			      DF_REF_REG_USE, bb, insn_info, flags,
			      width, offset, mode);
	    return;
	  }
	break;
      }

    case PRE_DEC:
    case POST_DEC:
    case PRE_INC:
    case POST_INC:
    case PRE_MODIFY:
    case POST_MODIFY:
      /* Catch the def of the register being modified.  */
      df_ref_record (cl, collection_rec, XEXP (x, 0), &XEXP (x, 0),
		     bb, insn_info,
		     DF_REF_REG_DEF,
		     flags | DF_REF_READ_WRITE | DF_REF_PRE_POST_MODIFY,
		     width, offset, mode);

      /* ... Fall through to handle uses ...  */

    default:
      break;
    }

  /* Recursively scan the operands of this expression.  */
  {
    const char *fmt = GET_RTX_FORMAT (code);
    int i;

    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
      {
	if (fmt[i] == 'e')
	  {
	    /* Tail recursive case: save a function call level.  */
	    if (i == 0)
	      {
		loc = &XEXP (x, 0);
		goto retry;
	      }
	    df_uses_record (cl, collection_rec, &XEXP (x, i), ref_type,
			    bb, insn_info, flags,
			    width, offset, mode);
	  }
	else if (fmt[i] == 'E')
	  {
	    int j;
	    for (j = 0; j < XVECLEN (x, i); j++)
	      df_uses_record (cl, collection_rec,
			      &XVECEXP (x, i, j), ref_type,
			      bb, insn_info, flags,
			      width, offset, mode);
	  }
      }
  }

  return;
}
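
/* Illustrative sketch (added, not original code): the RETRY label in
   df_uses_record turns the recursion on operand 0 into a loop, keeping
   stack depth bounded on long chains whose first operand nests deeply.
   The same transformation on a hypothetical binary tree - recurse on
   one child, loop on the other:  */

struct df_example_tree
{
  struct df_example_tree *left, *right;
  int value;
};

static int ATTRIBUTE_UNUSED
df_example_tree_sum (struct df_example_tree *t)
{
  int sum = 0;

 retry:
  if (!t)
    return sum;
  sum += t->value;
  /* Genuinely recurse on one child...  */
  sum += df_example_tree_sum (t->right);
  /* ...but iterate instead of recursing on the other.  */
  t = t->left;
  goto retry;
}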
3283 | |
3284 | |
3285 /* For all DF_REF_CONDITIONAL defs, add a corresponding uses. */ | |
3286 | |
3287 static void | |
3288 df_get_conditional_uses (struct df_collection_rec *collection_rec) | |
3289 { | |
3290 unsigned int i; | |
3291 for (i = 0; i < collection_rec->next_def; i++) | |
3292 { | |
3293 df_ref ref = collection_rec->def_vec[i]; | |
3294 if (DF_REF_FLAGS_IS_SET (ref, DF_REF_CONDITIONAL)) | |
3295 { | |
3296 int width = -1; | |
3297 int offset = -1; | |
3298 enum machine_mode mode = 0; | |
3299 df_ref use; | |
3300 | |
3301 if (DF_REF_FLAGS_IS_SET (ref, DF_REF_SIGN_EXTRACT | DF_REF_ZERO_EXTRACT)) | |
3302 { | |
3303 width = DF_REF_EXTRACT_WIDTH (ref); | |
3304 offset = DF_REF_EXTRACT_OFFSET (ref); | |
3305 mode = DF_REF_EXTRACT_MODE (ref); | |
3306 } | |
3307 | |
3308 use = df_ref_create_structure (DF_REF_CLASS (ref), collection_rec, DF_REF_REG (ref), | |
3309 DF_REF_LOC (ref), DF_REF_BB (ref), | |
3310 DF_REF_INSN_INFO (ref), DF_REF_REG_USE, | |
3311 DF_REF_FLAGS (ref) & ~DF_REF_CONDITIONAL, | |
3312 width, offset, mode); | |
3313 DF_REF_REGNO (use) = DF_REF_REGNO (ref); | |
3314 } | |
3315 } | |
3316 } | |
3317 | |
3318 | |
3319 /* Get call's extra defs and uses. */ | |
3320 | |
3321 static void | |
3322 df_get_call_refs (struct df_collection_rec * collection_rec, | |
3323 basic_block bb, | |
3324 struct df_insn_info *insn_info, | |
3325 enum df_ref_flags flags) | |
3326 { | |
3327 rtx note; | |
3328 bitmap_iterator bi; | |
3329 unsigned int ui; | |
3330 bool is_sibling_call; | |
3331 unsigned int i; | |
3332 bitmap defs_generated = BITMAP_ALLOC (&df_bitmap_obstack); | |
3333 | |
3334 /* Do not generate clobbers for registers that are the result of the | |
3335 call. This causes ordering problems in the chain building code | |
3336 depending on which def is seen first. */ | |
3337 for (i=0; i<collection_rec->next_def; i++) | |
3338 { | |
3339 df_ref def = collection_rec->def_vec[i]; | |
3340 bitmap_set_bit (defs_generated, DF_REF_REGNO (def)); | |
3341 } | |
3342 | |
3343 /* Record the registers used to pass arguments, and explicitly | |
3344 noted as clobbered. */ | |
3345 for (note = CALL_INSN_FUNCTION_USAGE (insn_info->insn); note; | |
3346 note = XEXP (note, 1)) | |
3347 { | |
3348 if (GET_CODE (XEXP (note, 0)) == USE) | |
3349 df_uses_record (DF_REF_REGULAR, collection_rec, &XEXP (XEXP (note, 0), 0), | |
3350 DF_REF_REG_USE, bb, insn_info, flags, -1, -1, 0); | |
3351 else if (GET_CODE (XEXP (note, 0)) == CLOBBER) | |
3352 { | |
3353 if (REG_P (XEXP (XEXP (note, 0), 0))) | |
3354 { | |
3355 unsigned int regno = REGNO (XEXP (XEXP (note, 0), 0)); | |
3356 if (!bitmap_bit_p (defs_generated, regno)) | |
3357 df_defs_record (collection_rec, XEXP (note, 0), bb, | |
3358 insn_info, flags); | |
3359 } | |
3360 else | |
3361 df_uses_record (DF_REF_REGULAR, collection_rec, &XEXP (note, 0), | |
3362 DF_REF_REG_USE, bb, insn_info, flags, -1, -1, 0); | |
3363 } | |
3364 } | |
3365 | |
3366 /* The stack ptr is used (honorarily) by a CALL insn. */ | |
3367 df_ref_record (DF_REF_BASE, collection_rec, regno_reg_rtx[STACK_POINTER_REGNUM], | |
3368 NULL, bb, insn_info, DF_REF_REG_USE, | |
3369 DF_REF_CALL_STACK_USAGE | flags, | |
3370 -1, -1, 0); | |
3371 | |
3372 /* Calls may also reference any of the global registers, | |
3373 so they are recorded as used. */ | |
3374 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) | |
3375 if (global_regs[i]) | |
3376 { | |
3377 df_ref_record (DF_REF_BASE, collection_rec, regno_reg_rtx[i], | |
3378 NULL, bb, insn_info, DF_REF_REG_USE, flags, -1, -1, 0); | |
3379 df_ref_record (DF_REF_BASE, collection_rec, regno_reg_rtx[i], | |
3380 NULL, bb, insn_info, DF_REF_REG_DEF, flags, -1, -1, 0); | |
3381 } | |
3382 | |
3383 is_sibling_call = SIBLING_CALL_P (insn_info->insn); | |
3384 EXECUTE_IF_SET_IN_BITMAP (regs_invalidated_by_call_regset, 0, ui, bi) | |
3385 { | |
3386 if (!global_regs[ui] | |
3387 && (!bitmap_bit_p (defs_generated, ui)) | |
3388 && (!is_sibling_call | |
3389 || !bitmap_bit_p (df->exit_block_uses, ui) | |
3390 || refers_to_regno_p (ui, ui+1, | |
3391 crtl->return_rtx, NULL))) | |
3392 df_ref_record (DF_REF_BASE, collection_rec, regno_reg_rtx[ui], | |
3393 NULL, bb, insn_info, DF_REF_REG_DEF, | |
3394 DF_REF_MAY_CLOBBER | flags, | |
3395 -1, -1, 0); | |
3396 } | |
3397 | |
3398 BITMAP_FREE (defs_generated); | |
3399 return; | |
3400 } | |
3401 | |
3402 /* Collect all refs in the INSN. This function is free of any | |
3403 side effects - it will create and return a list of df_ref's in the | |
3404 COLLECTION_REC without putting those refs into the existing ref chains | |
3405 and reg chains. */ | |
3406 | |
3407 static void | |
3408 df_insn_refs_collect (struct df_collection_rec* collection_rec, | |
3409 basic_block bb, struct df_insn_info *insn_info) | |
3410 { | |
3411 rtx note; | |
3412 bool is_cond_exec = (GET_CODE (PATTERN (insn_info->insn)) == COND_EXEC); | |
3413 | |
3414 /* Clear out the collection record. */ | |
3415 collection_rec->next_def = 0; | |
3416 collection_rec->next_use = 0; | |
3417 collection_rec->next_eq_use = 0; | |
3418 collection_rec->next_mw = 0; | |
3419 | |
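/* Defs are collected before the call and use refs so that | |
df_get_call_refs below can avoid generating duplicate clobbers | |
for registers that the pattern already defines. */ | |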
3420 /* Record register defs. */ | |
3421 df_defs_record (collection_rec, PATTERN (insn_info->insn), bb, insn_info, 0); | |
3422 | |
3423 /* Process REG_EQUIV/REG_EQUAL notes. */ | |
3424 for (note = REG_NOTES (insn_info->insn); note; | |
3425 note = XEXP (note, 1)) | |
3426 { | |
3427 switch (REG_NOTE_KIND (note)) | |
3428 { | |
3429 case REG_EQUIV: | |
3430 case REG_EQUAL: | |
3431 df_uses_record (DF_REF_REGULAR, collection_rec, | |
3432 &XEXP (note, 0), DF_REF_REG_USE, | |
3433 bb, insn_info, DF_REF_IN_NOTE, -1, -1, 0); | |
3434 break; | |
3435 case REG_NON_LOCAL_GOTO: | |
3436 /* The frame ptr is used by a non-local goto. */ | |
3437 df_ref_record (DF_REF_BASE, collection_rec, | |
3438 regno_reg_rtx[FRAME_POINTER_REGNUM], | |
3439 NULL, bb, insn_info, | |
3440 DF_REF_REG_USE, 0, -1, -1, 0); | |
3441 #if FRAME_POINTER_REGNUM != HARD_FRAME_POINTER_REGNUM | |
3442 df_ref_record (DF_REF_BASE, collection_rec, | |
3443 regno_reg_rtx[HARD_FRAME_POINTER_REGNUM], | |
3444 NULL, bb, insn_info, | |
3445 DF_REF_REG_USE, 0, -1, -1, 0); | |
3446 #endif | |
3447 break; | |
3448 default: | |
3449 break; | |
3450 } | |
3451 } | |
3452 | |
3453 if (CALL_P (insn_info->insn)) | |
3454 df_get_call_refs (collection_rec, bb, insn_info, | |
3455 (is_cond_exec) ? DF_REF_CONDITIONAL : 0); | |
3456 | |
3457 /* Record the register uses. */ | |
3458 df_uses_record (DF_REF_REGULAR, collection_rec, | |
3459 &PATTERN (insn_info->insn), DF_REF_REG_USE, bb, insn_info, 0, | |
3460 -1, -1, 0); | |
3461 | |
3462 /* DF_REF_CONDITIONAL needs corresponding USES. */ | |
3463 if (is_cond_exec) | |
3464 df_get_conditional_uses (collection_rec); | |
3465 | |
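/* Sort and compress the collected vectors so duplicates are | |
removed and later consumers (e.g. the verifiers at the end of | |
this file) see the refs in a canonical order. */ | |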
3466 df_canonize_collection_rec (collection_rec); | |
3467 } | |
3468 | |
3469 /* Recompute the luids for the insns in BB. */ | |
3470 | |
3471 void | |
3472 df_recompute_luids (basic_block bb) | |
3473 { | |
3474 rtx insn; | |
3475 int luid = 0; | |
3476 | |
3477 df_grow_insn_info (); | |
3478 | |
3479 /* Scan the block an insn at a time from beginning to end. */ | |
3480 FOR_BB_INSNS (bb, insn) | |
3481 { | |
3482 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn); | |
3483 /* Inserting labels does not always trigger the incremental | |
3484 rescanning. */ | |
3485 if (!insn_info) | |
3486 { | |
3487 gcc_assert (!INSN_P (insn)); | |
3488 insn_info = df_insn_create_insn_record (insn); | |
3489 } | |
3490 | |
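/* Non-insns (notes and labels) share the luid of the next real | |
insn, since LUID only advances for INSN_P instructions. */ | |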
3491 DF_INSN_INFO_LUID (insn_info) = luid; | |
3492 if (INSN_P (insn)) | |
3493 luid++; | |
3494 } | |
3495 } | |
3496 | |
3497 | |
3498 /* Returns true if the function entry needs to | |
3499 define the static chain register. */ | |
3500 | |
3501 static bool | |
3502 df_need_static_chain_reg (struct function *fun) | |
3503 { | |
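/* decl_function_context is non-NULL only for nested functions, | |
and only a nested function can receive a static chain. */ | |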
3504 tree fun_context = decl_function_context (fun->decl); | |
3505 return fun_context | |
3506 && DECL_NO_STATIC_CHAIN (fun_context) == false; | |
3507 } | |
3508 | |
3509 | |
3510 /* Collect all artificial refs at the block level for BB and add them | |
3511 to COLLECTION_REC. */ | |
3512 | |
3513 static void | |
3514 df_bb_refs_collect (struct df_collection_rec *collection_rec, basic_block bb) | |
3515 { | |
3516 collection_rec->next_def = 0; | |
3517 collection_rec->next_use = 0; | |
3518 collection_rec->next_eq_use = 0; | |
3519 collection_rec->next_mw = 0; | |
3520 | |
3521 if (bb->index == ENTRY_BLOCK) | |
3522 { | |
3523 df_entry_block_defs_collect (collection_rec, df->entry_block_defs); | |
3524 return; | |
3525 } | |
3526 else if (bb->index == EXIT_BLOCK) | |
3527 { | |
3528 df_exit_block_uses_collect (collection_rec, df->exit_block_uses); | |
3529 return; | |
3530 } | |
3531 | |
3532 #ifdef EH_RETURN_DATA_REGNO | |
3533 if (bb_has_eh_pred (bb)) | |
3534 { | |
3535 unsigned int i; | |
3536 /* Mark the registers that will contain data for the handler. */ | |
3537 for (i = 0; ; ++i) | |
3538 { | |
3539 unsigned regno = EH_RETURN_DATA_REGNO (i); | |
3540 if (regno == INVALID_REGNUM) | |
3541 break; | |
3542 df_ref_record (DF_REF_ARTIFICIAL, collection_rec, regno_reg_rtx[regno], NULL, | |
3543 bb, NULL, DF_REF_REG_DEF, DF_REF_AT_TOP, -1, -1, 0); | |
3544 } | |
3545 } | |
3546 #endif | |
3547 | |
3548 /* Add the hard_frame_pointer if this block is the target of a | |
3549 non-local goto. */ | |
3550 if (bb->flags & BB_NON_LOCAL_GOTO_TARGET) | |
3551 df_ref_record (DF_REF_ARTIFICIAL, collection_rec, hard_frame_pointer_rtx, NULL, | |
3552 bb, NULL, DF_REF_REG_DEF, DF_REF_AT_TOP, -1, -1, 0); | |
3553 | |
3554 /* Add the artificial uses. */ | |
3555 if (bb->index >= NUM_FIXED_BLOCKS) | |
3556 { | |
3557 bitmap_iterator bi; | |
3558 unsigned int regno; | |
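/* Blocks with an EH predecessor use df->eh_block_artificial_uses, | |
which holds the union of the EH-specific and regular sets (the | |
verifier below forms the same union before comparing). */ | |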
3559 bitmap au = bb_has_eh_pred (bb) | |
3560 ? df->eh_block_artificial_uses | |
3561 : df->regular_block_artificial_uses; | |
3562 | |
3563 EXECUTE_IF_SET_IN_BITMAP (au, 0, regno, bi) | |
3564 { | |
3565 df_ref_record (DF_REF_ARTIFICIAL, collection_rec, regno_reg_rtx[regno], NULL, | |
3566 bb, NULL, DF_REF_REG_USE, 0, -1, -1, 0); | |
3567 } | |
3568 } | |
3569 | |
3570 df_canonize_collection_rec (collection_rec); | |
3571 } | |
3572 | |
3573 | |
3574 /* Record all the refs within the basic block BB_INDEX and scan the instructions if SCAN_INSNS. */ | |
3575 | |
3576 void | |
3577 df_bb_refs_record (int bb_index, bool scan_insns) | |
3578 { | |
3579 basic_block bb = BASIC_BLOCK (bb_index); | |
3580 rtx insn; | |
3581 int luid = 0; | |
3582 struct df_scan_bb_info *bb_info; | |
3583 struct df_collection_rec collection_rec; | |
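/* These scratch vectors live on the stack (XALLOCAVEC is | |
alloca-based); the fixed sizes assume that no single insn or | |
block produces more than 1000 refs or 100 multiword hardregs. */ | |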
3584 collection_rec.def_vec = XALLOCAVEC (df_ref, 1000); | |
3585 collection_rec.use_vec = XALLOCAVEC (df_ref, 1000); | |
3586 collection_rec.eq_use_vec = XALLOCAVEC (df_ref, 1000); | |
3587 collection_rec.mw_vec = XALLOCAVEC (struct df_mw_hardreg *, 100); | |
3588 | |
3589 if (!df) | |
3590 return; | |
3591 | |
3592 bb_info = df_scan_get_bb_info (bb_index); | |
3593 | |
3594 /* Need to make sure that there is a record in the basic block info. */ | |
3595 if (!bb_info) | |
3596 { | |
3597 bb_info = (struct df_scan_bb_info *) pool_alloc (df_scan->block_pool); | |
3598 df_scan_set_bb_info (bb_index, bb_info); | |
3599 bb_info->artificial_defs = NULL; | |
3600 bb_info->artificial_uses = NULL; | |
3601 } | |
3602 | |
3603 if (scan_insns) | |
3604 /* Scan the block an insn at a time from beginning to end. */ | |
3605 FOR_BB_INSNS (bb, insn) | |
3606 { | |
3607 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn); | |
3608 gcc_assert (!insn_info); | |
3609 | |
3610 insn_info = df_insn_create_insn_record (insn); | |
3611 if (INSN_P (insn)) | |
3612 { | |
3613 /* Record refs within INSN. */ | |
3614 DF_INSN_INFO_LUID (insn_info) = luid++; | |
3615 df_insn_refs_collect (&collection_rec, bb, DF_INSN_INFO_GET (insn)); | |
3616 df_refs_add_to_chains (&collection_rec, bb, insn); | |
3617 } | |
3618 DF_INSN_INFO_LUID (insn_info) = luid; | |
3619 } | |
3620 | |
3621 /* Other block-level artificial refs. */ | |
3622 df_bb_refs_collect (&collection_rec, bb); | |
3623 df_refs_add_to_chains (&collection_rec, bb, NULL); | |
3624 | |
3625 /* Now that the block has been processed, set the block as dirty so | |
3626 LR and LIVE will get it processed. */ | |
3627 df_set_bb_dirty (bb); | |
3628 } | |
3629 | |
3630 | |
3631 /* Get the artificial use set for a regular (i.e. non-exit/non-entry) | |
3632 block. */ | |
3633 | |
3634 static void | |
3635 df_get_regular_block_artificial_uses (bitmap regular_block_artificial_uses) | |
3636 { | |
3637 #ifdef EH_USES | |
3638 unsigned int i; | |
3639 #endif | |
3640 | |
3641 bitmap_clear (regular_block_artificial_uses); | |
3642 | |
3643 if (reload_completed) | |
3644 { | |
3645 if (frame_pointer_needed) | |
3646 bitmap_set_bit (regular_block_artificial_uses, HARD_FRAME_POINTER_REGNUM); | |
3647 } | |
3648 else | |
3649 /* Before reload, there are a few registers that must be forced | |
3650 live everywhere -- which might not already be the case for | |
3651 blocks within infinite loops. */ | |
3652 { | |
3653 /* Any reference to any pseudo before reload is a potential | |
3654 reference of the frame pointer. */ | |
3655 bitmap_set_bit (regular_block_artificial_uses, FRAME_POINTER_REGNUM); | |
3656 | |
3657 #if FRAME_POINTER_REGNUM != HARD_FRAME_POINTER_REGNUM | |
3658 bitmap_set_bit (regular_block_artificial_uses, HARD_FRAME_POINTER_REGNUM); | |
3659 #endif | |
3660 | |
3661 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM | |
3662 /* Pseudos with argument area equivalences may require | |
3663 reloading via the argument pointer. */ | |
3664 if (fixed_regs[ARG_POINTER_REGNUM]) | |
3665 bitmap_set_bit (regular_block_artificial_uses, ARG_POINTER_REGNUM); | |
3666 #endif | |
3667 | |
3668 /* Any constant, or pseudo with constant equivalences, may | |
3669 require reloading from memory using the pic register. */ | |
3670 if ((unsigned) PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM | |
3671 && fixed_regs[PIC_OFFSET_TABLE_REGNUM]) | |
3672 bitmap_set_bit (regular_block_artificial_uses, PIC_OFFSET_TABLE_REGNUM); | |
3673 } | |
3674 /* The all-important stack pointer must always be live. */ | |
3675 bitmap_set_bit (regular_block_artificial_uses, STACK_POINTER_REGNUM); | |
3676 | |
3677 #ifdef EH_USES | |
3678 /* EH_USES registers are used: | |
3679 1) at all insns that might throw (calls or with -fnon-call-exceptions | |
3680 trapping insns) | |
3681 2) in all EH edges | |
3682 3) to support backtraces and/or debugging, anywhere between their | |
3683 initialization and the point where the saved registers are restored | |
3684 from them, including the cases where we don't reach the epilogue | |
3685 (noreturn call or infinite loop). */ | |
3686 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) | |
3687 if (EH_USES (i)) | |
3688 bitmap_set_bit (regular_block_artificial_uses, i); | |
3689 #endif | |
3690 } | |
3691 | |
3692 | |
3693 /* Get the artificial use set for an eh block. */ | |
3694 | |
3695 static void | |
3696 df_get_eh_block_artificial_uses (bitmap eh_block_artificial_uses) | |
3697 { | |
3698 bitmap_clear (eh_block_artificial_uses); | |
3699 | |
3700 /* The following code (down through the arg_pointer setting) APPEARS | |
3701 to be necessary because there is nothing that actually | |
3702 describes what the exception handling code may need | |
3703 to keep alive. */ | |
3704 if (reload_completed) | |
3705 { | |
3706 if (frame_pointer_needed) | |
3707 { | |
3708 bitmap_set_bit (eh_block_artificial_uses, FRAME_POINTER_REGNUM); | |
3709 #if FRAME_POINTER_REGNUM != HARD_FRAME_POINTER_REGNUM | |
3710 bitmap_set_bit (eh_block_artificial_uses, HARD_FRAME_POINTER_REGNUM); | |
3711 #endif | |
3712 } | |
3713 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM | |
3714 if (fixed_regs[ARG_POINTER_REGNUM]) | |
3715 bitmap_set_bit (eh_block_artificial_uses, ARG_POINTER_REGNUM); | |
3716 #endif | |
3717 } | |
3718 } | |
3719 | |
3720 | |
3721 | |
3722 /*---------------------------------------------------------------------------- | |
3723 Specialized hard register scanning functions. | |
3724 ----------------------------------------------------------------------------*/ | |
3725 | |
3726 | |
3727 /* Mark a register in SET. Hard registers in large modes get all | |
3728 of their component registers set as well. */ | |
3729 | |
3730 static void | |
3731 df_mark_reg (rtx reg, void *vset) | |
3732 { | |
3733 bitmap set = (bitmap) vset; | |
3734 int regno = REGNO (reg); | |
3735 | |
3736 gcc_assert (GET_MODE (reg) != BLKmode); | |
3737 | |
3738 bitmap_set_bit (set, regno); | |
3739 if (regno < FIRST_PSEUDO_REGISTER) | |
3740 { | |
3741 int n = hard_regno_nregs[regno][GET_MODE (reg)]; | |
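/* E.g. a DImode value held in 32-bit registers occupies two hard | |
regs, so both regno and regno + 1 get marked. */ | |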
3742 while (--n > 0) | |
3743 bitmap_set_bit (set, regno + n); | |
3744 } | |
3745 } | |
3746 | |
3747 | |
3748 /* Set the bits for regs that are considered to be defined at the entry. */ | |
3749 | |
3750 static void | |
3751 df_get_entry_block_def_set (bitmap entry_block_defs) | |
3752 { | |
3753 rtx r; | |
3754 int i; | |
3755 | |
3756 bitmap_clear (entry_block_defs); | |
3757 | |
3758 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) | |
3759 { | |
3760 if (FUNCTION_ARG_REGNO_P (i)) | |
3761 #ifdef INCOMING_REGNO | |
3762 bitmap_set_bit (entry_block_defs, INCOMING_REGNO (i)); | |
3763 #else | |
3764 bitmap_set_bit (entry_block_defs, i); | |
3765 #endif | |
3766 } | |
3767 | |
3768 /* The always important stack pointer. */ | |
3769 bitmap_set_bit (entry_block_defs, STACK_POINTER_REGNUM); | |
3770 | |
3771 /* Once the prologue has been generated, all of these registers | |
3772 should just show up in the first regular block. */ | |
3773 if (HAVE_prologue && epilogue_completed) | |
3774 { | |
3775 /* Defs for the callee-saved registers are inserted so that the | |
3776 pushes have some defining location. */ | |
3777 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) | |
3778 if ((call_used_regs[i] == 0) && (df_regs_ever_live_p (i))) | |
3779 bitmap_set_bit (entry_block_defs, i); | |
3780 } | |
3781 else | |
3782 { | |
3783 /* If STATIC_CHAIN_INCOMING_REGNUM == STATIC_CHAIN_REGNUM | |
3784 only STATIC_CHAIN_REGNUM is defined. If they are different, | |
3785 we only care about the STATIC_CHAIN_INCOMING_REGNUM. */ | |
3786 #ifdef STATIC_CHAIN_INCOMING_REGNUM | |
3787 bitmap_set_bit (entry_block_defs, STATIC_CHAIN_INCOMING_REGNUM); | |
3788 #else | |
3789 #ifdef STATIC_CHAIN_REGNUM | |
3790 bitmap_set_bit (entry_block_defs, STATIC_CHAIN_REGNUM); | |
3791 #endif | |
3792 #endif | |
3793 } | |
3794 | |
3795 r = targetm.calls.struct_value_rtx (current_function_decl, true); | |
3796 if (r && REG_P (r)) | |
3797 bitmap_set_bit (entry_block_defs, REGNO (r)); | |
3798 | |
3799 if ((!reload_completed) || frame_pointer_needed) | |
3800 { | |
3801 /* Any reference to any pseudo before reload is a potential | |
3802 reference of the frame pointer. */ | |
3803 bitmap_set_bit (entry_block_defs, FRAME_POINTER_REGNUM); | |
3804 #if FRAME_POINTER_REGNUM != HARD_FRAME_POINTER_REGNUM | |
3805 /* If they are different, also mark the hard frame pointer as live. */ | |
3806 if (!LOCAL_REGNO (HARD_FRAME_POINTER_REGNUM)) | |
3807 bitmap_set_bit (entry_block_defs, HARD_FRAME_POINTER_REGNUM); | |
3808 #endif | |
3809 } | |
3810 | |
3811 /* These registers are live everywhere. */ | |
3812 if (!reload_completed) | |
3813 { | |
3814 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM | |
3815 /* Pseudos with argument area equivalences may require | |
3816 reloading via the argument pointer. */ | |
3817 if (fixed_regs[ARG_POINTER_REGNUM]) | |
3818 bitmap_set_bit (entry_block_defs, ARG_POINTER_REGNUM); | |
3819 #endif | |
3820 | |
3821 #ifdef PIC_OFFSET_TABLE_REGNUM | |
3822 /* Any constant, or pseudo with constant equivalences, may | |
3823 require reloading from memory using the pic register. */ | |
3824 if ((unsigned) PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM | |
3825 && fixed_regs[PIC_OFFSET_TABLE_REGNUM]) | |
3826 bitmap_set_bit (entry_block_defs, PIC_OFFSET_TABLE_REGNUM); | |
3827 #endif | |
3828 } | |
3829 | |
3830 #ifdef INCOMING_RETURN_ADDR_RTX | |
3831 if (REG_P (INCOMING_RETURN_ADDR_RTX)) | |
3832 bitmap_set_bit (entry_block_defs, REGNO (INCOMING_RETURN_ADDR_RTX)); | |
3833 #endif | |
3834 | |
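/* Let the target add any further registers that it knows to be | |
defined on entry. */ | |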
3835 targetm.live_on_entry (entry_block_defs); | |
3836 | |
3837 /* If the function has an incoming STATIC_CHAIN, | |
3838 it has to show up in the entry def set. */ | |
3839 if (df_need_static_chain_reg (cfun)) | |
3840 { | |
3841 #ifdef STATIC_CHAIN_INCOMING_REGNUM | |
3842 bitmap_set_bit (entry_block_defs, STATIC_CHAIN_INCOMING_REGNUM); | |
3843 #else | |
3844 #ifdef STATIC_CHAIN_REGNUM | |
3845 bitmap_set_bit (entry_block_defs, STATIC_CHAIN_REGNUM); | |
3846 #endif | |
3847 #endif | |
3848 } | |
3849 } | |
3850 | |
3851 | |
3852 /* Collect the (conservative) set of hard registers that are defined on | |
3853 entry to the function. | |
3854 It uses ENTRY_BLOCK_DEFS to determine which register | |
3855 references to include. */ | |
3856 | |
3857 static void | |
3858 df_entry_block_defs_collect (struct df_collection_rec *collection_rec, | |
3859 bitmap entry_block_defs) | |
3860 { | |
3861 unsigned int i; | |
3862 bitmap_iterator bi; | |
3863 | |
3864 EXECUTE_IF_SET_IN_BITMAP (entry_block_defs, 0, i, bi) | |
3865 { | |
3866 df_ref_record (DF_REF_ARTIFICIAL, collection_rec, regno_reg_rtx[i], NULL, | |
3867 ENTRY_BLOCK_PTR, NULL, DF_REF_REG_DEF, 0, -1, -1, 0); | |
3868 } | |
3869 | |
3870 df_canonize_collection_rec (collection_rec); | |
3871 } | |
3872 | |
3873 | |
3874 /* Record the (conservative) set of hard registers that are defined on | |
3875 entry to the function. */ | |
3876 | |
3877 static void | |
3878 df_record_entry_block_defs (bitmap entry_block_defs) | |
3879 { | |
3880 struct df_collection_rec collection_rec; | |
3881 memset (&collection_rec, 0, sizeof (struct df_collection_rec)); | |
3882 collection_rec.def_vec = XALLOCAVEC (df_ref, FIRST_PSEUDO_REGISTER); | |
3883 | |
3884 df_entry_block_defs_collect (&collection_rec, entry_block_defs); | |
3885 | |
3886 /* Process bb_refs chain. */ | |
3887 df_refs_add_to_chains (&collection_rec, BASIC_BLOCK (ENTRY_BLOCK), NULL); | |
3888 } | |
3889 | |
3890 | |
3891 /* Update the defs in the entry block. */ | |
3892 | |
3893 void | |
3894 df_update_entry_block_defs (void) | |
3895 { | |
3896 bitmap refs = BITMAP_ALLOC (&df_bitmap_obstack); | |
3897 bool changed = false; | |
3898 | |
3899 df_get_entry_block_def_set (refs); | |
3900 if (df->entry_block_defs) | |
3901 { | |
3902 if (!bitmap_equal_p (df->entry_block_defs, refs)) | |
3903 { | |
3904 struct df_scan_bb_info *bb_info = df_scan_get_bb_info (ENTRY_BLOCK); | |
3905 df_ref_chain_delete_du_chain (bb_info->artificial_defs); | |
3906 df_ref_chain_delete (bb_info->artificial_defs); | |
3907 bb_info->artificial_defs = NULL; | |
3908 changed = true; | |
3909 } | |
3910 } | |
3911 else | |
3912 { | |
3913 struct df_scan_problem_data *problem_data | |
3914 = (struct df_scan_problem_data *) df_scan->problem_data; | |
3915 df->entry_block_defs = BITMAP_ALLOC (&problem_data->reg_bitmaps); | |
3916 changed = true; | |
3917 } | |
3918 | |
3919 if (changed) | |
3920 { | |
3921 df_record_entry_block_defs (refs); | |
3922 bitmap_copy (df->entry_block_defs, refs); | |
3923 df_set_bb_dirty (BASIC_BLOCK (ENTRY_BLOCK)); | |
3924 } | |
3925 BITMAP_FREE (refs); | |
3926 } | |
3927 | |
3928 | |
3929 /* Set the bits for regs that are considered to be used at the exit. */ | |
3930 | |
3931 static void | |
3932 df_get_exit_block_use_set (bitmap exit_block_uses) | |
3933 { | |
3934 unsigned int i; | |
3935 | |
3936 bitmap_clear (exit_block_uses); | |
3937 | |
3938 /* Stack pointer is always live at the exit. */ | |
3939 bitmap_set_bit (exit_block_uses, STACK_POINTER_REGNUM); | |
3940 | |
3941 /* Mark the frame pointer if needed at the end of the function. | |
3942 If we end up eliminating it, it will be removed from the live | |
3943 list of each basic block by reload. */ | |
3944 | |
3945 if ((!reload_completed) || frame_pointer_needed) | |
3946 { | |
3947 bitmap_set_bit (exit_block_uses, FRAME_POINTER_REGNUM); | |
3948 #if FRAME_POINTER_REGNUM != HARD_FRAME_POINTER_REGNUM | |
3949 /* If they are different, also mark the hard frame pointer as live. */ | |
3950 if (!LOCAL_REGNO (HARD_FRAME_POINTER_REGNUM)) | |
3951 bitmap_set_bit (exit_block_uses, HARD_FRAME_POINTER_REGNUM); | |
3952 #endif | |
3953 } | |
3954 | |
3955 #ifndef PIC_OFFSET_TABLE_REG_CALL_CLOBBERED | |
3956 /* Many architectures have a GP register even without flag_pic. | |
3957 Assume the pic register is not in use, or will be handled by | |
3958 other means, if it is not fixed. */ | |
3959 if ((unsigned) PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM | |
3960 && fixed_regs[PIC_OFFSET_TABLE_REGNUM]) | |
3961 bitmap_set_bit (exit_block_uses, PIC_OFFSET_TABLE_REGNUM); | |
3962 #endif | |
3963 | |
3964 /* Mark all global registers, and all registers used by the | |
3965 epilogue as being live at the end of the function since they | |
3966 may be referenced by our caller. */ | |
3967 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) | |
3968 if (global_regs[i] || EPILOGUE_USES (i)) | |
3969 bitmap_set_bit (exit_block_uses, i); | |
3970 | |
3971 if (HAVE_epilogue && epilogue_completed) | |
3972 { | |
3973 /* Mark all call-saved registers that we actually used. */ | |
3974 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) | |
3975 if (df_regs_ever_live_p (i) && !LOCAL_REGNO (i) | |
3976 && !TEST_HARD_REG_BIT (regs_invalidated_by_call, i)) | |
3977 bitmap_set_bit (exit_block_uses, i); | |
3978 } | |
3979 | |
3980 #ifdef EH_RETURN_DATA_REGNO | |
3981 /* Mark the registers that will contain data for the handler. */ | |
3982 if (reload_completed && crtl->calls_eh_return) | |
3983 for (i = 0; ; ++i) | |
3984 { | |
3985 unsigned regno = EH_RETURN_DATA_REGNO (i); | |
3986 if (regno == INVALID_REGNUM) | |
3987 break; | |
3988 bitmap_set_bit (exit_block_uses, regno); | |
3989 } | |
3990 #endif | |
3991 | |
3992 #ifdef EH_RETURN_STACKADJ_RTX | |
3993 if ((!HAVE_epilogue || ! epilogue_completed) | |
3994 && crtl->calls_eh_return) | |
3995 { | |
3996 rtx tmp = EH_RETURN_STACKADJ_RTX; | |
3997 if (tmp && REG_P (tmp)) | |
3998 df_mark_reg (tmp, exit_block_uses); | |
3999 } | |
4000 #endif | |
4001 | |
4002 #ifdef EH_RETURN_HANDLER_RTX | |
4003 if ((!HAVE_epilogue || ! epilogue_completed) | |
4004 && crtl->calls_eh_return) | |
4005 { | |
4006 rtx tmp = EH_RETURN_HANDLER_RTX; | |
4007 if (tmp && REG_P (tmp)) | |
4008 df_mark_reg (tmp, exit_block_uses); | |
4009 } | |
4010 #endif | |
4011 | |
4012 /* Mark function return value. */ | |
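/* diddle_return_value applies the given function (here | |
df_mark_reg) to each hard register that makes up the current | |
function's return value. */ | |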
4013 diddle_return_value (df_mark_reg, (void*) exit_block_uses); | |
4014 } | |
4015 | |
4016 | |
4017 /* Collect the refs of hard registers that are used in the exit block. | |
4018 It uses EXIT_BLOCK_USES to determine which registers to include. */ | |
4019 | |
4020 static void | |
4021 df_exit_block_uses_collect (struct df_collection_rec *collection_rec, bitmap exit_block_uses) | |
4022 { | |
4023 unsigned int i; | |
4024 bitmap_iterator bi; | |
4025 | |
4026 EXECUTE_IF_SET_IN_BITMAP (exit_block_uses, 0, i, bi) | |
4027 df_ref_record (DF_REF_ARTIFICIAL, collection_rec, regno_reg_rtx[i], NULL, | |
4028 EXIT_BLOCK_PTR, NULL, DF_REF_REG_USE, 0, -1, -1, 0); | |
4029 | |
4030 #if FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM | |
4031 /* It is deliberate that this is not put in the exit block uses but | |
4032 I do not know why. */ | |
4033 if (reload_completed | |
4034 && !bitmap_bit_p (exit_block_uses, ARG_POINTER_REGNUM) | |
4035 && bb_has_eh_pred (EXIT_BLOCK_PTR) | |
4036 && fixed_regs[ARG_POINTER_REGNUM]) | |
4037 df_ref_record (DF_REF_ARTIFICIAL, collection_rec, regno_reg_rtx[ARG_POINTER_REGNUM], NULL, | |
4038 EXIT_BLOCK_PTR, NULL, DF_REF_REG_USE, 0, -1, -1, 0); | |
4039 #endif | |
4040 | |
4041 df_canonize_collection_rec (collection_rec); | |
4042 } | |
4043 | |
4044 | |
4045 /* Record the set of hard registers that are used in the exit block. | |
4046 It uses df->exit_block_uses to determine which bits to include. */ | |
4047 | |
4048 static void | |
4049 df_record_exit_block_uses (bitmap exit_block_uses) | |
4050 { | |
4051 struct df_collection_rec collection_rec; | |
4052 memset (&collection_rec, 0, sizeof (struct df_collection_rec)); | |
4053 collection_rec.use_vec = XALLOCAVEC (df_ref, FIRST_PSEUDO_REGISTER); | |
4054 | |
4055 df_exit_block_uses_collect (&collection_rec, exit_block_uses); | |
4056 | |
4057 /* Process bb_refs chain. */ | |
4058 df_refs_add_to_chains (&collection_rec, BASIC_BLOCK (EXIT_BLOCK), NULL); | |
4059 } | |
4060 | |
4061 | |
4062 /* Update the uses in the exit block. */ | |
4063 | |
4064 void | |
4065 df_update_exit_block_uses (void) | |
4066 { | |
4067 bitmap refs = BITMAP_ALLOC (&df_bitmap_obstack); | |
4068 bool changed = false; | |
4069 | |
4070 df_get_exit_block_use_set (refs); | |
4071 if (df->exit_block_uses) | |
4072 { | |
4073 if (!bitmap_equal_p (df->exit_block_uses, refs)) | |
4074 { | |
4075 struct df_scan_bb_info *bb_info = df_scan_get_bb_info (EXIT_BLOCK); | |
4076 df_ref_chain_delete_du_chain (bb_info->artificial_uses); | |
4077 df_ref_chain_delete (bb_info->artificial_uses); | |
4078 bb_info->artificial_uses = NULL; | |
4079 changed = true; | |
4080 } | |
4081 } | |
4082 else | |
4083 { | |
4084 struct df_scan_problem_data *problem_data | |
4085 = (struct df_scan_problem_data *) df_scan->problem_data; | |
4086 df->exit_block_uses = BITMAP_ALLOC (&problem_data->reg_bitmaps); | |
4087 changed = true; | |
4088 } | |
4089 | |
4090 if (changed) | |
4091 { | |
4092 df_record_exit_block_uses (refs); | |
4093 bitmap_copy (df->exit_block_uses, refs); | |
4094 df_set_bb_dirty (BASIC_BLOCK (EXIT_BLOCK)); | |
4095 } | |
4096 BITMAP_FREE (refs); | |
4097 } | |
4098 | |
4099 static bool initialized = false; | |
4100 | |
4101 | |
4102 /* Initialize some platform-specific structures. */ | |
4103 | |
4104 void | |
4105 df_hard_reg_init (void) | |
4106 { | |
4107 #ifdef ELIMINABLE_REGS | |
4108 int i; | |
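/* ELIMINABLE_REGS is a target-supplied initializer of {from, to} | |
pairs naming registers that may be replaced by one another, | |
typically the frame pointer by the stack pointer. */ | |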
4109 static const struct {const int from, to; } eliminables[] = ELIMINABLE_REGS; | |
4110 #endif | |
4111 if (initialized) | |
4112 return; | |
4113 | |
4114 /* Record which registers will be eliminated. We use this in | |
4115 mark_used_regs. */ | |
4116 CLEAR_HARD_REG_SET (elim_reg_set); | |
4117 | |
4118 #ifdef ELIMINABLE_REGS | |
4119 for (i = 0; i < (int) ARRAY_SIZE (eliminables); i++) | |
4120 SET_HARD_REG_BIT (elim_reg_set, eliminables[i].from); | |
4121 #else | |
4122 SET_HARD_REG_BIT (elim_reg_set, FRAME_POINTER_REGNUM); | |
4123 #endif | |
4124 | |
4125 initialized = true; | |
4126 } | |
4127 | |
4128 | |
4129 /* Recompute the parts of scanning that are based on regs_ever_live | |
4130 because something changed in that array. */ | |
4131 | |
4132 void | |
4133 df_update_entry_exit_and_calls (void) | |
4134 { | |
4135 basic_block bb; | |
4136 | |
4137 df_update_entry_block_defs (); | |
4138 df_update_exit_block_uses (); | |
4139 | |
4140 /* The call insns need to be rescanned because there may be changes | |
4141 in the set of registers clobbered across the call. */ | |
4142 FOR_EACH_BB (bb) | |
4143 { | |
4144 rtx insn; | |
4145 FOR_BB_INSNS (bb, insn) | |
4146 { | |
4147 if (INSN_P (insn) && CALL_P (insn)) | |
4148 df_insn_rescan (insn); | |
4149 } | |
4150 } | |
4151 } | |
4152 | |
4153 | |
4154 /* Return true if hard REG is actually used in some instruction. | |
4155 There are a fair number of conditions that affect the setting of | |
4156 this array. See the comment in df.h for df->hard_regs_live_count | |
4157 for the conditions under which this array is set. */ | |
4158 | |
4159 bool | |
4160 df_hard_reg_used_p (unsigned int reg) | |
4161 { | |
4162 gcc_assert (df); | |
4163 return df->hard_regs_live_count[reg] != 0; | |
4164 } | |
4165 | |
4166 | |
4167 /* A count of the number of times REG is actually used in some | |
4168 instruction. There are a fair number of conditions that affect the | |
4169 setting of this array. See the comment in df.h for | |
4170 df->hard_regs_live_count for the conditions under which this array | |
4171 is set. */ | |
4172 | |
4173 | |
4174 unsigned int | |
4175 df_hard_reg_used_count (unsigned int reg) | |
4176 { | |
4177 gcc_assert (df); | |
4178 return df->hard_regs_live_count[reg]; | |
4179 } | |
4180 | |
4181 | |
4182 /* Get the value of regs_ever_live[REGNO]. */ | |
4183 | |
4184 bool | |
4185 df_regs_ever_live_p (unsigned int regno) | |
4186 { | |
4187 return regs_ever_live[regno]; | |
4188 } | |
4189 | |
4190 | |
4191 /* Set regs_ever_live[REGNO] to VALUE. If this causes regs_ever_live | |
4192 to change, schedule that change for the next update. */ | |
4193 | |
4194 void | |
4195 df_set_regs_ever_live (unsigned int regno, bool value) | |
4196 { | |
4197 if (regs_ever_live[regno] == value) | |
4198 return; | |
4199 | |
4200 regs_ever_live[regno] = value; | |
4201 if (df) | |
4202 df->redo_entry_and_exit = true; | |
4203 } | |
4204 | |
4205 | |
4206 /* Compute "regs_ever_live" information from the underlying df | |
4207 information. Set the vector to all false if RESET. */ | |
4208 | |
4209 void | |
4210 df_compute_regs_ever_live (bool reset) | |
4211 { | |
4212 unsigned int i; | |
4213 bool changed = df->redo_entry_and_exit; | |
4214 | |
4215 if (reset) | |
4216 memset (regs_ever_live, 0, sizeof (regs_ever_live)); | |
4217 | |
4218 for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) | |
4219 if ((!regs_ever_live[i]) && df_hard_reg_used_p (i)) | |
4220 { | |
4221 regs_ever_live[i] = true; | |
4222 changed = true; | |
4223 } | |
4224 if (changed) | |
4225 df_update_entry_exit_and_calls (); | |
4226 df->redo_entry_and_exit = false; | |
4227 } | |
4228 | |
4229 | |
4230 /*---------------------------------------------------------------------------- | |
4231 Dataflow ref information verification functions. | |
4232 | |
4233 df_reg_chain_mark (refs, regno, is_def, is_eq_use) | |
4234 df_reg_chain_verify_unmarked (refs) | |
4235 df_refs_verify (ref*, ref*, bool) | |
4236 df_mws_verify (mw*, mw*, bool) | |
4237 df_insn_refs_verify (collection_rec, bb, insn, bool) | |
4238 df_bb_refs_verify (bb, refs, bool) | |
4239 df_bb_verify (bb) | |
4240 df_exit_block_bitmap_verify (bool) | |
4241 df_entry_block_bitmap_verify (bool) | |
4242 df_scan_verify () | |
4243 ----------------------------------------------------------------------------*/ | |
4244 | |
4245 | |
4246 /* Mark all refs in the reg chain. Verify that all of the refs | |
4247 are in the correct chain. */ | |
4248 | |
4249 static unsigned int | |
4250 df_reg_chain_mark (df_ref refs, unsigned int regno, | |
4251 bool is_def, bool is_eq_use) | |
4252 { | |
4253 unsigned int count = 0; | |
4254 df_ref ref; | |
4255 for (ref = refs; ref; ref = DF_REF_NEXT_REG (ref)) | |
4256 { | |
4257 gcc_assert (!DF_REF_IS_REG_MARKED (ref)); | |
4258 | |
4259 /* If there are no def-use or use-def chains, make sure that all | |
4260 of the chains are clear. */ | |
4261 if (!df_chain) | |
4262 gcc_assert (!DF_REF_CHAIN (ref)); | |
4263 | |
4264 /* Check to make sure the ref is in the correct chain. */ | |
4265 gcc_assert (DF_REF_REGNO (ref) == regno); | |
4266 if (is_def) | |
4267 gcc_assert (DF_REF_REG_DEF_P (ref)); | |
4268 else | |
4269 gcc_assert (!DF_REF_REG_DEF_P (ref)); | |
4270 | |
4271 if (is_eq_use) | |
4272 gcc_assert ((DF_REF_FLAGS (ref) & DF_REF_IN_NOTE)); | |
4273 else | |
4274 gcc_assert ((DF_REF_FLAGS (ref) & DF_REF_IN_NOTE) == 0); | |
4275 | |
4276 if (DF_REF_NEXT_REG (ref)) | |
4277 gcc_assert (DF_REF_PREV_REG (DF_REF_NEXT_REG (ref)) == ref); | |
4278 count++; | |
4279 DF_REF_REG_MARK (ref); | |
4280 } | |
4281 return count; | |
4282 } | |
4283 | |
4284 | |
4285 /* Verify that all of the registers in the chain are unmarked. */ | |
4286 | |
4287 static void | |
4288 df_reg_chain_verify_unmarked (df_ref refs) | |
4289 { | |
4290 df_ref ref; | |
4291 for (ref = refs; ref; ref = DF_REF_NEXT_REG (ref)) | |
4292 gcc_assert (!DF_REF_IS_REG_MARKED (ref)); | |
4293 } | |
4294 | |
4295 | |
4296 /* Verify that NEW_REC and OLD_REC have exactly the same members. */ | |
4297 | |
4298 static bool | |
4299 df_refs_verify (df_ref *new_rec, df_ref *old_rec, | |
4300 bool abort_if_fail) | |
4301 { | |
4302 while ((*new_rec) && (*old_rec)) | |
4303 { | |
4304 if (!df_ref_equal_p (*new_rec, *old_rec)) | |
4305 { | |
4306 if (abort_if_fail) | |
4307 gcc_assert (0); | |
4308 else | |
4309 return false; | |
4310 } | |
4311 | |
4312 /* abort_if_fail is set when called from the function-level verifier. | |
4313 In that context, mark this ref as being seen. */ | |
4314 if (abort_if_fail) | |
4315 { | |
4316 gcc_assert (DF_REF_IS_REG_MARKED (*old_rec)); | |
4317 DF_REF_REG_UNMARK (*old_rec); | |
4318 } | |
4319 | |
4320 new_rec++; | |
4321 old_rec++; | |
4322 } | |
4323 | |
4324 if (abort_if_fail) | |
4325 gcc_assert ((*new_rec == NULL) && (*old_rec == NULL)); | |
4326 else | |
4327 return ((*new_rec == NULL) && (*old_rec == NULL)); | |
4328 return false; | |
4329 } | |
4330 | |
4331 | |
4332 /* Verify that NEW_REC and OLD_REC have exactly the same members. */ | |
4333 | |
4334 static bool | |
4335 df_mws_verify (struct df_mw_hardreg **new_rec, struct df_mw_hardreg **old_rec, | |
4336 bool abort_if_fail) | |
4337 { | |
4338 while ((*new_rec) && (*old_rec)) | |
4339 { | |
4340 if (!df_mw_equal_p (*new_rec, *old_rec)) | |
4341 { | |
4342 if (abort_if_fail) | |
4343 gcc_assert (0); | |
4344 else | |
4345 return false; | |
4346 } | |
4347 new_rec++; | |
4348 old_rec++; | |
4349 } | |
4350 | |
4351 if (abort_if_fail) | |
4352 gcc_assert ((*new_rec == NULL) && (*old_rec == NULL)); | |
4353 else | |
4354 return ((*new_rec == NULL) && (*old_rec == NULL)); | |
4355 return false; | |
4356 } | |
4357 | |
4358 | |
4359 /* Return true if the existing insn refs information is complete and | |
4360 correct. Otherwise (i.e. if there are any missing or extra refs), | |
4361 the correct refs are left in COLLECTION_REC. | |
4362 | |
4363 If ABORT_IF_FAIL, leave the refs that are verified (already in the | |
4364 ref chain) as DF_REF_MARKED(). If it is false, we are in per-insn | |
4365 verification mode rather than verifying the whole function, so | |
4366 unmark everything. | |
4367 | |
4368 If ABORT_IF_FAIL is set, this function never returns false. */ | |
4369 | |
4370 static bool | |
4371 df_insn_refs_verify (struct df_collection_rec *collection_rec, | |
4372 basic_block bb, | |
4373 rtx insn, | |
4374 bool abort_if_fail) | |
4375 { | |
4376 bool ret1, ret2, ret3, ret4; | |
4377 unsigned int uid = INSN_UID (insn); | |
4378 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn); | |
4379 | |
4380 df_insn_refs_collect (collection_rec, bb, insn_info); | |
4381 | |
4382 if (!DF_INSN_UID_DEFS (uid)) | |
4383 { | |
4384 /* The insn_rec was created but it was never filled out. */ | |
4385 if (abort_if_fail) | |
4386 gcc_assert (0); | |
4387 else | |
4388 return false; | |
4389 } | |
4390 | |
4391 /* Unfortunately we cannot opt out early if one of these is not | |
4392 right because the marks will not get cleared. */ | |
4393 ret1 = df_refs_verify (collection_rec->def_vec, DF_INSN_UID_DEFS (uid), | |
4394 abort_if_fail); | |
4395 ret2 = df_refs_verify (collection_rec->use_vec, DF_INSN_UID_USES (uid), | |
4396 abort_if_fail); | |
4397 ret3 = df_refs_verify (collection_rec->eq_use_vec, DF_INSN_UID_EQ_USES (uid), | |
4398 abort_if_fail); | |
4399 ret4 = df_mws_verify (collection_rec->mw_vec, DF_INSN_UID_MWS (uid), | |
4400 abort_if_fail); | |
4401 return (ret1 && ret2 && ret3 && ret4); | |
4402 } | |
4403 | |
4404 | |
4405 /* Return true if all refs in the basic block are correct and complete. | |
4406 Because df_refs_verify unmarks what it checks, all refs | |
4407 that are verified will have the DF_REF_MARK bit cleared. */ | |
4408 | |
4409 static bool | |
4410 df_bb_verify (basic_block bb) | |
4411 { | |
4412 rtx insn; | |
4413 struct df_scan_bb_info *bb_info = df_scan_get_bb_info (bb->index); | |
4414 struct df_collection_rec collection_rec; | |
4415 | |
4416 memset (&collection_rec, 0, sizeof (struct df_collection_rec)); | |
4417 collection_rec.def_vec = XALLOCAVEC (df_ref, 1000); | |
4418 collection_rec.use_vec = XALLOCAVEC (df_ref, 1000); | |
4419 collection_rec.eq_use_vec = XALLOCAVEC (df_ref, 1000); | |
4420 collection_rec.mw_vec = XALLOCAVEC (struct df_mw_hardreg *, 100); | |
4421 | |
4422 gcc_assert (bb_info); | |
4423 | |
4424 /* Scan the block, one insn at a time, from end to beginning. */ | |
4425 FOR_BB_INSNS_REVERSE (bb, insn) | |
4426 { | |
4427 if (!INSN_P (insn)) | |
4428 continue; | |
4429 df_insn_refs_verify (&collection_rec, bb, insn, true); | |
4430 df_free_collection_rec (&collection_rec); | |
4431 } | |
4432 | |
4433 /* Do the artificial defs and uses. */ | |
4434 df_bb_refs_collect (&collection_rec, bb); | |
4435 df_refs_verify (collection_rec.def_vec, df_get_artificial_defs (bb->index), true); | |
4436 df_refs_verify (collection_rec.use_vec, df_get_artificial_uses (bb->index), true); | |
4437 df_free_collection_rec (&collection_rec); | |
4438 | |
4439 return true; | |
4440 } | |
4441 | |
4442 | |
4443 /* Returns true if the entry block has a correct and complete df_ref set. | |
4444 If not, it aborts if ABORT_IF_FAIL is true, and returns false otherwise. */ | |
4445 | |
4446 static bool | |
4447 df_entry_block_bitmap_verify (bool abort_if_fail) | |
4448 { | |
4449 bitmap entry_block_defs = BITMAP_ALLOC (&df_bitmap_obstack); | |
4450 bool is_eq; | |
4451 | |
4452 df_get_entry_block_def_set (entry_block_defs); | |
4453 | |
4454 is_eq = bitmap_equal_p (entry_block_defs, df->entry_block_defs); | |
4455 | |
4456 if (!is_eq && abort_if_fail) | |
4457 { | |
4458 print_current_pass (stderr); | |
4459 fprintf (stderr, "entry_block_defs = "); | |
4460 df_print_regset (stderr, entry_block_defs); | |
4461 fprintf (stderr, "df->entry_block_defs = "); | |
4462 df_print_regset (stderr, df->entry_block_defs); | |
4463 gcc_assert (0); | |
4464 } | |
4465 | |
4466 BITMAP_FREE (entry_block_defs); | |
4467 | |
4468 return is_eq; | |
4469 } | |
4470 | |
4471 | |
4472 /* Returns true if the exit block has a correct and complete df_ref set. | |
4473 If not, it aborts if ABORT_IF_FAIL is true, and returns false otherwise. */ | |
4474 | |
4475 static bool | |
4476 df_exit_block_bitmap_verify (bool abort_if_fail) | |
4477 { | |
4478 bitmap exit_block_uses = BITMAP_ALLOC (&df_bitmap_obstack); | |
4479 bool is_eq; | |
4480 | |
4481 df_get_exit_block_use_set (exit_block_uses); | |
4482 | |
4483 is_eq = bitmap_equal_p (exit_block_uses, df->exit_block_uses); | |
4484 | |
4485 if (!is_eq && abort_if_fail) | |
4486 { | |
4487 print_current_pass (stderr); | |
4488 fprintf (stderr, "exit_block_uses = "); | |
4489 df_print_regset (stderr, exit_block_uses); | |
4490 fprintf (stderr, "df->exit_block_uses = "); | |
4491 df_print_regset (stderr, df->exit_block_uses); | |
4492 gcc_assert (0); | |
4493 } | |
4494 | |
4495 BITMAP_FREE (exit_block_uses); | |
4496 | |
4497 return is_eq; | |
4498 } | |
4499 | |
4500 | |
4501 /* Verify that the df_ref information for all insns in all blocks | |
4502 is correct and complete. */ | |
4503 | |
4504 void | |
4505 df_scan_verify (void) | |
4506 { | |
4507 unsigned int i; | |
4508 basic_block bb; | |
4509 bitmap regular_block_artificial_uses; | |
4510 bitmap eh_block_artificial_uses; | |
4511 | |
4512 if (!df) | |
4513 return; | |
4514 | |
4515 /* Verification is a four-step process. */ | |
4516 | |
4517 /* (1) All of the refs are marked by going through the reg chains. */ | |
4518 for (i = 0; i < DF_REG_SIZE (df); i++) | |
4519 { | |
4520 gcc_assert (df_reg_chain_mark (DF_REG_DEF_CHAIN (i), i, true, false) | |
4521 == DF_REG_DEF_COUNT(i)); | |
4522 gcc_assert (df_reg_chain_mark (DF_REG_USE_CHAIN (i), i, false, false) | |
4523 == DF_REG_USE_COUNT(i)); | |
4524 gcc_assert (df_reg_chain_mark (DF_REG_EQ_USE_CHAIN (i), i, false, true) | |
4525 == DF_REG_EQ_USE_COUNT(i)); | |
4526 } | |
4527 | |
4528 /* (2) There are various bitmaps whose values may change over the | |
4529 course of the compilation. This step recomputes them to make | |
4530 sure that they have not slipped out of date. */ | |
4531 regular_block_artificial_uses = BITMAP_ALLOC (&df_bitmap_obstack); | |
4532 eh_block_artificial_uses = BITMAP_ALLOC (&df_bitmap_obstack); | |
4533 | |
4534 df_get_regular_block_artificial_uses (regular_block_artificial_uses); | |
4535 df_get_eh_block_artificial_uses (eh_block_artificial_uses); | |
4536 | |
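/* df->eh_block_artificial_uses holds the union of the two sets, | |
so form the same union here to make the comparison below | |
like-for-like. */ | |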
4537 bitmap_ior_into (eh_block_artificial_uses, | |
4538 regular_block_artificial_uses); | |
4539 | |
4540 /* Check that the artificial_uses bitmaps didn't change. */ | |
4541 gcc_assert (bitmap_equal_p (regular_block_artificial_uses, | |
4542 df->regular_block_artificial_uses)); | |
4543 gcc_assert (bitmap_equal_p (eh_block_artificial_uses, | |
4544 df->eh_block_artificial_uses)); | |
4545 | |
4546 BITMAP_FREE (regular_block_artificial_uses); | |
4547 BITMAP_FREE (eh_block_artificial_uses); | |
4548 | |
4549 /* Verify entry block and exit block. These only verify the bitmaps; | |
4550 the refs are verified in df_bb_verify. */ | |
4551 df_entry_block_bitmap_verify (true); | |
4552 df_exit_block_bitmap_verify (true); | |
4553 | |
4554 /* (3) All of the insns in all of the blocks are traversed and the | |
4555 marks are cleared both in the artificial refs attached to the | |
4556 blocks and the real refs inside the insns. It is a failure to | |
4557 clear a mark that has not been set as this means that the ref in | |
4558 the block or insn was not in the reg chain. */ | |
4559 | |
4560 FOR_ALL_BB (bb) | |
4561 df_bb_verify (bb); | |
4562 | |
4563 /* (4) All reg chains are traversed a second time. This time | |
4564 a check is made that the marks are clear. A set mark would be | |
4565 from a ref that is not in any insn or basic block. */ | |
4566 | |
4567 for (i = 0; i < DF_REG_SIZE (df); i++) | |
4568 { | |
4569 df_reg_chain_verify_unmarked (DF_REG_DEF_CHAIN (i)); | |
4570 df_reg_chain_verify_unmarked (DF_REG_USE_CHAIN (i)); | |
4571 df_reg_chain_verify_unmarked (DF_REG_EQ_USE_CHAIN (i)); | |
4572 } | |
4573 } |