comparison gcc/ipa-utils.c @ 111:04ced10e8804

gcc 7
author:   kono
date:     Fri, 27 Oct 2017 22:46:09 +0900
parents:  77e2b8dfacca
children: 84e7813d76e9
--- gcc/ipa-utils.c    (68:561a7518be6b)
+++ gcc/ipa-utils.c    (111:04ced10e8804)
 /* Utilities for ipa analysis.
-   Copyright (C) 2005, 2007, 2008 Free Software Foundation, Inc.
+   Copyright (C) 2005-2017 Free Software Foundation, Inc.
    Contributed by Kenneth Zadeck <zadeck@naturalbridge.com>

 This file is part of GCC.

 GCC is free software; you can redistribute it and/or modify it under
  [... lines 8-18, identical in both revisions, omitted from the comparison ...]
 <http://www.gnu.org/licenses/>. */

 #include "config.h"
 #include "system.h"
 #include "coretypes.h"
-#include "tm.h"
+#include "backend.h"
 #include "tree.h"
-#include "tree-flow.h"
-#include "tree-inline.h"
-#include "tree-pass.h"
-#include "langhooks.h"
-#include "pointer-set.h"
+#include "gimple.h"
+#include "predict.h"
+#include "alloc-pool.h"
+#include "cgraph.h"
+#include "lto-streamer.h"
+#include "dumpfile.h"
 #include "splay-tree.h"
-#include "ggc.h"
 #include "ipa-utils.h"
-#include "ipa-reference.h"
-#include "gimple.h"
-#include "cgraph.h"
-#include "output.h"
-#include "flags.h"
-#include "timevar.h"
-#include "diagnostic.h"
-#include "langhooks.h"
+#include "symbol-summary.h"
+#include "tree-vrp.h"
+#include "ipa-prop.h"
+#include "ipa-fnsummary.h"

 /* Debugging function for postorder and inorder code. NOTE is a string
    that is printed before the nodes are printed. ORDER is an array of
    cgraph_nodes that has COUNT useful nodes in it. */

 void
-ipa_utils_print_order (FILE* out,
+ipa_print_order (FILE* out,
                  const char * note,
                  struct cgraph_node** order,
                  int count)
 {
   int i;
   fprintf (out, "\n\n ordered call graph: %s\n", note);

   for (i = count - 1; i >= 0; i--)
-    dump_cgraph_node(dump_file, order[i]);
+    order[i]->dump (out);
   fprintf (out, "\n");
-  fflush(out);
+  fflush (out);
 }


 struct searchc_env {
   struct cgraph_node **stack;
+  struct cgraph_node **result;
   int stack_size;
-  struct cgraph_node **result;
   int order_pos;
   splay_tree nodes_marked_new;
   bool reduce;
+  bool allow_overwritable;
   int count;
 };

 /* This is an implementation of Tarjan's strongly connected region
    finder as reprinted in Aho Hopcraft and Ullman's The Design and
    Analysis of Computer Programs (1975) pages 192-193. This version
    has been customized for cgraph_nodes. The env parameter is because
    it is recursive and there are no nested functions here. This
    function should only be called from itself or
-   ipa_utils_reduced_inorder. ENV is a stack env and would be
+   ipa_reduced_postorder. ENV is a stack env and would be
    unnecessary if C had nested functions. V is the node to start
    searching from. */

 static void
 searchc (struct searchc_env* env, struct cgraph_node *v,
  [... lines 85-97 (old) / 82-94 (new), unchanged, omitted from the comparison ...]
   v_info->on_stack = true;

   for (edge = v->callees; edge; edge = edge->next_callee)
     {
       struct ipa_dfs_info * w_info;
-      struct cgraph_node *w = edge->callee;
-
-      if (ignore_edge && ignore_edge (edge))
+      enum availability avail;
+      struct cgraph_node *w = edge->callee->ultimate_alias_target (&avail);
+
+      if (!w || (ignore_edge && ignore_edge (edge)))
         continue;

-      if (w->aux && cgraph_function_body_availability (edge->callee) > AVAIL_OVERWRITABLE)
+      if (w->aux
+          && (avail > AVAIL_INTERPOSABLE
+              || (env->allow_overwritable && avail == AVAIL_INTERPOSABLE)))
         {
           w_info = (struct ipa_dfs_info *) w->aux;
           if (w_info->new_node)
             {
               searchc (env, w, ignore_edge);
  [... lines 114-131, identical in both revisions, omitted from the comparison ...]
       struct ipa_dfs_info *x_info;
       do {
         x = env->stack[--(env->stack_size)];
         x_info = (struct ipa_dfs_info *) x->aux;
         x_info->on_stack = false;
+        x_info->scc_no = v_info->dfn_number;

         if (env->reduce)
           {
             x_info->next_cycle = last;
             last = x;
  [... lines 142-148 (old) / 143-149 (new), unchanged, omitted from the comparison ...]
     }
 }

 /* Topsort the call graph by caller relation. Put the result in ORDER.

-   The REDUCE flag is true if you want the cycles reduced to single
-   nodes. Only consider nodes that have the output bit set. */
+   The REDUCE flag is true if you want the cycles reduced to single nodes.
+   You can use ipa_get_nodes_in_cycle to obtain a vector containing all real
+   call graph nodes in a reduced node.
+
+   Set ALLOW_OVERWRITABLE if nodes with such availability should be included.
+   IGNORE_EDGE, if non-NULL is a hook that may make some edges insignificant
+   for the topological sort. */

 int
-ipa_utils_reduced_inorder (struct cgraph_node **order,
+ipa_reduced_postorder (struct cgraph_node **order,
                        bool reduce, bool allow_overwritable,
                        bool (*ignore_edge) (struct cgraph_edge *))
 {
   struct cgraph_node *node;
   struct searchc_env env;
   splay_tree_node result;
-  env.stack = XCNEWVEC (struct cgraph_node *, cgraph_n_nodes);
+  env.stack = XCNEWVEC (struct cgraph_node *, symtab->cgraph_count);
   env.stack_size = 0;
   env.result = order;
   env.order_pos = 0;
   env.nodes_marked_new = splay_tree_new (splay_tree_compare_ints, 0, 0);
   env.count = 1;
   env.reduce = reduce;
-
-  for (node = cgraph_nodes; node; node = node->next)
-    {
-      enum availability avail = cgraph_function_body_availability (node);
-
-      if (avail > AVAIL_OVERWRITABLE
+  env.allow_overwritable = allow_overwritable;
+
+  FOR_EACH_DEFINED_FUNCTION (node)
+    {
+      enum availability avail = node->get_availability ();
+
+      if (avail > AVAIL_INTERPOSABLE
           || (allow_overwritable
-              && (avail == AVAIL_OVERWRITABLE)))
+              && (avail == AVAIL_INTERPOSABLE)))
         {
           /* Reuse the info if it is already there. */
           struct ipa_dfs_info *info = (struct ipa_dfs_info *) node->aux;
           if (!info)
             info = XCNEW (struct ipa_dfs_info);
  [... lines 185-204 (old) / 192-211 (new), unchanged, omitted from the comparison ...]
   free (env.stack);

   return env.order_pos;
 }

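As a point of reference for the renamed entry point above, a minimal caller-side sketch follows. It is illustrative only, not part of either revision; it assumes the usual GCC-internal environment in which this file is built, and the loop body is a placeholder.

    /* Sketch: walk the call graph SCC-by-SCC in reduced postorder.  The
       ORDER array is caller-allocated, as ipa_reduced_postorder expects,
       and the per-node aux data the function installs is released again
       with ipa_free_postorder_info (added further down in this revision).  */
    struct cgraph_node **order = XCNEWVEC (struct cgraph_node *,
                                           symtab->cgraph_count);
    int n = ipa_reduced_postorder (order, true /* reduce */,
                                   false /* allow_overwritable */,
                                   NULL /* ignore_edge */);
    for (int i = 0; i < n; i++)
      {
        struct cgraph_node *scc_rep = order[i];
        /* ... per-SCC propagation work on SCC_REP ...  */
      }
    ipa_free_postorder_info ();
    free (order);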
+/* Deallocate all ipa_dfs_info structures pointed to by the aux pointer of call
+   graph nodes. */
+
+void
+ipa_free_postorder_info (void)
+{
+  struct cgraph_node *node;
+  FOR_EACH_DEFINED_FUNCTION (node)
+    {
+      /* Get rid of the aux information. */
+      if (node->aux)
+        {
+          free (node->aux);
+          node->aux = NULL;
+        }
+    }
+}
+
+/* Get the set of nodes for the cycle in the reduced call graph starting
+   from NODE. */
+
+vec<cgraph_node *>
+ipa_get_nodes_in_cycle (struct cgraph_node *node)
+{
+  vec<cgraph_node *> v = vNULL;
+  struct ipa_dfs_info *node_dfs_info;
+  while (node)
+    {
+      v.safe_push (node);
+      node_dfs_info = (struct ipa_dfs_info *) node->aux;
+      node = node_dfs_info->next_cycle;
+    }
+  return v;
+}
+
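A hedged companion sketch for the new helper; it is illustrative, and cycle_rep stands for one element of the ORDER array produced by ipa_reduced_postorder rather than a name from this file.

    /* Sketch: visit every real call graph node that was collapsed into the
       reduced node CYCLE_REP, following the next_cycle chain recorded by
       ipa_reduced_postorder.  */
    vec<cgraph_node *> cycle_nodes = ipa_get_nodes_in_cycle (cycle_rep);
    unsigned ix;
    cgraph_node *member;
    FOR_EACH_VEC_ELT (cycle_nodes, ix, member)
      {
        /* ... per-member work for this SCC ...  */
      }
    cycle_nodes.release ();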
+/* Return true iff the CS is an edge within a strongly connected component as
+   computed by ipa_reduced_postorder. */
+
+bool
+ipa_edge_within_scc (struct cgraph_edge *cs)
+{
+  struct ipa_dfs_info *caller_dfs = (struct ipa_dfs_info *) cs->caller->aux;
+  struct ipa_dfs_info *callee_dfs;
+  struct cgraph_node *callee = cs->callee->function_symbol ();
+
+  callee_dfs = (struct ipa_dfs_info *) callee->aux;
+  return (caller_dfs
+          && callee_dfs
+          && caller_dfs->scc_no == callee_dfs->scc_no);
+}
+
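For context, a small hypothetical use of the SCC test; everything except ipa_edge_within_scc itself is a placeholder.

    /* Sketch: when propagating along outgoing edges, skip the ones that stay
       inside the current strongly connected component.  */
    for (struct cgraph_edge *cs = node->callees; cs; cs = cs->next_callee)
      {
        if (ipa_edge_within_scc (cs))
          continue;
        /* ... propagate across the inter-SCC edge CS ...  */
      }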
+struct postorder_stack
+{
+  struct cgraph_node *node;
+  struct cgraph_edge *edge;
+  int ref;
+};
+
+/* Fill array order with all nodes with output flag set in the reverse
+   topological order. Return the number of elements in the array.
+   FIXME: While walking, consider aliases, too. */
+
+int
+ipa_reverse_postorder (struct cgraph_node **order)
+{
+  struct cgraph_node *node, *node2;
+  int stack_size = 0;
+  int order_pos = 0;
+  struct cgraph_edge *edge;
+  int pass;
+  struct ipa_ref *ref = NULL;
+
+  struct postorder_stack *stack =
+    XCNEWVEC (struct postorder_stack, symtab->cgraph_count);
+
+  /* We have to deal with cycles nicely, so use a depth first traversal
+     output algorithm. Ignore the fact that some functions won't need
+     to be output and put them into order as well, so we get dependencies
+     right through inline functions. */
+  FOR_EACH_FUNCTION (node)
+    node->aux = NULL;
+  for (pass = 0; pass < 2; pass++)
+    FOR_EACH_FUNCTION (node)
+      if (!node->aux
+          && (pass
+              || (!node->address_taken
+                  && !node->global.inlined_to
+                  && !node->alias && !node->thunk.thunk_p
+                  && !node->only_called_directly_p ())))
+        {
+          stack_size = 0;
+          stack[stack_size].node = node;
+          stack[stack_size].edge = node->callers;
+          stack[stack_size].ref = 0;
+          node->aux = (void *)(size_t)1;
+          while (stack_size >= 0)
+            {
+              while (true)
+                {
+                  node2 = NULL;
+                  while (stack[stack_size].edge && !node2)
+                    {
+                      edge = stack[stack_size].edge;
+                      node2 = edge->caller;
+                      stack[stack_size].edge = edge->next_caller;
+                      /* Break possible cycles involving always-inline
+                         functions by ignoring edges from always-inline
+                         functions to non-always-inline functions. */
+                      if (DECL_DISREGARD_INLINE_LIMITS (edge->caller->decl)
+                          && !DECL_DISREGARD_INLINE_LIMITS
+                              (edge->callee->function_symbol ()->decl))
+                        node2 = NULL;
+                    }
+                  for (; stack[stack_size].node->iterate_referring (
+                                                   stack[stack_size].ref,
+                                                   ref) && !node2;
+                       stack[stack_size].ref++)
+                    {
+                      if (ref->use == IPA_REF_ALIAS)
+                        node2 = dyn_cast <cgraph_node *> (ref->referring);
+                    }
+                  if (!node2)
+                    break;
+                  if (!node2->aux)
+                    {
+                      stack[++stack_size].node = node2;
+                      stack[stack_size].edge = node2->callers;
+                      stack[stack_size].ref = 0;
+                      node2->aux = (void *)(size_t)1;
+                    }
+                }
+              order[order_pos++] = stack[stack_size--].node;
+            }
+        }
+  free (stack);
+  FOR_EACH_FUNCTION (node)
+    node->aux = NULL;
+  return order_pos;
+}
+
+
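A usage sketch for the second ordering helper as well; it is illustrative, and the per-function work is a placeholder.

    /* Sketch: build the order once and then iterate it; whether to walk it
       forwards or backwards depends on whether the pass at hand wants to see
       callers or callees first.  ipa_reverse_postorder clears the aux marks
       it used before returning, so the caller only frees the array.  */
    struct cgraph_node **order = XCNEWVEC (struct cgraph_node *,
                                           symtab->cgraph_count);
    int nnodes = ipa_reverse_postorder (order);
    for (int i = nnodes - 1; i >= 0; i--)
      {
        struct cgraph_node *fn = order[i];
        /* ... output or analyze FN ...  */
      }
    free (order);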

 /* Given a memory reference T, will return the variable at the bottom
-   of the access. Unlike get_base_address, this will recurse thru
+   of the access. Unlike get_base_address, this will recurse through
    INDIRECT_REFS. */

 tree
 get_base_var (tree t)
 {
  [... lines 218-224 (old) / 366-372 (new), unchanged, omitted from the comparison ...]
       t = TREE_OPERAND (t, 0);
     }
   return t;
 }

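A short worked example of the behaviour the reworded comment describes; the trees are hypothetical and ref is a placeholder.

    /* Sketch: for a reference such as (*p).f[i], i.e. roughly
         ARRAY_REF (COMPONENT_REF (MEM_REF (p, 0), f), i)
       get_base_var keeps taking TREE_OPERAND (..., 0) through the
       dereference and hands back P itself (an SSA name or decl), whereas
       get_base_address stops at the MEM_REF.  */
    tree base = get_base_var (ref);
    if (DECL_P (base) || TREE_CODE (base) == SSA_NAME)
      {
        /* ... treat BASE as the variable ultimately accessed ...  */
      }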
+
+/* SRC and DST are going to be merged. Take SRC's profile and merge it into
+   DST so it is not going to be lost. Possibly destroy SRC's body on the way
+   unless PRESERVE_BODY is set. */
+
+void
+ipa_merge_profiles (struct cgraph_node *dst,
+                    struct cgraph_node *src,
+                    bool preserve_body)
+{
+  tree oldsrcdecl = src->decl;
+  struct function *srccfun, *dstcfun;
+  bool match = true;
+
+  if (!src->definition
+      || !dst->definition)
+    return;
+  if (src->frequency < dst->frequency)
+    src->frequency = dst->frequency;
+
+  /* Time profiles are merged. */
+  if (dst->tp_first_run > src->tp_first_run && src->tp_first_run)
+    dst->tp_first_run = src->tp_first_run;
+
+  if (src->profile_id && !dst->profile_id)
+    dst->profile_id = src->profile_id;
+
+  /* FIXME when we merge in unknown profile, we ought to set counts as
+     unsafe. */
+  if (!src->count.initialized_p ())
+    return;
+  if (symtab->dump_file)
+    {
+      fprintf (symtab->dump_file, "Merging profiles of %s to %s\n",
+               src->dump_name (), dst->dump_name ());
+    }
+  if (dst->count.initialized_p ())
+    dst->count += src->count;
+  else
+    dst->count = src->count;
+
+  /* This is ugly. We need to get both function bodies into memory.
+     If declaration is merged, we need to duplicate it to be able
+     to load body that is being replaced. This makes symbol table
+     temporarily inconsistent. */
+  if (src->decl == dst->decl)
+    {
+      struct lto_in_decl_state temp;
+      struct lto_in_decl_state *state;
+
+      /* We are going to move the decl, we want to remove its file decl data.
+         and link these with the new decl. */
+      temp.fn_decl = src->decl;
+      lto_in_decl_state **slot
+        = src->lto_file_data->function_decl_states->find_slot (&temp,
+                                                               NO_INSERT);
+      state = *slot;
+      src->lto_file_data->function_decl_states->clear_slot (slot);
+      gcc_assert (state);
+
+      /* Duplicate the decl and be sure it does not link into body of DST. */
+      src->decl = copy_node (src->decl);
+      DECL_STRUCT_FUNCTION (src->decl) = NULL;
+      DECL_ARGUMENTS (src->decl) = NULL;
+      DECL_INITIAL (src->decl) = NULL;
+      DECL_RESULT (src->decl) = NULL;
+
+      /* Associate the decl state with new declaration, so LTO streamer
+         can look it up. */
+      state->fn_decl = src->decl;
+      slot
+        = src->lto_file_data->function_decl_states->find_slot (state, INSERT);
+      gcc_assert (!*slot);
+      *slot = state;
+    }
+  src->get_untransformed_body ();
+  dst->get_untransformed_body ();
+  srccfun = DECL_STRUCT_FUNCTION (src->decl);
+  dstcfun = DECL_STRUCT_FUNCTION (dst->decl);
+  if (n_basic_blocks_for_fn (srccfun)
+      != n_basic_blocks_for_fn (dstcfun))
+    {
+      if (symtab->dump_file)
+        fprintf (symtab->dump_file,
+                 "Giving up; number of basic block mismatch.\n");
+      match = false;
+    }
+  else if (last_basic_block_for_fn (srccfun)
+           != last_basic_block_for_fn (dstcfun))
+    {
+      if (symtab->dump_file)
+        fprintf (symtab->dump_file,
+                 "Giving up; last block mismatch.\n");
+      match = false;
+    }
+  else
+    {
+      basic_block srcbb, dstbb;
+
+      FOR_ALL_BB_FN (srcbb, srccfun)
+        {
+          unsigned int i;
+
+          dstbb = BASIC_BLOCK_FOR_FN (dstcfun, srcbb->index);
+          if (dstbb == NULL)
+            {
+              if (symtab->dump_file)
+                fprintf (symtab->dump_file,
+                         "No matching block for bb %i.\n",
+                         srcbb->index);
+              match = false;
+              break;
+            }
+          if (EDGE_COUNT (srcbb->succs) != EDGE_COUNT (dstbb->succs))
+            {
+              if (symtab->dump_file)
+                fprintf (symtab->dump_file,
+                         "Edge count mistmatch for bb %i.\n",
+                         srcbb->index);
+              match = false;
+              break;
+            }
+          for (i = 0; i < EDGE_COUNT (srcbb->succs); i++)
+            {
+              edge srce = EDGE_SUCC (srcbb, i);
+              edge dste = EDGE_SUCC (dstbb, i);
+              if (srce->dest->index != dste->dest->index)
+                {
+                  if (symtab->dump_file)
+                    fprintf (symtab->dump_file,
+                             "Succ edge mistmatch for bb %i.\n",
+                             srce->dest->index);
+                  match = false;
+                  break;
+                }
+            }
+        }
+    }
+  if (match)
+    {
+      struct cgraph_edge *e, *e2;
+      basic_block srcbb, dstbb;
+
+      /* TODO: merge also statement histograms. */
+      FOR_ALL_BB_FN (srcbb, srccfun)
+        {
+          unsigned int i;
+
+          dstbb = BASIC_BLOCK_FOR_FN (dstcfun, srcbb->index);
+          if (!dstbb->count.initialized_p ())
+            {
+              dstbb->count = srcbb->count;
+              for (i = 0; i < EDGE_COUNT (srcbb->succs); i++)
+                {
+                  edge srce = EDGE_SUCC (srcbb, i);
+                  edge dste = EDGE_SUCC (dstbb, i);
+                  if (srce->probability.initialized_p ())
+                    dste->probability = srce->probability;
+                }
+            }
+          else if (srcbb->count.initialized_p ())
+            {
+              for (i = 0; i < EDGE_COUNT (srcbb->succs); i++)
+                {
+                  edge srce = EDGE_SUCC (srcbb, i);
+                  edge dste = EDGE_SUCC (dstbb, i);
+                  dste->probability =
+                    dste->probability * dstbb->count.probability_in (dstbb->count + srcbb->count)
+                    + srce->probability * srcbb->count.probability_in (dstbb->count + srcbb->count);
+                }
+              dstbb->count += srcbb->count;
+            }
+        }
+      push_cfun (dstcfun);
+      counts_to_freqs ();
+      compute_function_frequency ();
+      pop_cfun ();
+      for (e = dst->callees; e; e = e->next_callee)
+        {
+          if (e->speculative)
+            continue;
+          e->count = gimple_bb (e->call_stmt)->count;
+          e->frequency = compute_call_stmt_bb_frequency
+                            (dst->decl,
+                             gimple_bb (e->call_stmt));
+        }
+      for (e = dst->indirect_calls, e2 = src->indirect_calls; e;
+           e2 = (e2 ? e2->next_callee : NULL), e = e->next_callee)
+        {
+          profile_count count = gimple_bb (e->call_stmt)->count;
+          int freq = compute_call_stmt_bb_frequency
+                        (dst->decl,
+                         gimple_bb (e->call_stmt));
+          /* When call is speculative, we need to re-distribute probabilities
+             the same way as they was. This is not really correct because
+             in the other copy the speculation may differ; but probably it
+             is not really worth the effort. */
+          if (e->speculative)
+            {
+              cgraph_edge *direct, *indirect;
+              cgraph_edge *direct2 = NULL, *indirect2 = NULL;
+              ipa_ref *ref;
+
+              e->speculative_call_info (direct, indirect, ref);
+              gcc_assert (e == indirect);
+              if (e2 && e2->speculative)
+                e2->speculative_call_info (direct2, indirect2, ref);
+              if (indirect->count > profile_count::zero ()
+                  || direct->count > profile_count::zero ())
+                {
+                  /* We should mismatch earlier if there is no matching
+                     indirect edge. */
+                  if (!e2)
+                    {
+                      if (dump_file)
+                        fprintf (dump_file,
+                                 "Mismatch in merging indirect edges\n");
+                    }
+                  else if (!e2->speculative)
+                    indirect->count += e2->count;
+                  else if (e2->speculative)
+                    {
+                      if (DECL_ASSEMBLER_NAME (direct2->callee->decl)
+                          != DECL_ASSEMBLER_NAME (direct->callee->decl))
+                        {
+                          if (direct2->count >= direct->count)
+                            {
+                              direct->redirect_callee (direct2->callee);
+                              indirect->count += indirect2->count
+                                                 + direct->count;
+                              direct->count = direct2->count;
+                            }
+                          else
+                            indirect->count += indirect2->count + direct2->count;
+                        }
+                      else
+                        {
+                          direct->count += direct2->count;
+                          indirect->count += indirect2->count;
+                        }
+                    }
+                  int prob = direct->count.probability_in (direct->count
+                                                           + indirect->count).
+                                to_reg_br_prob_base ();
+                  direct->frequency = RDIV (freq * prob, REG_BR_PROB_BASE);
+                  indirect->frequency = RDIV (freq * (REG_BR_PROB_BASE - prob),
+                                              REG_BR_PROB_BASE);
+                }
+              else
+                /* At the moment we should have only profile feedback based
+                   speculations when merging. */
+                gcc_unreachable ();
+            }
+          else if (e2 && e2->speculative)
+            {
+              cgraph_edge *direct, *indirect;
+              ipa_ref *ref;
+
+              e2->speculative_call_info (direct, indirect, ref);
+              e->count = count;
+              e->frequency = freq;
+              int prob = direct->count.probability_in (e->count)
+                            .to_reg_br_prob_base ();
+              e->make_speculative (direct->callee, direct->count,
+                                   RDIV (freq * prob, REG_BR_PROB_BASE));
+            }
+          else
+            {
+              e->count = count;
+              e->frequency = freq;
+            }
+        }
+      if (!preserve_body)
+        src->release_body ();
+      ipa_update_overall_fn_summary (dst);
+    }
+  /* TODO: if there is no match, we can scale up. */
+  src->decl = oldsrcdecl;
+}
+
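A brief sketch of the situation the new function is written for; it is illustrative, and prevailing and duplicate are placeholder names rather than symbols from this change.

    /* Sketch: two cgraph nodes carry bodies of the same function, for example
       after LTO symbol-table merging; fold the profile of the body about to
       be discarded into the one that prevails, then drop the loser.  */
    ipa_merge_profiles (prevailing, duplicate, /* preserve_body = */ false);
    duplicate->remove ();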
+/* Return true if call to DEST is known to be self-recusive call withing FUNC. */
+
+bool
+recursive_call_p (tree func, tree dest)
+{
+  struct cgraph_node *dest_node = cgraph_node::get_create (dest);
+  struct cgraph_node *cnode = cgraph_node::get_create (func);
+  ipa_ref *alias;
+  enum availability avail;
+
+  gcc_assert (!cnode->alias);
+  if (cnode != dest_node->ultimate_alias_target (&avail))
+    return false;
+  if (avail >= AVAIL_AVAILABLE)
+    return true;
+  if (!dest_node->semantically_equivalent_p (cnode))
+    return false;
+  /* If there is only one way to call the fuction or we know all of them
+     are semantically equivalent, we still can consider call recursive. */
+  FOR_EACH_ALIAS (cnode, alias)
+    if (!dest_node->semantically_equivalent_p (alias->referring))
+      return false;
+  return true;
+}
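Finally, a hedged sketch of how the self-recursion predicate would typically be queried; stmt is a placeholder call statement.

    /* Sketch: detect a (possibly alias-mediated) self-recursive call while
       scanning the body of the function currently being compiled.  */
    tree callee = gimple_call_fndecl (stmt);
    if (callee && recursive_call_p (current_function_decl, callee))
      {
        /* ... handle the self-recursive call STMT ...  */
      }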