/* Callgraph handling code.
   Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008
   Free Software Foundation, Inc.
   Contributed by Jan Hubicka

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* This file contains basic routines manipulating the call graph.

The callgraph:

    The call-graph is a data structure designed for inter-procedural
    optimization, but it is also used in non-unit-at-a-time compilation to
    allow easier code sharing.

    The call-graph consists of nodes and edges represented via linked
    lists.  Each function (external or not) corresponds to a unique node.

    The mapping from declarations to call-graph nodes is done using a hash
    table based on DECL_UID.  The call-graph nodes are created lazily by
    the cgraph_node function when it is called for an unknown declaration.

    The callgraph at the moment does not represent indirect calls or calls
    from other compilation units.  The flag NEEDED is set for each node
    that may be accessed in such an invisible way and it shall be
    considered an entry point to the callgraph.

Interprocedural information:

    The callgraph is the place to store data needed for interprocedural
    optimization.  All data structures are divided into three components:
    local_info that is produced while analyzing the function, global_info
    that is the result of a global walk of the callgraph at the end of
    compilation, and rtl_info used by the RTL backend to propagate data
    from already compiled functions to their callers.

Inlining plans:

    The function inlining information is decided in advance and maintained
    in the callgraph as the so-called inline plan.
    For each inlined call, the callee's node is cloned to represent the
    new function copy produced by the inliner.
    Each inlined call gets a unique corresponding clone node of the callee
    and the data structure is updated while inlining is performed, so
    the clones are eliminated and their callee edges redirected to the
    caller.

    Each edge has an "inline_failed" field.  When the field is NULL, the
    call will be inlined.  When it is non-NULL it contains a reason why
    inlining wasn't performed.  */
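
/* As a brief illustration only (a sketch, not part of the interface
   itself), a typical interprocedural pass walks the whole unit through the
   cgraph_nodes list and the per-node callee lists declared below:

	struct cgraph_node *node;
	struct cgraph_edge *e;

	for (node = cgraph_nodes; node; node = node->next)
	  for (e = node->callees; e; e = e->next_callee)
	    if (!e->inline_failed)
	      ;  -- this call is scheduled to be inlined per the inline plan

   The hook lists further below let such passes keep their own summaries in
   sync when nodes and edges are removed or duplicated.  */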

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "tree-inline.h"
#include "langhooks.h"
#include "hashtab.h"
#include "toplev.h"
#include "flags.h"
#include "ggc.h"
#include "debug.h"
#include "target.h"
#include "basic-block.h"
#include "cgraph.h"
#include "varray.h"
#include "output.h"
#include "intl.h"
#include "gimple.h"
#include "tree-dump.h"
#include "tree-flow.h"
#include "value-prof.h"

static void cgraph_node_remove_callers (struct cgraph_node *node);
static inline void cgraph_edge_remove_caller (struct cgraph_edge *e);
static inline void cgraph_edge_remove_callee (struct cgraph_edge *e);

/* Hash table used to convert declarations into nodes.  */
static GTY((param_is (struct cgraph_node))) htab_t cgraph_hash;
/* Hash table used to convert assembler names into nodes.  */
static GTY((param_is (struct cgraph_node))) htab_t assembler_name_hash;

/* The linked list of cgraph nodes.  */
struct cgraph_node *cgraph_nodes;

/* Queue of cgraph nodes scheduled to be lowered.  */
struct cgraph_node *cgraph_nodes_queue;

/* Queue of cgraph nodes scheduled to be added into the cgraph.  This is a
   secondary queue used during optimization to accommodate passes that
   may generate new functions that need to be optimized and expanded.  */
struct cgraph_node *cgraph_new_nodes;

/* Number of nodes in existence.  */
int cgraph_n_nodes;

/* Maximal uid used in cgraph nodes.  */
int cgraph_max_uid;

/* Maximal uid used in cgraph edges.  */
int cgraph_edge_max_uid;

/* Maximal pid used for profiling.  */
int cgraph_max_pid;

/* Set when the whole unit has been analyzed so we can access global
   info.  */
bool cgraph_global_info_ready = false;

/* What state the callgraph is in right now.  */
enum cgraph_state cgraph_state = CGRAPH_STATE_CONSTRUCTION;

/* Set when the cgraph is fully built and the basic flags are computed.  */
bool cgraph_function_flags_ready = false;

/* Linked list of cgraph asm nodes.  */
struct cgraph_asm_node *cgraph_asm_nodes;

/* Last node in cgraph_asm_nodes.  */
static GTY(()) struct cgraph_asm_node *cgraph_asm_last_node;

/* The order index of the next cgraph node to be created.  This is
   used so that we can sort the cgraph nodes in order by when we saw
   them, to support -fno-toplevel-reorder.  */
int cgraph_order;

/* List of hooks triggered on cgraph_edge events.  */
struct cgraph_edge_hook_list {
  cgraph_edge_hook hook;
  void *data;
  struct cgraph_edge_hook_list *next;
};

/* List of hooks triggered on cgraph_node events.  */
struct cgraph_node_hook_list {
  cgraph_node_hook hook;
  void *data;
  struct cgraph_node_hook_list *next;
};

/* List of hooks triggered on events involving two cgraph_edges.  */
struct cgraph_2edge_hook_list {
  cgraph_2edge_hook hook;
  void *data;
  struct cgraph_2edge_hook_list *next;
};

/* List of hooks triggered on events involving two cgraph_nodes.  */
struct cgraph_2node_hook_list {
  cgraph_2node_hook hook;
  void *data;
  struct cgraph_2node_hook_list *next;
};

/* List of hooks triggered when an edge is removed.  */
struct cgraph_edge_hook_list *first_cgraph_edge_removal_hook;
/* List of hooks triggered when a node is removed.  */
struct cgraph_node_hook_list *first_cgraph_node_removal_hook;
/* List of hooks triggered when an edge is duplicated.  */
struct cgraph_2edge_hook_list *first_cgraph_edge_duplicated_hook;
/* List of hooks triggered when a node is duplicated.  */
struct cgraph_2node_hook_list *first_cgraph_node_duplicated_hook;
/* List of hooks triggered when a function is inserted.  */
struct cgraph_node_hook_list *first_cgraph_function_insertion_hook;
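
/* For illustration (a sketch only; the callback and holder names below are
   made up), an IPA pass that keeps per-edge summaries would register and
   later unregister an edge removal hook roughly like this:

	static void
	my_edge_removal_callback (struct cgraph_edge *e, void *data)
	{
	  -- drop the summary attached to E here
	}

	static struct cgraph_edge_hook_list *my_hook_holder;
	...
	my_hook_holder
	  = cgraph_add_edge_removal_hook (&my_edge_removal_callback, NULL);
	...
	cgraph_remove_edge_removal_hook (my_hook_holder);

   The node removal, duplication and function insertion hooks below follow
   the same register/remove pattern.  */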

/* Head of a linked list of unused (freed) call graph nodes.
   Do not GTY((delete)) this list so UIDs get reliably recycled.  */
static GTY(()) struct cgraph_node *free_nodes;
/* Head of a linked list of unused (freed) call graph edges.
   Do not GTY((delete)) this list so UIDs get reliably recycled.  */
static GTY(()) struct cgraph_edge *free_edges;

/* Macros to access the next item in the list of free cgraph nodes and
   edges.  */
#define NEXT_FREE_NODE(NODE) (NODE)->next
#define NEXT_FREE_EDGE(EDGE) (EDGE)->prev_caller

/* Register HOOK to be called with DATA on each removed edge.  */
struct cgraph_edge_hook_list *
cgraph_add_edge_removal_hook (cgraph_edge_hook hook, void *data)
{
  struct cgraph_edge_hook_list *entry;
  struct cgraph_edge_hook_list **ptr = &first_cgraph_edge_removal_hook;

  entry = (struct cgraph_edge_hook_list *) xmalloc (sizeof (*entry));
  entry->hook = hook;
  entry->data = data;
  entry->next = NULL;
  while (*ptr)
    ptr = &(*ptr)->next;
  *ptr = entry;
  return entry;
}

/* Remove ENTRY from the list of hooks called on removing edges.  */
void
cgraph_remove_edge_removal_hook (struct cgraph_edge_hook_list *entry)
{
  struct cgraph_edge_hook_list **ptr = &first_cgraph_edge_removal_hook;

  while (*ptr != entry)
    ptr = &(*ptr)->next;
  *ptr = entry->next;
  free (entry);
}

/* Call all edge removal hooks.  */
static void
cgraph_call_edge_removal_hooks (struct cgraph_edge *e)
{
  struct cgraph_edge_hook_list *entry = first_cgraph_edge_removal_hook;
  while (entry)
    {
      entry->hook (e, entry->data);
      entry = entry->next;
    }
}

/* Register HOOK to be called with DATA on each removed node.  */
struct cgraph_node_hook_list *
cgraph_add_node_removal_hook (cgraph_node_hook hook, void *data)
{
  struct cgraph_node_hook_list *entry;
  struct cgraph_node_hook_list **ptr = &first_cgraph_node_removal_hook;

  entry = (struct cgraph_node_hook_list *) xmalloc (sizeof (*entry));
  entry->hook = hook;
  entry->data = data;
  entry->next = NULL;
  while (*ptr)
    ptr = &(*ptr)->next;
  *ptr = entry;
  return entry;
}

/* Remove ENTRY from the list of hooks called on removing nodes.  */
void
cgraph_remove_node_removal_hook (struct cgraph_node_hook_list *entry)
{
  struct cgraph_node_hook_list **ptr = &first_cgraph_node_removal_hook;

  while (*ptr != entry)
    ptr = &(*ptr)->next;
  *ptr = entry->next;
  free (entry);
}

/* Call all node removal hooks.  */
static void
cgraph_call_node_removal_hooks (struct cgraph_node *node)
{
  struct cgraph_node_hook_list *entry = first_cgraph_node_removal_hook;
  while (entry)
    {
      entry->hook (node, entry->data);
      entry = entry->next;
    }
}

/* Register HOOK to be called with DATA on each inserted function.  */
struct cgraph_node_hook_list *
cgraph_add_function_insertion_hook (cgraph_node_hook hook, void *data)
{
  struct cgraph_node_hook_list *entry;
  struct cgraph_node_hook_list **ptr = &first_cgraph_function_insertion_hook;

  entry = (struct cgraph_node_hook_list *) xmalloc (sizeof (*entry));
  entry->hook = hook;
  entry->data = data;
  entry->next = NULL;
  while (*ptr)
    ptr = &(*ptr)->next;
  *ptr = entry;
  return entry;
}

/* Remove ENTRY from the list of hooks called on inserting functions.  */
void
cgraph_remove_function_insertion_hook (struct cgraph_node_hook_list *entry)
{
  struct cgraph_node_hook_list **ptr = &first_cgraph_function_insertion_hook;

  while (*ptr != entry)
    ptr = &(*ptr)->next;
  *ptr = entry->next;
  free (entry);
}

/* Call all function insertion hooks.  */
void
cgraph_call_function_insertion_hooks (struct cgraph_node *node)
{
  struct cgraph_node_hook_list *entry = first_cgraph_function_insertion_hook;
  while (entry)
    {
      entry->hook (node, entry->data);
      entry = entry->next;
    }
}

/* Register HOOK to be called with DATA on each duplicated edge.  */
struct cgraph_2edge_hook_list *
cgraph_add_edge_duplication_hook (cgraph_2edge_hook hook, void *data)
{
  struct cgraph_2edge_hook_list *entry;
  struct cgraph_2edge_hook_list **ptr = &first_cgraph_edge_duplicated_hook;

  entry = (struct cgraph_2edge_hook_list *) xmalloc (sizeof (*entry));
  entry->hook = hook;
  entry->data = data;
  entry->next = NULL;
  while (*ptr)
    ptr = &(*ptr)->next;
  *ptr = entry;
  return entry;
}

/* Remove ENTRY from the list of hooks called on duplicating edges.  */
void
cgraph_remove_edge_duplication_hook (struct cgraph_2edge_hook_list *entry)
{
  struct cgraph_2edge_hook_list **ptr = &first_cgraph_edge_duplicated_hook;

  while (*ptr != entry)
    ptr = &(*ptr)->next;
  *ptr = entry->next;
  free (entry);
}

/* Call all edge duplication hooks.  */
static void
cgraph_call_edge_duplication_hooks (struct cgraph_edge *cs1,
                                    struct cgraph_edge *cs2)
{
  struct cgraph_2edge_hook_list *entry = first_cgraph_edge_duplicated_hook;
  while (entry)
    {
      entry->hook (cs1, cs2, entry->data);
      entry = entry->next;
    }
}

/* Register HOOK to be called with DATA on each duplicated node.  */
struct cgraph_2node_hook_list *
cgraph_add_node_duplication_hook (cgraph_2node_hook hook, void *data)
{
  struct cgraph_2node_hook_list *entry;
  struct cgraph_2node_hook_list **ptr = &first_cgraph_node_duplicated_hook;

  entry = (struct cgraph_2node_hook_list *) xmalloc (sizeof (*entry));
  entry->hook = hook;
  entry->data = data;
  entry->next = NULL;
  while (*ptr)
    ptr = &(*ptr)->next;
  *ptr = entry;
  return entry;
}

/* Remove ENTRY from the list of hooks called on duplicating nodes.  */
void
cgraph_remove_node_duplication_hook (struct cgraph_2node_hook_list *entry)
{
  struct cgraph_2node_hook_list **ptr = &first_cgraph_node_duplicated_hook;

  while (*ptr != entry)
    ptr = &(*ptr)->next;
  *ptr = entry->next;
  free (entry);
}

/* Call all node duplication hooks.  */
static void
cgraph_call_node_duplication_hooks (struct cgraph_node *node1,
                                    struct cgraph_node *node2)
{
  struct cgraph_2node_hook_list *entry = first_cgraph_node_duplicated_hook;
  while (entry)
    {
      entry->hook (node1, node2, entry->data);
      entry = entry->next;
    }
}

/* Returns a hash code for P.  */

static hashval_t
hash_node (const void *p)
{
  const struct cgraph_node *n = (const struct cgraph_node *) p;
  return (hashval_t) DECL_UID (n->decl);
}

/* Returns nonzero if P1 and P2 are equal.  */

static int
eq_node (const void *p1, const void *p2)
{
  const struct cgraph_node *n1 = (const struct cgraph_node *) p1;
  const struct cgraph_node *n2 = (const struct cgraph_node *) p2;
  return DECL_UID (n1->decl) == DECL_UID (n2->decl);
}

/* Allocate a new callgraph node and insert it into the basic data
   structures.  */

static struct cgraph_node *
cgraph_create_node (void)
{
  struct cgraph_node *node;

  if (free_nodes)
    {
      node = free_nodes;
      free_nodes = NEXT_FREE_NODE (node);
    }
  else
    {
      node = GGC_CNEW (struct cgraph_node);
      node->uid = cgraph_max_uid++;
    }

  node->next = cgraph_nodes;
  node->pid = -1;
  node->order = cgraph_order++;
  if (cgraph_nodes)
    cgraph_nodes->previous = node;
  node->previous = NULL;
  node->global.estimated_growth = INT_MIN;
  cgraph_nodes = node;
  cgraph_n_nodes++;
  return node;
}

/* Return the cgraph node assigned to DECL.  Create a new one when
   needed.  */

struct cgraph_node *
cgraph_node (tree decl)
{
  struct cgraph_node key, *node, **slot;

  gcc_assert (TREE_CODE (decl) == FUNCTION_DECL);

  if (!cgraph_hash)
    cgraph_hash = htab_create_ggc (10, hash_node, eq_node, NULL);

  key.decl = decl;

  slot = (struct cgraph_node **) htab_find_slot (cgraph_hash, &key, INSERT);

  if (*slot)
    {
      node = *slot;
      if (!node->master_clone)
        node->master_clone = node;
      return node;
    }

  node = cgraph_create_node ();
  node->decl = decl;
  *slot = node;
  if (DECL_CONTEXT (decl) && TREE_CODE (DECL_CONTEXT (decl)) == FUNCTION_DECL)
    {
      node->origin = cgraph_node (DECL_CONTEXT (decl));
      node->next_nested = node->origin->nested;
      node->origin->nested = node;
      node->master_clone = node;
    }
  if (assembler_name_hash)
    {
      void **aslot;
      tree name = DECL_ASSEMBLER_NAME (decl);

      aslot = htab_find_slot_with_hash (assembler_name_hash, name,
                                        decl_assembler_name_hash (name),
                                        INSERT);
      /* We can have multiple declarations with the same assembler name.
         For C++ it is __builtin_strlen and strlen, for instance.  Do we
         need to record them all?  The original implementation marked just
         the first one, so let's hope for the best.  */
      if (*aslot == NULL)
        *aslot = node;
    }
  return node;
}

/* Insert an already constructed node into the hashtable.  */

void
cgraph_insert_node_to_hashtable (struct cgraph_node *node)
{
  struct cgraph_node **slot;

  slot = (struct cgraph_node **) htab_find_slot (cgraph_hash, node, INSERT);

  gcc_assert (!*slot);
  *slot = node;
}

/* Returns a hash code for P.  */

static hashval_t
hash_node_by_assembler_name (const void *p)
{
  const struct cgraph_node *n = (const struct cgraph_node *) p;
  return (hashval_t) decl_assembler_name_hash (DECL_ASSEMBLER_NAME (n->decl));
}

/* Returns nonzero if P1 and P2 are equal.  */

static int
eq_assembler_name (const void *p1, const void *p2)
{
  const struct cgraph_node *n1 = (const struct cgraph_node *) p1;
  const_tree name = (const_tree) p2;
  return (decl_assembler_name_equal (n1->decl, name));
}

/* Return the cgraph node that has ASMNAME for its DECL_ASSEMBLER_NAME.
   Return NULL if there's no such node.  */

struct cgraph_node *
cgraph_node_for_asm (tree asmname)
{
  struct cgraph_node *node;
  void **slot;

  if (!assembler_name_hash)
    {
      assembler_name_hash =
        htab_create_ggc (10, hash_node_by_assembler_name, eq_assembler_name,
                         NULL);
      for (node = cgraph_nodes; node; node = node->next)
        if (!node->global.inlined_to)
          {
            tree name = DECL_ASSEMBLER_NAME (node->decl);
            slot = htab_find_slot_with_hash (assembler_name_hash, name,
                                             decl_assembler_name_hash (name),
                                             INSERT);
            /* We can have multiple declarations with the same assembler
               name.  For C++ it is __builtin_strlen and strlen, for
               instance.  Do we need to record them all?  The original
               implementation marked just the first one, so let's hope for
               the best.  */
            if (*slot)
              continue;
            *slot = node;
          }
    }

  slot = htab_find_slot_with_hash (assembler_name_hash, asmname,
                                   decl_assembler_name_hash (asmname),
                                   NO_INSERT);

  if (slot)
    return (struct cgraph_node *) *slot;
  return NULL;
}

/* Returns a hash value for X (which really is a cgraph_edge).  */

static hashval_t
edge_hash (const void *x)
{
  return htab_hash_pointer (((const struct cgraph_edge *) x)->call_stmt);
}

/* Return nonzero if the call_stmt of cgraph_edge X is the statement Y.  */

static int
edge_eq (const void *x, const void *y)
{
  return ((const struct cgraph_edge *) x)->call_stmt == y;
}


/* Return the callgraph edge representing the GIMPLE_CALL statement
   CALL_STMT.  */

struct cgraph_edge *
cgraph_edge (struct cgraph_node *node, gimple call_stmt)
{
  struct cgraph_edge *e, *e2;
  int n = 0;

  if (node->call_site_hash)
    return (struct cgraph_edge *)
      htab_find_with_hash (node->call_site_hash, call_stmt,
                           htab_hash_pointer (call_stmt));

  /* This loop may turn out to be a performance problem.  In such a case,
     adding hashtables into call nodes with very many edges is probably the
     best solution.  It is not a good idea to add a pointer into the
     CALL_EXPR itself because we want to make it possible to have multiple
     cgraph nodes representing different clones of the same body before the
     body is actually cloned.  */
  for (e = node->callees; e; e = e->next_callee)
    {
      if (e->call_stmt == call_stmt)
        break;
      n++;
    }

  if (n > 100)
    {
      node->call_site_hash = htab_create_ggc (120, edge_hash, edge_eq, NULL);
      for (e2 = node->callees; e2; e2 = e2->next_callee)
        {
          void **slot;
          slot = htab_find_slot_with_hash (node->call_site_hash,
                                           e2->call_stmt,
                                           htab_hash_pointer (e2->call_stmt),
                                           INSERT);
          gcc_assert (!*slot);
          *slot = e2;
        }
    }

  return e;
}


/* Change the field call_stmt of edge E to NEW_STMT.  */

void
cgraph_set_call_stmt (struct cgraph_edge *e, gimple new_stmt)
{
  if (e->caller->call_site_hash)
    {
      htab_remove_elt_with_hash (e->caller->call_site_hash,
                                 e->call_stmt,
                                 htab_hash_pointer (e->call_stmt));
    }
  e->call_stmt = new_stmt;
  if (e->caller->call_site_hash)
    {
      void **slot;
      slot = htab_find_slot_with_hash (e->caller->call_site_hash,
                                       e->call_stmt,
                                       htab_hash_pointer
                                       (e->call_stmt), INSERT);
      gcc_assert (!*slot);
      *slot = e;
    }
}

/* Create an edge from CALLER to CALLEE in the cgraph.  */

struct cgraph_edge *
cgraph_create_edge (struct cgraph_node *caller, struct cgraph_node *callee,
                    gimple call_stmt, gcov_type count, int freq, int nest)
{
  struct cgraph_edge *edge;

#ifdef ENABLE_CHECKING
  /* This is a rather pricey check possibly triggering construction of the
     call stmt hashtable.  */
  gcc_assert (!cgraph_edge (caller, call_stmt));
#endif

  gcc_assert (is_gimple_call (call_stmt));

  if (free_edges)
    {
      edge = free_edges;
      free_edges = NEXT_FREE_EDGE (edge);
    }
  else
    {
      edge = GGC_NEW (struct cgraph_edge);
      edge->uid = cgraph_edge_max_uid++;
    }

  if (!callee->analyzed)
    edge->inline_failed = N_("function body not available");
  else if (callee->local.redefined_extern_inline)
    edge->inline_failed = N_("redefined extern inline functions are not "
                             "considered for inlining");
  else if (callee->local.inlinable)
    edge->inline_failed = N_("function not considered for inlining");
  else
    edge->inline_failed = N_("function not inlinable");

  edge->aux = NULL;

  edge->caller = caller;
  edge->callee = callee;
  edge->call_stmt = call_stmt;
  edge->prev_caller = NULL;
  edge->next_caller = callee->callers;
  if (callee->callers)
    callee->callers->prev_caller = edge;
  edge->prev_callee = NULL;
  edge->next_callee = caller->callees;
  if (caller->callees)
    caller->callees->prev_callee = edge;
  caller->callees = edge;
  callee->callers = edge;
  edge->count = count;
  gcc_assert (count >= 0);
  edge->frequency = freq;
  gcc_assert (freq >= 0);
  gcc_assert (freq <= CGRAPH_FREQ_MAX);
  edge->loop_nest = nest;
  edge->indirect_call = 0;
  if (caller->call_site_hash)
    {
      void **slot;
      slot = htab_find_slot_with_hash (caller->call_site_hash,
                                       edge->call_stmt,
                                       htab_hash_pointer
                                       (edge->call_stmt),
                                       INSERT);
      gcc_assert (!*slot);
      *slot = edge;
    }
  return edge;
}

/* Remove the edge E from the list of the callers of the callee.  */

static inline void
cgraph_edge_remove_callee (struct cgraph_edge *e)
{
  if (e->prev_caller)
    e->prev_caller->next_caller = e->next_caller;
  if (e->next_caller)
    e->next_caller->prev_caller = e->prev_caller;
  if (!e->prev_caller)
    e->callee->callers = e->next_caller;
}

/* Remove the edge E from the list of the callees of the caller.  */

static inline void
cgraph_edge_remove_caller (struct cgraph_edge *e)
{
  if (e->prev_callee)
    e->prev_callee->next_callee = e->next_callee;
  if (e->next_callee)
    e->next_callee->prev_callee = e->prev_callee;
  if (!e->prev_callee)
    e->caller->callees = e->next_callee;
  if (e->caller->call_site_hash)
    htab_remove_elt_with_hash (e->caller->call_site_hash,
                               e->call_stmt,
                               htab_hash_pointer (e->call_stmt));
}

/* Put the edge onto the free list.  */

static void
cgraph_free_edge (struct cgraph_edge *e)
{
  int uid = e->uid;

  /* Clear out the edge so we do not dangle pointers.  */
  memset (e, 0, sizeof (*e));
  e->uid = uid;
  NEXT_FREE_EDGE (e) = free_edges;
  free_edges = e;
}

/* Remove the edge E from the cgraph.  */

void
cgraph_remove_edge (struct cgraph_edge *e)
{
  /* Call all edge removal hooks.  */
  cgraph_call_edge_removal_hooks (e);

  /* Remove from the callers list of the callee.  */
  cgraph_edge_remove_callee (e);

  /* Remove from the callees list of the caller.  */
  cgraph_edge_remove_caller (e);

  /* Put the edge onto the free list.  */
  cgraph_free_edge (e);
}

/* Redirect the callee of E to N.  The function does not update the
   underlying call expression.  */

void
cgraph_redirect_edge_callee (struct cgraph_edge *e, struct cgraph_node *n)
{
  /* Remove from the callers list of the current callee.  */
  cgraph_edge_remove_callee (e);

  /* Insert into the callers list of the new callee.  */
  e->prev_caller = NULL;
  if (n->callers)
    n->callers->prev_caller = e;
  e->next_caller = n->callers;
  n->callers = e;
  e->callee = n;
}


/* Update or remove the corresponding cgraph edge if a GIMPLE_CALL
   OLD_STMT changed into NEW_STMT.  */

void
cgraph_update_edges_for_call_stmt (gimple old_stmt, gimple new_stmt)
{
  tree new_call = (is_gimple_call (new_stmt)) ? gimple_call_fn (new_stmt) : 0;
  tree old_call = (is_gimple_call (old_stmt)) ? gimple_call_fn (old_stmt) : 0;
  struct cgraph_node *node = cgraph_node (cfun->decl);

  if (old_call != new_call)
    {
      struct cgraph_edge *e = cgraph_edge (node, old_stmt);
      struct cgraph_edge *ne = NULL;
      tree new_decl;

      if (e)
        {
          gcov_type count = e->count;
          int frequency = e->frequency;
          int loop_nest = e->loop_nest;

          cgraph_remove_edge (e);
          if (new_call)
            {
              new_decl = gimple_call_fndecl (new_stmt);
              if (new_decl)
                {
                  ne = cgraph_create_edge (node, cgraph_node (new_decl),
                                           new_stmt, count, frequency,
                                           loop_nest);
                  gcc_assert (ne->inline_failed);
                }
            }
        }
    }
  else if (old_stmt != new_stmt)
    {
      struct cgraph_edge *e = cgraph_edge (node, old_stmt);

      if (e)
        cgraph_set_call_stmt (e, new_stmt);
    }
}


/* Remove all callees from the node.  */

void
cgraph_node_remove_callees (struct cgraph_node *node)
{
  struct cgraph_edge *e, *f;

  /* It is sufficient to remove the edges from the lists of callers of
     the callees.  The callee list of the node can be zapped with one
     assignment.  */
  for (e = node->callees; e; e = f)
    {
      f = e->next_callee;
      cgraph_call_edge_removal_hooks (e);
      cgraph_edge_remove_callee (e);
      cgraph_free_edge (e);
    }
  node->callees = NULL;
  if (node->call_site_hash)
    {
      htab_delete (node->call_site_hash);
      node->call_site_hash = NULL;
    }
}

/* Remove all callers from the node.  */

static void
cgraph_node_remove_callers (struct cgraph_node *node)
{
  struct cgraph_edge *e, *f;

  /* It is sufficient to remove the edges from the lists of callees of
     the callers.  The caller list of the node can be zapped with one
     assignment.  */
  for (e = node->callers; e; e = f)
    {
      f = e->next_caller;
      cgraph_call_edge_removal_hooks (e);
      cgraph_edge_remove_caller (e);
      cgraph_free_edge (e);
    }
  node->callers = NULL;
}

/* Release the memory used to represent the body of function NODE.  */

void
cgraph_release_function_body (struct cgraph_node *node)
{
  if (DECL_STRUCT_FUNCTION (node->decl))
    {
      tree old_decl = current_function_decl;
      push_cfun (DECL_STRUCT_FUNCTION (node->decl));
      if (cfun->gimple_df)
        {
          current_function_decl = node->decl;
          delete_tree_ssa ();
          delete_tree_cfg_annotations ();
          cfun->eh = NULL;
          current_function_decl = old_decl;
        }
      if (cfun->cfg)
        {
          gcc_assert (dom_computed[0] == DOM_NONE);
          gcc_assert (dom_computed[1] == DOM_NONE);
          clear_edges ();
        }
      if (cfun->value_histograms)
        free_histograms ();
      gcc_assert (!current_loops);
      pop_cfun ();
      gimple_set_body (node->decl, NULL);
      VEC_free (ipa_opt_pass, heap,
                DECL_STRUCT_FUNCTION (node->decl)->ipa_transforms_to_apply);
      /* Struct function hangs a lot of data off it that would leak if we
         didn't remove all pointers to it.  */
      ggc_free (DECL_STRUCT_FUNCTION (node->decl));
      DECL_STRUCT_FUNCTION (node->decl) = NULL;
    }
  DECL_SAVED_TREE (node->decl) = NULL;
  /* If the node is abstract and needed, then do not clear DECL_INITIAL
     of its associated function declaration because it's
     needed to emit debug info later.  */
  if (!node->abstract_and_needed)
    DECL_INITIAL (node->decl) = error_mark_node;
}

/* Remove the node from the cgraph.  */

void
cgraph_remove_node (struct cgraph_node *node)
{
  void **slot;
  bool kill_body = false;
  struct cgraph_node *n;
  int uid = node->uid;

  cgraph_call_node_removal_hooks (node);
  cgraph_node_remove_callers (node);
  cgraph_node_remove_callees (node);

  /* Incremental inlining accesses removed nodes stored in the postorder
     list.  */
  node->needed = node->reachable = false;
  for (n = node->nested; n; n = n->next_nested)
    n->origin = NULL;
  node->nested = NULL;
  if (node->origin)
    {
      struct cgraph_node **node2 = &node->origin->nested;

      while (*node2 != node)
        node2 = &(*node2)->next_nested;
      *node2 = node->next_nested;
    }
  if (node->previous)
    node->previous->next = node->next;
  else
    cgraph_nodes = node->next;
  if (node->next)
    node->next->previous = node->previous;
  node->next = NULL;
  node->previous = NULL;
  slot = htab_find_slot (cgraph_hash, node, NO_INSERT);
  if (*slot == node)
    {
      if (node->next_clone)
        {
          struct cgraph_node *new_node = node->next_clone;
          struct cgraph_node *n;

          /* Make the next clone be the master clone.  */
          for (n = new_node; n; n = n->next_clone)
            n->master_clone = new_node;

          *slot = new_node;
          node->next_clone->prev_clone = NULL;
        }
      else
        {
          htab_clear_slot (cgraph_hash, slot);
          kill_body = true;
        }
    }
  else
    {
      node->prev_clone->next_clone = node->next_clone;
      if (node->next_clone)
        node->next_clone->prev_clone = node->prev_clone;
    }

  /* While all the clones are removed after being processed, the function
     itself is kept in the cgraph even after it is compiled.  Check whether
     we are done with this body and reclaim it proactively if this is the
     case.  */
  if (!kill_body && *slot)
    {
      struct cgraph_node *n = (struct cgraph_node *) *slot;
      if (!n->next_clone && !n->global.inlined_to
          && (cgraph_global_info_ready
              && (TREE_ASM_WRITTEN (n->decl) || DECL_EXTERNAL (n->decl))))
        kill_body = true;
    }
  if (assembler_name_hash)
    {
      tree name = DECL_ASSEMBLER_NAME (node->decl);
      slot = htab_find_slot_with_hash (assembler_name_hash, name,
                                       decl_assembler_name_hash (name),
                                       NO_INSERT);
      /* Inline clones are not hashed.  */
      if (slot && *slot == node)
        htab_clear_slot (assembler_name_hash, slot);
    }

  if (kill_body)
    cgraph_release_function_body (node);
  node->decl = NULL;
  if (node->call_site_hash)
    {
      htab_delete (node->call_site_hash);
      node->call_site_hash = NULL;
    }
  cgraph_n_nodes--;

  /* Clear out the node to NULL all pointers and add the node to the free
     list.  */
  memset (node, 0, sizeof (*node));
  node->uid = uid;
  NEXT_FREE_NODE (node) = free_nodes;
  free_nodes = node;
}

/* Notify finalize_compilation_unit that the given node is reachable.  */

void
cgraph_mark_reachable_node (struct cgraph_node *node)
{
  if (!node->reachable && node->local.finalized)
    {
      notice_global_symbol (node->decl);
      node->reachable = 1;
      gcc_assert (!cgraph_global_info_ready);

      node->next_needed = cgraph_nodes_queue;
      cgraph_nodes_queue = node;
    }
}

/* Likewise indicate that a node is needed, i.e. reachable via some
   external means.  */

void
cgraph_mark_needed_node (struct cgraph_node *node)
{
  node->needed = 1;
  cgraph_mark_reachable_node (node);
}

/* Return local info for the compiled function.  */

struct cgraph_local_info *
cgraph_local_info (tree decl)
{
  struct cgraph_node *node;

  gcc_assert (TREE_CODE (decl) == FUNCTION_DECL);
  node = cgraph_node (decl);
  return &node->local;
}

/* Return global info for the compiled function.  */

struct cgraph_global_info *
cgraph_global_info (tree decl)
{
  struct cgraph_node *node;

  gcc_assert (TREE_CODE (decl) == FUNCTION_DECL && cgraph_global_info_ready);
  node = cgraph_node (decl);
  return &node->global;
}

/* Return RTL info for the compiled function.  */

struct cgraph_rtl_info *
cgraph_rtl_info (tree decl)
{
  struct cgraph_node *node;

  gcc_assert (TREE_CODE (decl) == FUNCTION_DECL);
  node = cgraph_node (decl);
  if (decl != current_function_decl
      && !TREE_ASM_WRITTEN (node->decl))
    return NULL;
  return &node->rtl;
}

/* Return the name of the node used in debug output.  */
const char *
cgraph_node_name (struct cgraph_node *node)
{
  return lang_hooks.decl_printable_name (node->decl, 2);
}

/* Names used to print out the availability enum.  */
const char * const cgraph_availability_names[] =
  {"unset", "not_available", "overwritable", "available", "local"};


/* Dump call graph node NODE to file F.  */

void
dump_cgraph_node (FILE *f, struct cgraph_node *node)
{
  struct cgraph_edge *edge;
  fprintf (f, "%s/%i(%i):", cgraph_node_name (node), node->uid, node->pid);
  if (node->global.inlined_to)
    fprintf (f, " (inline copy in %s/%i)",
             cgraph_node_name (node->global.inlined_to),
             node->global.inlined_to->uid);
  if (cgraph_function_flags_ready)
    fprintf (f, " availability:%s",
             cgraph_availability_names [cgraph_function_body_availability (node)]);
  if (node->master_clone && node->master_clone->uid != node->uid)
    fprintf (f, "(%i)", node->master_clone->uid);
  if (node->count)
    fprintf (f, " executed "HOST_WIDEST_INT_PRINT_DEC"x",
             (HOST_WIDEST_INT)node->count);
  if (node->local.inline_summary.self_insns)
    fprintf (f, " %i insns", node->local.inline_summary.self_insns);
  if (node->global.insns && node->global.insns
      != node->local.inline_summary.self_insns)
    fprintf (f, " (%i after inlining)", node->global.insns);
  if (node->local.inline_summary.estimated_self_stack_size)
    fprintf (f, " %i bytes stack usage",
             (int)node->local.inline_summary.estimated_self_stack_size);
  if (node->global.estimated_stack_size
      != node->local.inline_summary.estimated_self_stack_size)
    fprintf (f, " %i bytes after inlining",
             (int)node->global.estimated_stack_size);
  if (node->origin)
    fprintf (f, " nested in: %s", cgraph_node_name (node->origin));
  if (node->needed)
    fprintf (f, " needed");
  else if (node->reachable)
    fprintf (f, " reachable");
  if (gimple_has_body_p (node->decl))
    fprintf (f, " body");
  if (node->output)
    fprintf (f, " output");
  if (node->local.local)
    fprintf (f, " local");
  if (node->local.externally_visible)
    fprintf (f, " externally_visible");
  if (node->local.finalized)
    fprintf (f, " finalized");
  if (node->local.disregard_inline_limits)
    fprintf (f, " always_inline");
  else if (node->local.inlinable)
    fprintf (f, " inlinable");
  if (node->local.redefined_extern_inline)
    fprintf (f, " redefined_extern_inline");
  if (TREE_ASM_WRITTEN (node->decl))
    fprintf (f, " asm_written");

  fprintf (f, "\n called by: ");
  for (edge = node->callers; edge; edge = edge->next_caller)
    {
      fprintf (f, "%s/%i ", cgraph_node_name (edge->caller),
               edge->caller->uid);
      if (edge->count)
        fprintf (f, "("HOST_WIDEST_INT_PRINT_DEC"x) ",
                 (HOST_WIDEST_INT)edge->count);
      if (edge->frequency)
        fprintf (f, "(%.2f per call) ",
                 edge->frequency / (double)CGRAPH_FREQ_BASE);
      if (!edge->inline_failed)
        fprintf (f, "(inlined) ");
      if (edge->indirect_call)
        fprintf (f, "(indirect) ");
    }

  fprintf (f, "\n calls: ");
  for (edge = node->callees; edge; edge = edge->next_callee)
    {
      fprintf (f, "%s/%i ", cgraph_node_name (edge->callee),
               edge->callee->uid);
      if (!edge->inline_failed)
        fprintf (f, "(inlined) ");
      if (edge->indirect_call)
        fprintf (f, "(indirect) ");
      if (edge->count)
        fprintf (f, "("HOST_WIDEST_INT_PRINT_DEC"x) ",
                 (HOST_WIDEST_INT)edge->count);
      if (edge->frequency)
        fprintf (f, "(%.2f per call) ",
                 edge->frequency / (double)CGRAPH_FREQ_BASE);
      if (edge->loop_nest)
        fprintf (f, "(nested in %i loops) ", edge->loop_nest);
    }
  fprintf (f, "\n");
}


/* Dump call graph node NODE to stderr.  */

void
debug_cgraph_node (struct cgraph_node *node)
{
  dump_cgraph_node (stderr, node);
}


/* Dump the callgraph to file F.  */

void
dump_cgraph (FILE *f)
{
  struct cgraph_node *node;

  fprintf (f, "callgraph:\n\n");
  for (node = cgraph_nodes; node; node = node->next)
    dump_cgraph_node (f, node);
}


/* Dump the call graph to stderr.  */

void
debug_cgraph (void)
{
  dump_cgraph (stderr);
}


/* Set the DECL_ASSEMBLER_NAME and update the cgraph hashtables.  */

void
change_decl_assembler_name (tree decl, tree name)
{
  gcc_assert (!assembler_name_hash);
  if (!DECL_ASSEMBLER_NAME_SET_P (decl))
    {
      SET_DECL_ASSEMBLER_NAME (decl, name);
      return;
    }
  if (name == DECL_ASSEMBLER_NAME (decl))
    return;

  if (TREE_SYMBOL_REFERENCED (DECL_ASSEMBLER_NAME (decl))
      && DECL_RTL_SET_P (decl))
    warning (0, "%D renamed after being referenced in assembly", decl);

  SET_DECL_ASSEMBLER_NAME (decl, name);
}

/* Add a top-level asm statement to the list.  */

struct cgraph_asm_node *
cgraph_add_asm_node (tree asm_str)
{
  struct cgraph_asm_node *node;

  node = GGC_CNEW (struct cgraph_asm_node);
  node->asm_str = asm_str;
  node->order = cgraph_order++;
  node->next = NULL;
  if (cgraph_asm_nodes == NULL)
    cgraph_asm_nodes = node;
  else
    cgraph_asm_last_node->next = node;
  cgraph_asm_last_node = node;
  return node;
}

/* Return true when the DECL can possibly be inlined.  */
bool
cgraph_function_possibly_inlined_p (tree decl)
{
  if (!cgraph_global_info_ready)
    return !DECL_UNINLINABLE (decl);
  return DECL_POSSIBLY_INLINED (decl);
}

/* Create a clone of edge E in the callgraph node N, represented by the
   statement CALL_STMT.  */
struct cgraph_edge *
cgraph_clone_edge (struct cgraph_edge *e, struct cgraph_node *n,
                   gimple call_stmt, gcov_type count_scale, int freq_scale,
                   int loop_nest, bool update_original)
{
  struct cgraph_edge *new_edge;
  gcov_type count = e->count * count_scale / REG_BR_PROB_BASE;
  gcov_type freq = e->frequency * (gcov_type) freq_scale / CGRAPH_FREQ_BASE;

  if (freq > CGRAPH_FREQ_MAX)
    freq = CGRAPH_FREQ_MAX;
  new_edge = cgraph_create_edge (n, e->callee, call_stmt, count, freq,
                                 e->loop_nest + loop_nest);

  new_edge->inline_failed = e->inline_failed;
  new_edge->indirect_call = e->indirect_call;
  if (update_original)
    {
      e->count -= new_edge->count;
      if (e->count < 0)
        e->count = 0;
    }
  cgraph_call_edge_duplication_hooks (e, new_edge);
  return new_edge;
}

/* Create a node representing a clone of N executed COUNT times.  Decrease
   the execution counts from the original node too.

   When UPDATE_ORIGINAL is true, the counts are subtracted from the original
   function's profile to reflect the fact that part of the execution is
   handled by the new node.  */
struct cgraph_node *
cgraph_clone_node (struct cgraph_node *n, gcov_type count, int freq,
                   int loop_nest, bool update_original)
{
  struct cgraph_node *new_node = cgraph_create_node ();
  struct cgraph_edge *e;
  gcov_type count_scale;

  new_node->decl = n->decl;
  new_node->origin = n->origin;
  if (new_node->origin)
    {
      new_node->next_nested = new_node->origin->nested;
      new_node->origin->nested = new_node;
    }
  new_node->analyzed = n->analyzed;
  new_node->local = n->local;
  new_node->global = n->global;
  new_node->rtl = n->rtl;
  new_node->master_clone = n->master_clone;
  new_node->count = count;
  if (n->count)
    {
      if (new_node->count > n->count)
        count_scale = REG_BR_PROB_BASE;
      else
        count_scale = new_node->count * REG_BR_PROB_BASE / n->count;
    }
  else
    count_scale = 0;
  if (update_original)
    {
      n->count -= count;
      if (n->count < 0)
        n->count = 0;
    }

  for (e = n->callees; e; e = e->next_callee)
    cgraph_clone_edge (e, new_node, e->call_stmt, count_scale, freq,
                       loop_nest, update_original);

  new_node->next_clone = n->next_clone;
  new_node->prev_clone = n;
  n->next_clone = new_node;
  if (new_node->next_clone)
    new_node->next_clone->prev_clone = new_node;

  cgraph_call_node_duplication_hooks (n, new_node);
  return new_node;
}

/* Return true if N is a master clone (see cgraph_master_clone).  */

bool
cgraph_is_master_clone (struct cgraph_node *n)
{
  return (n == cgraph_master_clone (n));
}

struct cgraph_node *
cgraph_master_clone (struct cgraph_node *n)
{
  enum availability avail = cgraph_function_body_availability (n);

  if (avail == AVAIL_NOT_AVAILABLE || avail == AVAIL_OVERWRITABLE)
    return NULL;

  if (!n->master_clone)
    n->master_clone = cgraph_node (n->decl);

  return n->master_clone;
}

/* NODE is no longer a nested function; update the cgraph accordingly.  */
void
cgraph_unnest_node (struct cgraph_node *node)
{
  struct cgraph_node **node2 = &node->origin->nested;
  gcc_assert (node->origin);

  while (*node2 != node)
    node2 = &(*node2)->next_nested;
  *node2 = node->next_nested;
  node->origin = NULL;
}

/* Return function availability.  See cgraph.h for a description of the
   individual return values.  */
enum availability
cgraph_function_body_availability (struct cgraph_node *node)
{
  enum availability avail;
  gcc_assert (cgraph_function_flags_ready);
  if (!node->analyzed)
    avail = AVAIL_NOT_AVAILABLE;
  else if (node->local.local)
    avail = AVAIL_LOCAL;
  else if (!node->local.externally_visible)
    avail = AVAIL_AVAILABLE;

  /* If the function can be overwritten, return OVERWRITABLE.  Take
     care of at least two notable extensions - the COMDAT functions
     used to share template instantiations in C++ (this is symmetric
     to code cp_cannot_inline_tree_fn and probably shall be shared and
     the inlinability hooks completely eliminated).

     ??? Does the C++ one definition rule allow us to always return
     AVAIL_AVAILABLE here?  That would be a good reason to preserve this
     hook.  Similarly, deal with extern inline functions - this is again
     necessary to get C++ shared functions having keyed templates
     right, and in the C extension documentation we probably should
     document the requirement of both versions of the function (extern
     inline and offline) having the same side effect characteristics, as
     good optimization is what this optimization is about.  */

  else if (!(*targetm.binds_local_p) (node->decl)
           && !DECL_COMDAT (node->decl) && !DECL_EXTERNAL (node->decl))
    avail = AVAIL_OVERWRITABLE;
  else avail = AVAIL_AVAILABLE;

  return avail;
}
/* Add the function FNDECL to the call graph.
   Unlike cgraph_finalize_function, this function is intended to be used
   by the middle end and allows insertion of new functions at an arbitrary
   point of compilation.  The function can be in high, low or SSA form
   GIMPLE.

   The function is assumed to be reachable and to have its address taken
   (so no API-breaking optimizations are performed on it).

   The main work done by this function is to enqueue the function for later
   processing, to avoid the need for the passes to be re-entrant.  */

void
cgraph_add_new_function (tree fndecl, bool lowered)
{
  struct cgraph_node *node;
  switch (cgraph_state)
    {
      case CGRAPH_STATE_CONSTRUCTION:
        /* Just enqueue the function to be processed at the nearest
           occasion.  */
        node = cgraph_node (fndecl);
        node->next_needed = cgraph_new_nodes;
        if (lowered)
          node->lowered = true;
        cgraph_new_nodes = node;
        break;

      case CGRAPH_STATE_IPA:
      case CGRAPH_STATE_IPA_SSA:
      case CGRAPH_STATE_EXPANSION:
        /* Bring the function into the finalized state and enqueue it for
           later analyzing and compilation.  */
        node = cgraph_node (fndecl);
        node->local.local = false;
        node->local.finalized = true;
        node->reachable = node->needed = true;
        if (!lowered && cgraph_state == CGRAPH_STATE_EXPANSION)
          {
            push_cfun (DECL_STRUCT_FUNCTION (fndecl));
            current_function_decl = fndecl;
            gimple_register_cfg_hooks ();
            tree_lowering_passes (fndecl);
            bitmap_obstack_initialize (NULL);
            if (!gimple_in_ssa_p (DECL_STRUCT_FUNCTION (fndecl)))
              execute_pass_list (pass_early_local_passes.pass.sub);
            bitmap_obstack_release (NULL);
            pop_cfun ();
            current_function_decl = NULL;

            lowered = true;
          }
        if (lowered)
          node->lowered = true;
        node->next_needed = cgraph_new_nodes;
        cgraph_new_nodes = node;
        break;

      case CGRAPH_STATE_FINISHED:
        /* At the very end of compilation we have to do all the work up
           to expansion.  */
        push_cfun (DECL_STRUCT_FUNCTION (fndecl));
        current_function_decl = fndecl;
        gimple_register_cfg_hooks ();
        if (!lowered)
          tree_lowering_passes (fndecl);
        bitmap_obstack_initialize (NULL);
        if (!gimple_in_ssa_p (DECL_STRUCT_FUNCTION (fndecl)))
          execute_pass_list (pass_early_local_passes.pass.sub);
        bitmap_obstack_release (NULL);
        tree_rest_of_compilation (fndecl);
        pop_cfun ();
        current_function_decl = NULL;
        break;
    }
}
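
/* As a rough usage sketch (illustrative only; the decl-building details are
   omitted and the helper below is hypothetical), a pass that synthesizes a
   helper function would build and gimplify its body, then hand it to the
   callgraph:

	tree fndecl = build_my_helper_fndecl ();	-- hypothetical
	gimplify_function_tree (fndecl);
	cgraph_add_new_function (fndecl, false);

   In most states the new node is queued on cgraph_new_nodes for later
   processing; at CGRAPH_STATE_FINISHED it is compiled immediately, as
   handled above.  */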

#include "gt-cgraph.h"