Mercurial > hg > CbC > CbC_gcc
annotate gcc/unwind-dw2-fde.c @ 56:3c8a44c06a95
Added tag gcc-4.4.5 for changeset 77e2b8dfacca
author | ryoma <e075725@ie.u-ryukyu.ac.jp> |
---|---|
date | Fri, 12 Feb 2010 23:41:23 +0900 |
parents | 77e2b8dfacca |
children | b7f97abdc517 |
rev | line source |
---|---|
0 | 1 /* Subroutines needed for unwinding stack frames for exception handling. */ |
2 /* Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2008, | |
3 2009 Free Software Foundation, Inc. | |
4 Contributed by Jason Merrill <jason@cygnus.com>. | |
5 | |
6 This file is part of GCC. | |
7 | |
8 GCC is free software; you can redistribute it and/or modify it under | |
9 the terms of the GNU General Public License as published by the Free | |
10 Software Foundation; either version 3, or (at your option) any later | |
11 version. | |
12 | |
13 GCC is distributed in the hope that it will be useful, but WITHOUT ANY | |
14 WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
15 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
16 for more details. | |
17 | |
18 Under Section 7 of GPL version 3, you are granted additional | |
19 permissions described in the GCC Runtime Library Exception, version | |
20 3.1, as published by the Free Software Foundation. | |
21 | |
22 You should have received a copy of the GNU General Public License and | |
23 a copy of the GCC Runtime Library Exception along with this program; | |
24 see the files COPYING3 and COPYING.RUNTIME respectively. If not, see | |
25 <http://www.gnu.org/licenses/>. */ | |
26 | |
27 #ifndef _Unwind_Find_FDE | |
28 #include "tconfig.h" | |
29 #include "tsystem.h" | |
30 #include "coretypes.h" | |
31 #include "tm.h" | |
32 #include "dwarf2.h" | |
33 #include "unwind.h" | |
34 #define NO_BASE_OF_ENCODED_VALUE | |
35 #include "unwind-pe.h" | |
36 #include "unwind-dw2-fde.h" | |
37 #include "gthr.h" | |
38 #endif | |
39 | |
/* The unseen_objects list contains objects that have been registered
   but not yet categorized in any way.  The seen_objects list has had
   its pc_begin and count fields initialized at minimum, and is sorted
   by decreasing value of pc_begin.  */
static struct object *unseen_objects;
static struct object *seen_objects;

/* Mutex protecting both lists above.  Statically initialized when the
   threads package provides an initializer macro; otherwise it is set up
   lazily by init_object_mutex_once below.  */
#ifdef __GTHREAD_MUTEX_INIT
static __gthread_mutex_t object_mutex = __GTHREAD_MUTEX_INIT;
#else
static __gthread_mutex_t object_mutex;
#endif
52 | |
#ifdef __GTHREAD_MUTEX_INIT_FUNCTION
/* __gthread_once callback: perform the one-time mutex initialization.  */
static void
init_object_mutex (void)
{
  __GTHREAD_MUTEX_INIT_FUNCTION (&object_mutex);
}

/* Ensure OBJECT_MUTEX is initialized exactly once before first use;
   called at the top of every registration/lookup entry point.  */
static void
init_object_mutex_once (void)
{
  static __gthread_once_t once = __GTHREAD_ONCE_INIT;
  __gthread_once (&once, init_object_mutex);
}
#else
/* No initialization function available; either the mutex was statically
   initialized or threads are not in use.  */
#define init_object_mutex_once()
#endif
69 | |
/* Called from crtbegin.o to register the unwind info for an object.
   BEGIN points at the object's .eh_frame data; OB is caller-provided
   bookkeeping storage; TBASE/DBASE are the text and data base addresses
   used for DW_EH_PE_textrel/datarel encodings.  */

void
__register_frame_info_bases (const void *begin, struct object *ob,
                             void *tbase, void *dbase)
{
  /* If .eh_frame is empty, don't register at all.  */
  if ((const uword *) begin == 0 || *(const uword *) begin == 0)
    return;

  /* pc_begin is computed lazily during classification; start it at the
     highest address so the first real pc can only lower it.  */
  ob->pc_begin = (void *)-1;
  ob->tbase = tbase;
  ob->dbase = dbase;
  ob->u.single = begin;
  ob->s.i = 0;
  ob->s.b.encoding = DW_EH_PE_omit;
#ifdef DWARF2_OBJECT_END_PTR_EXTENSION
  ob->fde_end = NULL;
#endif

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  /* Push onto the unclassified list; sorting happens on first lookup.  */
  ob->next = unseen_objects;
  unseen_objects = ob;

  __gthread_mutex_unlock (&object_mutex);
}
98 | |
99 void | |
100 __register_frame_info (const void *begin, struct object *ob) | |
101 { | |
102 __register_frame_info_bases (begin, ob, 0, 0); | |
103 } | |
104 | |
105 void | |
106 __register_frame (void *begin) | |
107 { | |
108 struct object *ob; | |
109 | |
110 /* If .eh_frame is empty, don't register at all. */ | |
111 if (*(uword *) begin == 0) | |
112 return; | |
113 | |
114 ob = malloc (sizeof (struct object)); | |
115 __register_frame_info (begin, ob); | |
116 } | |
117 | |
/* Similar, but BEGIN is actually a pointer to a table of unwind entries
   for different translation units.  Called from the file generated by
   collect2.  */

void
__register_frame_info_table_bases (void *begin, struct object *ob,
                                   void *tbase, void *dbase)
{
  /* Start pc_begin at the maximum address; classification lowers it.  */
  ob->pc_begin = (void *)-1;
  ob->tbase = tbase;
  ob->dbase = dbase;
  ob->u.array = begin;
  ob->s.i = 0;
  /* from_array distinguishes the table case in all later traversals.  */
  ob->s.b.from_array = 1;
  ob->s.b.encoding = DW_EH_PE_omit;

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  /* Push onto the unclassified list; sorting happens on first lookup.  */
  ob->next = unseen_objects;
  unseen_objects = ob;

  __gthread_mutex_unlock (&object_mutex);
}
142 | |
143 void | |
144 __register_frame_info_table (void *begin, struct object *ob) | |
145 { | |
146 __register_frame_info_table_bases (begin, ob, 0, 0); | |
147 } | |
148 | |
149 void | |
150 __register_frame_table (void *begin) | |
151 { | |
152 struct object *ob = malloc (sizeof (struct object)); | |
153 __register_frame_info_table (begin, ob); | |
154 } | |
155 | |
/* Called from crtbegin.o to deregister the unwind info for an object.  */
/* ??? Glibc has for a while now exported __register_frame_info and
   __deregister_frame_info.  If we call __register_frame_info_bases
   from crtbegin (wherein it is declared weak), and this object does
   not get pulled from libgcc.a for other reasons, then the
   invocation of __deregister_frame_info will be resolved from glibc.
   Since the registration did not happen there, we'll die.

   Therefore, declare a new deregistration entry point that does the
   exact same thing, but will resolve to the same library as
   implements __register_frame_info_bases.  */

void *
__deregister_frame_info_bases (const void *begin)
{
  struct object **p;
  struct object *ob = 0;

  /* If .eh_frame is empty, we haven't registered.  */
  if ((const uword *) begin == 0 || *(const uword *) begin == 0)
    return ob;

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  /* First look among the objects that were never classified; these still
     carry the original BEGIN pointer in u.single.  */
  for (p = &unseen_objects; *p ; p = &(*p)->next)
    if ((*p)->u.single == begin)
      {
        ob = *p;
        *p = ob->next;
        goto out;
      }

  /* Otherwise search the classified list.  A sorted object replaced
     u.single with the sort vector, which remembers the original pointer
     in orig_data; that vector must be freed here.  */
  for (p = &seen_objects; *p ; p = &(*p)->next)
    if ((*p)->s.b.sorted)
      {
        if ((*p)->u.sort->orig_data == begin)
          {
            ob = *p;
            *p = ob->next;
            free (ob->u.sort);
            goto out;
          }
      }
    else
      {
        if ((*p)->u.single == begin)
          {
            ob = *p;
            *p = ob->next;
            goto out;
          }
      }

 out:
  __gthread_mutex_unlock (&object_mutex);
  /* Deregistering something that was never registered is a caller bug.  */
  gcc_assert (ob);
  /* Return the object so __deregister_frame can free heap allocations.  */
  return (void *) ob;
}
215 | |
/* Public deregistration entry point; see the comment above
   __deregister_frame_info_bases for why this wrapper exists.  */
void *
__deregister_frame_info (const void *begin)
{
  return __deregister_frame_info_bases (begin);
}
221 | |
/* Undo __register_frame: deregister and free the heap-allocated
   bookkeeping object.  */
void
__deregister_frame (void *begin)
{
  /* If .eh_frame is empty, we haven't registered.  */
  if (*(uword *) begin != 0)
    free (__deregister_frame_info (begin));
}
229 | |
230 | |
/* Like base_of_encoded_value, but take the base from a struct object
   instead of an _Unwind_Context.  Returns the base address to add to a
   value encoded with ENCODING.  */

static _Unwind_Ptr
base_from_object (unsigned char encoding, struct object *ob)
{
  if (encoding == DW_EH_PE_omit)
    return 0;

  /* Bits 0x70 select the base-application part of the encoding; the low
     nibble (value format) is irrelevant here.  */
  switch (encoding & 0x70)
    {
    case DW_EH_PE_absptr:
    case DW_EH_PE_pcrel:
    case DW_EH_PE_aligned:
      /* These are self-contained or pc-relative; no external base.  */
      return 0;

    case DW_EH_PE_textrel:
      return (_Unwind_Ptr) ob->tbase;
    case DW_EH_PE_datarel:
      return (_Unwind_Ptr) ob->dbase;
    default:
      /* funcrel/indirect and unknown bases are not supported here.  */
      gcc_unreachable ();
    }
}
255 | |
/* Return the FDE pointer encoding from the CIE.  */
/* ??? This is a subset of extract_cie_info from unwind-dw2.c.  */

static int
get_cie_encoding (const struct dwarf_cie *cie)
{
  const unsigned char *aug, *p;
  _Unwind_Ptr dummy;
  _uleb128_t utmp;
  _sleb128_t stmp;

  aug = cie->augmentation;
  /* Without a 'z' augmentation there is no augmentation data and FDE
     pointers default to native absolute pointers.  */
  if (aug[0] != 'z')
    return DW_EH_PE_absptr;

  /* Walk the fixed CIE fields to reach the augmentation data.  */
  p = aug + strlen ((const char *)aug) + 1; /* Skip the augmentation string.  */
  p = read_uleb128 (p, &utmp);              /* Skip code alignment.  */
  p = read_sleb128 (p, &stmp);              /* Skip data alignment.  */
  if (cie->version == 1)                    /* Skip return address column.  */
    p++;
  else
    p = read_uleb128 (p, &utmp);

  aug++;                                    /* Skip 'z' */
  p = read_uleb128 (p, &utmp);              /* Skip augmentation length.  */
  /* AUG and P now advance in lockstep: each augmentation letter consumes
     a known amount of augmentation data.  */
  while (1)
    {
      /* This is what we're looking for.  */
      if (*aug == 'R')
        return *p;
      /* Personality encoding and pointer.  */
      else if (*aug == 'P')
        {
          /* ??? Avoid dereferencing indirect pointers, since we're
             faking the base address.  Gotta keep DW_EH_PE_aligned
             intact, however.  */
          p = read_encoded_value_with_base (*p & 0x7F, 0, p + 1, &dummy);
        }
      /* LSDA encoding.  */
      else if (*aug == 'L')
        p++;
      /* Otherwise end of string, or unknown augmentation.  */
      else
        return DW_EH_PE_absptr;
      aug++;
    }
}
303 | |
/* Return the pointer encoding used by FDE F, taken from its CIE.  */
static inline int
get_fde_encoding (const struct dwarf_fde *f)
{
  const struct dwarf_cie *cie = get_cie (f);
  return get_cie_encoding (cie);
}
309 | |
310 | |
311 /* Sorting an array of FDEs by address. | |
312 (Ideally we would have the linker sort the FDEs so we don't have to do | |
313 it at run time. But the linkers are not yet prepared for this.) */ | |
314 | |
315 /* Comparison routines. Three variants of increasing complexity. */ | |
316 | |
317 static int | |
318 fde_unencoded_compare (struct object *ob __attribute__((unused)), | |
319 const fde *x, const fde *y) | |
320 { | |
55
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
36
diff
changeset
|
321 _Unwind_Ptr x_ptr, y_ptr; |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
36
diff
changeset
|
322 memcpy (&x_ptr, x->pc_begin, sizeof (_Unwind_Ptr)); |
77e2b8dfacca
update it from 4.4.3 to 4.5.0
ryoma <e075725@ie.u-ryukyu.ac.jp>
parents:
36
diff
changeset
|
323 memcpy (&y_ptr, y->pc_begin, sizeof (_Unwind_Ptr)); |
0 | 324 |
325 if (x_ptr > y_ptr) | |
326 return 1; | |
327 if (x_ptr < y_ptr) | |
328 return -1; | |
329 return 0; | |
330 } | |
331 | |
/* Compare two FDEs by initial pc when the whole object uses one known
   (non-absptr) pointer encoding.  */
static int
fde_single_encoding_compare (struct object *ob, const fde *x, const fde *y)
{
  _Unwind_Ptr base, x_ptr, y_ptr;

  /* One base suffices because the encoding is uniform for the object.  */
  base = base_from_object (ob->s.b.encoding, ob);
  read_encoded_value_with_base (ob->s.b.encoding, base, x->pc_begin, &x_ptr);
  read_encoded_value_with_base (ob->s.b.encoding, base, y->pc_begin, &y_ptr);

  if (x_ptr > y_ptr)
    return 1;
  if (x_ptr < y_ptr)
    return -1;
  return 0;
}
347 | |
/* Compare two FDEs by initial pc when FDEs in the object use differing
   encodings: each FDE's encoding must be fetched from its own CIE.  */
static int
fde_mixed_encoding_compare (struct object *ob, const fde *x, const fde *y)
{
  int x_encoding, y_encoding;
  _Unwind_Ptr x_ptr, y_ptr;

  x_encoding = get_fde_encoding (x);
  read_encoded_value_with_base (x_encoding, base_from_object (x_encoding, ob),
                                x->pc_begin, &x_ptr);

  y_encoding = get_fde_encoding (y);
  read_encoded_value_with_base (y_encoding, base_from_object (y_encoding, ob),
                                y->pc_begin, &y_ptr);

  if (x_ptr > y_ptr)
    return 1;
  if (x_ptr < y_ptr)
    return -1;
  return 0;
}
368 | |
/* Signature shared by the three comparison routines above.  */
typedef int (*fde_compare_t) (struct object *, const fde *, const fde *);


/* This is a special mix of insertion sort and heap sort, optimized for
   the data sets that actually occur.  They look like
   101 102 103 127 128 105 108 110 190 111 115 119 125 160 126 129 130.
   I.e. a linearly increasing sequence (coming from functions in the text
   section), with additionally a few unordered elements (coming from functions
   in gnu_linkonce sections) whose values are higher than the values in the
   surrounding linear sequence (but not necessarily higher than the values
   at the end of the linear sequence!).
   The worst-case total run time is O(N) + O(n log (n)), where N is the
   total number of FDEs and n is the number of erratic ones.  */

/* Working storage for the sort: LINEAR collects all FDEs, then is split
   into an already-sorted run (kept in LINEAR) and the out-of-order rest
   (moved to ERRATIC, heap-sorted, then merged back).  */
struct fde_accumulator
{
  struct fde_vector *linear;
  struct fde_vector *erratic;
};
388 | |
/* Allocate the two working vectors for COUNT FDEs.  Returns nonzero if
   at least the LINEAR vector could be allocated; ERRATIC may legitimately
   be NULL afterwards (end_fde_sort then falls back to a pure heapsort).  */
static inline int
start_fde_sort (struct fde_accumulator *accu, size_t count)
{
  size_t size;
  if (! count)
    return 0;

  size = sizeof (struct fde_vector) + sizeof (const fde *) * count;
  if ((accu->linear = malloc (size)))
    {
      accu->linear->count = 0;
      if ((accu->erratic = malloc (size)))
        accu->erratic->count = 0;
      return 1;
    }
  else
    return 0;
}
407 | |
/* Append THIS_FDE to the linear accumulator; a no-op if the vector
   could not be allocated.  */
static inline void
fde_insert (struct fde_accumulator *accu, const fde *this_fde)
{
  if (accu->linear)
    accu->linear->array[accu->linear->count++] = this_fde;
}
414 | |
/* Split LINEAR into a linear sequence with low values and an erratic
   sequence with high values, put the linear one (of longest possible
   length) into LINEAR and the erratic one into ERRATIC.  This is O(N).

   Because the longest linear sequence we are trying to locate within the
   incoming LINEAR array can be interspersed with (high valued) erratic
   entries.  We construct a chain indicating the sequenced entries.
   To avoid having to allocate this chain, we overlay it onto the space of
   the ERRATIC array during construction.  A final pass iterates over the
   chain to determine what should be placed in the ERRATIC array, and
   what is the linear sequence.  This overlay is safe from aliasing.  */

static inline void
fde_split (struct object *ob, fde_compare_t fde_compare,
           struct fde_vector *linear, struct fde_vector *erratic)
{
  /* MARKER's address is a sentinel terminating the chain; its value is
     never read.  */
  static const fde *marker;
  size_t count = linear->count;
  const fde *const *chain_end = &marker;
  size_t i, j, k;

  /* This should optimize out, but it is wise to make sure this assumption
     is correct.  Should these have different sizes, we cannot cast between
     them and the overlaying onto ERRATIC will not work.  */
  gcc_assert (sizeof (const fde *) == sizeof (const fde **));

  for (i = 0; i < count; i++)
    {
      const fde *const *probe;

      /* Pop chain entries that compare greater than LINEAR[i]; they are
         erratic, so their overlay slot is reset to NULL.  */
      for (probe = chain_end;
           probe != &marker && fde_compare (ob, linear->array[i], *probe) < 0;
           probe = chain_end)
        {
          chain_end = (const fde *const*) erratic->array[probe - linear->array];
          erratic->array[probe - linear->array] = NULL;
        }
      /* Link LINEAR[i] onto the chain via its overlay slot.  */
      erratic->array[i] = (const fde *) chain_end;
      chain_end = &linear->array[i];
    }

  /* Each entry in LINEAR which is part of the linear sequence we have
     discovered will correspond to a non-NULL entry in the chain we built in
     the ERRATIC array.  */
  for (i = j = k = 0; i < count; i++)
    if (erratic->array[i])
      linear->array[j++] = linear->array[i];
    else
      erratic->array[k++] = linear->array[i];
  linear->count = j;
  erratic->count = k;
}
467 | |
#define SWAP(x,y) do { const fde * tmp = x; x = y; y = tmp; } while (0)

/* Convert a semi-heap to a heap.  A semi-heap is a heap except possibly
   for the first (root) node; push it down to its rightful place.
   A[LO..HI) is the array region; ordering is given by FDE_COMPARE.  */

static void
frame_downheap (struct object *ob, fde_compare_t fde_compare, const fde **a,
                int lo, int hi)
{
  int i, j;

  /* J is always the left child of I.  */
  for (i = lo, j = 2*i+1;
       j < hi;
       j = 2*i+1)
    {
      /* Pick the larger of the two children.  */
      if (j+1 < hi && fde_compare (ob, a[j], a[j+1]) < 0)
        ++j;

      if (fde_compare (ob, a[i], a[j]) < 0)
        {
          SWAP (a[i], a[j]);
          i = j;
        }
      else
        break;
    }
}
495 | |
/* This is O(n log(n)).  BSD/OS defines heapsort in stdlib.h, so we must
   use a name that does not conflict.  Sorts ERRATIC in place using
   FDE_COMPARE.  */

static void
frame_heapsort (struct object *ob, fde_compare_t fde_compare,
                struct fde_vector *erratic)
{
  /* For a description of this algorithm, see:
     Samuel P. Harbison, Guy L. Steele Jr.: C, a reference manual, 2nd ed.,
     p. 60-61.  */
  const fde ** a = erratic->array;
  /* A portion of the array is called a "heap" if for all i>=0:
     If i and 2i+1 are valid indices, then a[i] >= a[2i+1].
     If i and 2i+2 are valid indices, then a[i] >= a[2i+2].  */
  size_t n = erratic->count;
  int m;

  /* Expand our heap incrementally from the end of the array, heapifying
     each resulting semi-heap as we go.  After each step, a[m] is the top
     of a heap.  */
  for (m = n/2-1; m >= 0; --m)
    frame_downheap (ob, fde_compare, a, m, n);

  /* Shrink our heap incrementally from the end of the array, first
     swapping out the largest element a[0] and then re-heapifying the
     resulting semi-heap.  After each step, a[0..m) is a heap.  */
  for (m = n-1; m >= 1; --m)
    {
      SWAP (a[0], a[m]);
      frame_downheap (ob, fde_compare, a, 0, m);
    }
#undef SWAP
}
529 | |
/* Merge V1 and V2, both sorted, and put the result into V1.
   V1 must have room for V1->count + V2->count entries; the merge works
   backwards from the combined end so V1 can be merged in place.  */
static inline void
fde_merge (struct object *ob, fde_compare_t fde_compare,
           struct fde_vector *v1, struct fde_vector *v2)
{
  size_t i1, i2;
  const fde * fde2;

  i2 = v2->count;
  if (i2 > 0)
    {
      i1 = v1->count;
      do
        {
          i2--;
          fde2 = v2->array[i2];
          /* Shift larger V1 entries up past the slot for FDE2.  */
          while (i1 > 0 && fde_compare (ob, v1->array[i1-1], fde2) > 0)
            {
              v1->array[i1+i2] = v1->array[i1-1];
              i1--;
            }
          v1->array[i1+i2] = fde2;
        }
      while (i2 > 0);
      v1->count += v2->count;
    }
}
557 | |
/* Finish the sort started by start_fde_sort/fde_insert: split, heapsort
   the erratic part, merge, and release the scratch vector.  COUNT is the
   expected total number of FDEs (consistency-checked).  */
static inline void
end_fde_sort (struct object *ob, struct fde_accumulator *accu, size_t count)
{
  fde_compare_t fde_compare;

  gcc_assert (!accu->linear || accu->linear->count == count);

  /* Pick the cheapest comparison routine that is correct for OB.  */
  if (ob->s.b.mixed_encoding)
    fde_compare = fde_mixed_encoding_compare;
  else if (ob->s.b.encoding == DW_EH_PE_absptr)
    fde_compare = fde_unencoded_compare;
  else
    fde_compare = fde_single_encoding_compare;

  if (accu->erratic)
    {
      fde_split (ob, fde_compare, accu->linear, accu->erratic);
      gcc_assert (accu->linear->count + accu->erratic->count == count);
      frame_heapsort (ob, fde_compare, accu->erratic);
      fde_merge (ob, fde_compare, accu->linear, accu->erratic);
      free (accu->erratic);
    }
  else
    {
      /* We've not managed to malloc an erratic array,
         so heap sort in the linear one.  */
      frame_heapsort (ob, fde_compare, accu->linear);
    }
}
587 | |
588 | |
/* Update encoding, mixed_encoding, and pc_begin for OB for the
   fde array beginning at THIS_FDE.  Return the number of fdes
   encountered along the way.  */

static size_t
classify_object_over_fdes (struct object *ob, const fde *this_fde)
{
  const struct dwarf_cie *last_cie = 0;
  size_t count = 0;
  int encoding = DW_EH_PE_absptr;
  _Unwind_Ptr base = 0;

  for (; ! last_fde (ob, this_fde); this_fde = next_fde (this_fde))
    {
      const struct dwarf_cie *this_cie;
      _Unwind_Ptr mask, pc_begin;

      /* Skip CIEs.  */
      if (this_fde->CIE_delta == 0)
        continue;

      /* Determine the encoding for this FDE.  Note mixed encoded
         objects for later.  FDEs sharing a CIE are typically adjacent,
         so caching the last CIE avoids re-parsing it.  */
      this_cie = get_cie (this_fde);
      if (this_cie != last_cie)
        {
          last_cie = this_cie;
          encoding = get_cie_encoding (this_cie);
          base = base_from_object (encoding, ob);
          if (ob->s.b.encoding == DW_EH_PE_omit)
            ob->s.b.encoding = encoding;
          else if (ob->s.b.encoding != encoding)
            ob->s.b.mixed_encoding = 1;
        }

      read_encoded_value_with_base (encoding, base, this_fde->pc_begin,
                                    &pc_begin);

      /* Take care to ignore link-once functions that were removed.
         In these cases, the function address will be NULL, but if
         the encoding is smaller than a pointer a true NULL may not
         be representable.  Assume 0 in the representable bits is NULL.  */
      mask = size_of_encoded_value (encoding);
      if (mask < sizeof (void *))
        mask = (((_Unwind_Ptr) 1) << (mask << 3)) - 1;
      else
        mask = -1;

      if ((pc_begin & mask) == 0)
        continue;

      count += 1;
      /* Track the lowest pc seen; OB->pc_begin started at (void *)-1.  */
      if ((void *) pc_begin < ob->pc_begin)
        ob->pc_begin = (void *) pc_begin;
    }

  return count;
}
647 | |
/* Walk the fde array at THIS_FDE and insert every live FDE into ACCU,
   skipping CIEs and deleted link-once entries.  Must run after
   classify_object_over_fdes so OB's encoding bits are valid.  */
static void
add_fdes (struct object *ob, struct fde_accumulator *accu, const fde *this_fde)
{
  const struct dwarf_cie *last_cie = 0;
  int encoding = ob->s.b.encoding;
  _Unwind_Ptr base = base_from_object (ob->s.b.encoding, ob);

  for (; ! last_fde (ob, this_fde); this_fde = next_fde (this_fde))
    {
      const struct dwarf_cie *this_cie;

      /* Skip CIEs.  */
      if (this_fde->CIE_delta == 0)
        continue;

      if (ob->s.b.mixed_encoding)
        {
          /* Determine the encoding for this FDE.  Note mixed encoded
             objects for later.  */
          this_cie = get_cie (this_fde);
          if (this_cie != last_cie)
            {
              last_cie = this_cie;
              encoding = get_cie_encoding (this_cie);
              base = base_from_object (encoding, ob);
            }
        }

      if (encoding == DW_EH_PE_absptr)
        {
          /* pc_begin may be unaligned; read it bytewise.  */
          _Unwind_Ptr ptr;
          memcpy (&ptr, this_fde->pc_begin, sizeof (_Unwind_Ptr));
          if (ptr == 0)
            continue;
        }
      else
        {
          _Unwind_Ptr pc_begin, mask;

          read_encoded_value_with_base (encoding, base, this_fde->pc_begin,
                                        &pc_begin);

          /* Take care to ignore link-once functions that were removed.
             In these cases, the function address will be NULL, but if
             the encoding is smaller than a pointer a true NULL may not
             be representable.  Assume 0 in the representable bits is NULL.  */
          mask = size_of_encoded_value (encoding);
          if (mask < sizeof (void *))
            mask = (((_Unwind_Ptr) 1) << (mask << 3)) - 1;
          else
            mask = -1;

          if ((pc_begin & mask) == 0)
            continue;
        }

      fde_insert (accu, this_fde);
    }
}
707 | |
/* Set up a sorted array of pointers to FDEs for a loaded object.  We
   count up the entries before allocating the array because it's likely to
   be faster.  We can be called multiple times, should we have failed to
   allocate a sorted fde array on a previous occasion.  */

static inline void
init_object (struct object* ob)
{
  struct fde_accumulator accu;
  size_t count;

  /* A cached nonzero count from a previous (failed) attempt lets us skip
     re-classification.  */
  count = ob->s.b.count;
  if (count == 0)
    {
      if (ob->s.b.from_array)
        {
          fde **p = ob->u.array;
          for (count = 0; *p; ++p)
            count += classify_object_over_fdes (ob, *p);
        }
      else
        count = classify_object_over_fdes (ob, ob->u.single);

      /* The count field we have in the main struct object is somewhat
         limited, but should suffice for virtually all cases.  If the
         counted value doesn't fit, re-write a zero.  The worst that
         happens is that we re-count next time -- admittedly non-trivial
         in that this implies some 2M fdes, but at least we function.  */
      ob->s.b.count = count;
      if (ob->s.b.count != count)
        ob->s.b.count = 0;
    }

  /* No memory for the sort vectors: stay unsorted; the caller falls back
     to linear search and we may retry on a later lookup.  */
  if (!start_fde_sort (&accu, count))
    return;

  if (ob->s.b.from_array)
    {
      fde **p;
      for (p = ob->u.array; *p; ++p)
        add_fdes (ob, &accu, *p);
    }
  else
    add_fdes (ob, &accu, ob->u.single);

  end_fde_sort (ob, &accu, count);

  /* Save the original fde pointer, since this is the key by which the
     DSO will deregister the object.  */
  accu.linear->orig_data = ob->u.single;
  ob->u.sort = accu.linear;

  ob->s.b.sorted = 1;
}
762 | |
/* A linear search through a set of FDEs for the given PC.  This is
   used when there was insufficient memory to allocate and sort an
   array.  Returns the matching FDE or NULL.  */

static const fde *
linear_search_fdes (struct object *ob, const fde *this_fde, void *pc)
{
  const struct dwarf_cie *last_cie = 0;
  int encoding = ob->s.b.encoding;
  _Unwind_Ptr base = base_from_object (ob->s.b.encoding, ob);

  for (; ! last_fde (ob, this_fde); this_fde = next_fde (this_fde))
    {
      const struct dwarf_cie *this_cie;
      _Unwind_Ptr pc_begin, pc_range;

      /* Skip CIEs.  */
      if (this_fde->CIE_delta == 0)
        continue;

      if (ob->s.b.mixed_encoding)
        {
          /* Determine the encoding for this FDE.  Note mixed encoded
             objects for later.  */
          this_cie = get_cie (this_fde);
          if (this_cie != last_cie)
            {
              last_cie = this_cie;
              encoding = get_cie_encoding (this_cie);
              base = base_from_object (encoding, ob);
            }
        }

      if (encoding == DW_EH_PE_absptr)
        {
          /* Unencoded: pc_begin and pc_range are two consecutive
             native words.  */
          const _Unwind_Ptr *pc_array = (const _Unwind_Ptr *) this_fde->pc_begin;
          pc_begin = pc_array[0];
          pc_range = pc_array[1];
          if (pc_begin == 0)
            continue;
        }
      else
        {
          _Unwind_Ptr mask;
          const unsigned char *p;

          /* pc_range uses only the value format (low nibble), with no
             base applied.  */
          p = read_encoded_value_with_base (encoding, base,
                                            this_fde->pc_begin, &pc_begin);
          read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);

          /* Take care to ignore link-once functions that were removed.
             In these cases, the function address will be NULL, but if
             the encoding is smaller than a pointer a true NULL may not
             be representable.  Assume 0 in the representable bits is NULL.  */
          mask = size_of_encoded_value (encoding);
          if (mask < sizeof (void *))
            mask = (((_Unwind_Ptr) 1) << (mask << 3)) - 1;
          else
            mask = -1;

          if ((pc_begin & mask) == 0)
            continue;
        }

      /* Unsigned wraparound makes this a single range check.  */
      if ((_Unwind_Ptr) pc - pc_begin < pc_range)
        return this_fde;
    }

  return NULL;
}
833 | |
/* Binary search for an FDE containing the given PC.  Here are three
   implementations of increasing complexity.  */

/* Variant for objects whose FDEs are all DW_EH_PE_absptr.  */
static inline const fde *
binary_search_unencoded_fdes (struct object *ob, void *pc)
{
  struct fde_vector *vec = ob->u.sort;
  size_t lo, hi;

  for (lo = 0, hi = vec->count; lo < hi; )
    {
      size_t i = (lo + hi) / 2;
      const fde *const f = vec->array[i];
      void *pc_begin;
      uaddr pc_range;
      /* The fields may be unaligned in the section image; copy them out.  */
      memcpy (&pc_begin, (const void * const *) f->pc_begin, sizeof (void *));
      memcpy (&pc_range, (const uaddr *) f->pc_begin + 1, sizeof (uaddr));

      if (pc < pc_begin)
        hi = i;
      else if (pc >= pc_begin + pc_range)
        lo = i + 1;
      else
        return f;
    }

  return NULL;
}
862 | |
/* Binary search variant for objects with one uniform (non-absptr)
   pointer encoding.  */
static inline const fde *
binary_search_single_encoding_fdes (struct object *ob, void *pc)
{
  struct fde_vector *vec = ob->u.sort;
  int encoding = ob->s.b.encoding;
  _Unwind_Ptr base = base_from_object (encoding, ob);
  size_t lo, hi;

  for (lo = 0, hi = vec->count; lo < hi; )
    {
      size_t i = (lo + hi) / 2;
      const fde *f = vec->array[i];
      _Unwind_Ptr pc_begin, pc_range;
      const unsigned char *p;

      /* pc_range uses only the value format (low nibble) with no base.  */
      p = read_encoded_value_with_base (encoding, base, f->pc_begin,
                                        &pc_begin);
      read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);

      if ((_Unwind_Ptr) pc < pc_begin)
        hi = i;
      else if ((_Unwind_Ptr) pc >= pc_begin + pc_range)
        lo = i + 1;
      else
        return f;
    }

  return NULL;
}
892 | |
/* Binary search variant for objects whose FDEs use differing encodings;
   each probe must fetch the encoding from the FDE's own CIE.  */
static inline const fde *
binary_search_mixed_encoding_fdes (struct object *ob, void *pc)
{
  struct fde_vector *vec = ob->u.sort;
  size_t lo, hi;

  for (lo = 0, hi = vec->count; lo < hi; )
    {
      size_t i = (lo + hi) / 2;
      const fde *f = vec->array[i];
      _Unwind_Ptr pc_begin, pc_range;
      const unsigned char *p;
      int encoding;

      encoding = get_fde_encoding (f);
      p = read_encoded_value_with_base (encoding,
                                        base_from_object (encoding, ob),
                                        f->pc_begin, &pc_begin);
      read_encoded_value_with_base (encoding & 0x0F, 0, p, &pc_range);

      if ((_Unwind_Ptr) pc < pc_begin)
        hi = i;
      else if ((_Unwind_Ptr) pc >= pc_begin + pc_range)
        lo = i + 1;
      else
        return f;
    }

  return NULL;
}
923 | |
/* Find the FDE covering PC within object OB, sorting the object's FDEs
   first if memory permits.  Returns NULL if PC is not covered.  */
static const fde *
search_object (struct object* ob, void *pc)
{
  /* If the data hasn't been sorted, try to do this now.  We may have
     more memory available than last time we tried.  */
  if (! ob->s.b.sorted)
    {
      init_object (ob);

      /* Despite the above comment, the normal reason to get here is
         that we've not processed this object before.  A quick range
         check is in order.  */
      if (pc < ob->pc_begin)
        return NULL;
    }

  if (ob->s.b.sorted)
    {
      /* Dispatch to the cheapest applicable binary search.  */
      if (ob->s.b.mixed_encoding)
        return binary_search_mixed_encoding_fdes (ob, pc);
      else if (ob->s.b.encoding == DW_EH_PE_absptr)
        return binary_search_unencoded_fdes (ob, pc);
      else
        return binary_search_single_encoding_fdes (ob, pc);
    }
  else
    {
      /* Long slow laborious linear search, cos we've no memory.  */
      if (ob->s.b.from_array)
        {
          fde **p;
          for (p = ob->u.array; *p ; p++)
            {
              const fde *f = linear_search_fdes (ob, *p, pc);
              if (f)
                return f;
            }
          return NULL;
        }
      else
        return linear_search_fdes (ob, ob->u.single, pc);
    }
}
967 | |
/* Main entry point: find the FDE covering PC across all registered
   objects, filling in BASES for subsequent encoded-value decoding.
   Returns NULL if no FDE covers PC.  */
const fde *
_Unwind_Find_FDE (void *pc, struct dwarf_eh_bases *bases)
{
  struct object *ob;
  const fde *f = NULL;

  init_object_mutex_once ();
  __gthread_mutex_lock (&object_mutex);

  /* Linear search through the classified objects, to find the one
     containing the pc.  Note that pc_begin is sorted descending, and
     we expect objects to be non-overlapping.  */
  for (ob = seen_objects; ob; ob = ob->next)
    if (pc >= ob->pc_begin)
      {
        f = search_object (ob, pc);
        if (f)
          goto fini;
        break;
      }

  /* Classify and search the objects we've not yet processed.  */
  while ((ob = unseen_objects))
    {
      struct object **p;

      unseen_objects = ob->next;
      f = search_object (ob, pc);

      /* Insert the object into the classified list, keeping it sorted
         by descending pc_begin.  */
      for (p = &seen_objects; *p ; p = &(*p)->next)
        if ((*p)->pc_begin < ob->pc_begin)
          break;
      ob->next = *p;
      *p = ob;

      if (f)
        goto fini;
    }

 fini:
  __gthread_mutex_unlock (&object_mutex);

  if (f)
    {
      int encoding;
      _Unwind_Ptr func;

      /* OB still points at the object whose search returned F.  */
      bases->tbase = ob->tbase;
      bases->dbase = ob->dbase;

      encoding = ob->s.b.encoding;
      if (ob->s.b.mixed_encoding)
        encoding = get_fde_encoding (f);
      read_encoded_value_with_base (encoding, base_from_object (encoding, ob),
                                    f->pc_begin, &func);
      bases->func = (void *) func;
    }

  return f;
}