comparison: lld/MachO/UnwindInfoSection.cpp @ 223:5f17cb93ff66 (llvm-original)
LLVM13 (2021/7/18)

author:   Shinji KONO <kono@ie.u-ryukyu.ac.jp>
date:     Sun, 18 Jul 2021 22:43:00 +0900
parents:  79ff65ed7e25
children: c4bab56944e8
--- lld/MachO/UnwindInfoSection.cpp (222:81f6424ef0e3)
+++ lld/MachO/UnwindInfoSection.cpp (223:5f17cb93ff66)
@@ -90,24 +90,26 @@
 // here.
 
 // TODO(gkm): prune __eh_frame entries superseded by __unwind_info, PR50410
 // TODO(gkm): how do we align the 2nd-level pages?
 
-using EncodingMap = llvm::DenseMap<compact_unwind_encoding_t, size_t>;
+using EncodingMap = DenseMap<compact_unwind_encoding_t, size_t>;
 
 struct SecondLevelPage {
   uint32_t kind;
   size_t entryIndex;
   size_t entryCount;
   size_t byteCount;
   std::vector<compact_unwind_encoding_t> localEncodings;
   EncodingMap localEncodingIndexes;
 };
 
-template <class Ptr> class UnwindInfoSectionImpl : public UnwindInfoSection {
+template <class Ptr>
+class UnwindInfoSectionImpl final : public UnwindInfoSection {
 public:
-  void prepareRelocations(InputSection *) override;
+  void prepareRelocations(ConcatInputSection *) override;
+  void addInput(ConcatInputSection *) override;
   void finalize() override;
   void writeTo(uint8_t *buf) const override;
 
 private:
   std::vector<std::pair<compact_unwind_encoding_t, size_t>> commonEncodings;
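For orientation: each entry in __LD,__compact_unwind is a fixed-size record the compiler emits, one per function. Below is a sketch of that layout, modeled on the CompactUnwindEntry definition in lld/MachO/UnwindInfoSection.h (reproduced from memory, so treat the exact declaration as an assumption). The sizeof/offsetof arithmetic in prepareRelocations() further down depends on this field order:

// Pre-link record layout. Ptr is uint32_t or uint64_t depending on the
// target word size, so the whole record is 32 bytes on 64-bit targets.
template <class Ptr> struct CompactUnwindEntry {
  Ptr functionAddress;                // offset 0: relocated function pointer
  uint32_t functionLength;
  compact_unwind_encoding_t encoding; // uint32_t bitfield
  Ptr personality;                    // personality function, usually null
  Ptr lsda;                           // language-specific data area
};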
@@ -117,36 +119,68 @@
   SmallDenseMap<std::pair<InputSection *, uint64_t /* addend */>, Symbol *>
       personalityTable;
   std::vector<unwind_info_section_header_lsda_index_entry> lsdaEntries;
   // Map of function offset (from the image base) to an index within the LSDA
   // array.
-  llvm::DenseMap<uint32_t, uint32_t> functionToLsdaIndex;
+  DenseMap<uint32_t, uint32_t> functionToLsdaIndex;
   std::vector<CompactUnwindEntry<Ptr>> cuVector;
   std::vector<CompactUnwindEntry<Ptr> *> cuPtrVector;
   std::vector<SecondLevelPage> secondLevelPages;
   uint64_t level2PagesOffset = 0;
 };
 
+UnwindInfoSection::UnwindInfoSection()
+    : SyntheticSection(segment_names::text, section_names::unwindInfo) {
+  align = 4;
+  compactUnwindSection =
+      make<ConcatOutputSection>(section_names::compactUnwind);
+}
+
+void UnwindInfoSection::prepareRelocations() {
+  for (ConcatInputSection *isec : compactUnwindSection->inputs)
+    prepareRelocations(isec);
+}
+
+template <class Ptr>
+void UnwindInfoSectionImpl<Ptr>::addInput(ConcatInputSection *isec) {
+  assert(isec->getSegName() == segment_names::ld &&
+         isec->getName() == section_names::compactUnwind);
+  isec->parent = compactUnwindSection;
+  compactUnwindSection->addInput(isec);
+}
+
 // Compact unwind relocations have different semantics, so we handle them in a
 // separate code path from regular relocations. First, we do not wish to add
 // rebase opcodes for __LD,__compact_unwind, because that section doesn't
 // actually end up in the final binary. Second, personality pointers always
 // reside in the GOT and must be treated specially.
 template <class Ptr>
-void UnwindInfoSectionImpl<Ptr>::prepareRelocations(InputSection *isec) {
-  assert(isec->segname == segment_names::ld &&
-         isec->name == section_names::compactUnwind);
+void UnwindInfoSectionImpl<Ptr>::prepareRelocations(ConcatInputSection *isec) {
   assert(!isec->shouldOmitFromOutput() &&
          "__compact_unwind section should not be omitted");
 
-  // FIXME: This could skip relocations for CompactUnwindEntries that
+  // FIXME: Make this skip relocations for CompactUnwindEntries that
   // point to dead-stripped functions. That might save some amount of
   // work. But since there are usually just few personality functions
   // that are referenced from many places, at least some of them likely
   // live, it wouldn't reduce number of got entries.
-  for (Reloc &r : isec->relocs) {
+  for (size_t i = 0; i < isec->relocs.size(); ++i) {
+    Reloc &r = isec->relocs[i];
     assert(target->hasAttr(r.type, RelocAttrBits::UNSIGNED));
+
+    if (r.offset % sizeof(CompactUnwindEntry<Ptr>) == 0) {
+      InputSection *referentIsec;
+      if (auto *isec = r.referent.dyn_cast<InputSection *>())
+        referentIsec = isec;
+      else
+        referentIsec = cast<Defined>(r.referent.dyn_cast<Symbol *>())->isec;
+
+      if (!cast<ConcatInputSection>(referentIsec)->shouldOmitFromOutput())
+        allEntriesAreOmitted = false;
+      continue;
+    }
+
     if (r.offset % sizeof(CompactUnwindEntry<Ptr>) !=
         offsetof(CompactUnwindEntry<Ptr>, personality))
       continue;
 
     if (auto *s = r.referent.dyn_cast<Symbol *>()) {
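The two modulo tests in the loop above classify a relocation purely by its offset within the 32-byte record: an offset of 0 mod sizeof(CompactUnwindEntry) targets the function pointer, an offset equal to offsetof(..., personality) targets the personality slot (the only one that needs a GOT entry), and everything else (the LSDA) needs no preparation. A minimal, self-contained illustration of that arithmetic, with a hypothetical Entry type standing in for the 64-bit record:

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Hypothetical stand-in for CompactUnwindEntry<uint64_t>: 32 bytes total,
// personality at offset 16.
struct Entry {
  uint64_t functionAddress;
  uint32_t functionLength;
  uint32_t encoding;
  uint64_t personality;
  uint64_t lsda;
};

int main() {
  for (size_t off : {0UL, 16UL, 24UL, 32UL, 48UL}) {
    if (off % sizeof(Entry) == 0)
      printf("offset %zu: functionAddress reloc\n", off);
    else if (off % sizeof(Entry) == offsetof(Entry, personality))
      printf("offset %zu: personality reloc -> GOT entry needed\n", off);
    else
      printf("offset %zu: lsda reloc, nothing to prepare\n", off);
  }
}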
@@ -172,12 +206,11 @@
       in.got->addEntry(s);
       continue;
     }
 
     if (auto *referentIsec = r.referent.dyn_cast<InputSection *>()) {
-      assert(!referentIsec->isCoalescedWeak());
-
+      assert(!isCoalescedWeak(referentIsec));
       // Personality functions can be referenced via section relocations
       // if they live in the same object file. Create placeholder synthetic
       // symbols for them in the GOT.
       Symbol *&s = personalityTable[{referentIsec, r.addend}];
       if (s == nullptr) {
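The `Symbol *&s = personalityTable[{referentIsec, r.addend}]` line relies on DenseMap::operator[] value-initializing the slot to nullptr on first access, so one map probe serves as both lookup and insertion point. The same get-or-create idiom with a standard container, as a small sketch (all names made up):

#include <cstdint>
#include <map>
#include <string>
#include <utility>

// Hypothetical cache keyed on (section name, addend), mirroring
// personalityTable's pair key.
static std::map<std::pair<std::string, uint64_t>, int *> table;

int *getOrCreate(const std::string &sec, uint64_t addend) {
  int *&slot = table[{sec, addend}]; // inserts a nullptr slot on first access
  if (slot == nullptr)
    slot = new int(42); // stand-in for creating the synthetic Defined symbol
  return slot;          // later lookups reuse the same object
}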
@@ -198,62 +231,64 @@
 
 // Unwind info lives in __DATA, and finalization of __TEXT will occur before
 // finalization of __DATA. Moreover, the finalization of unwind info depends on
 // the exact addresses that it references. So it is safe for compact unwind to
 // reference addresses in __TEXT, but not addresses in any other segment.
-static void checkTextSegment(InputSection *isec) {
-  if (isec->segname != segment_names::text)
+static ConcatInputSection *checkTextSegment(InputSection *isec) {
+  if (isec->getSegName() != segment_names::text)
     error("compact unwind references address in " + toString(isec) +
           " which is not in segment __TEXT");
+  // __text should always be a ConcatInputSection.
+  return cast<ConcatInputSection>(isec);
 }
+
+template <class Ptr>
+constexpr Ptr TombstoneValue = std::numeric_limits<Ptr>::max();
 
 // We need to apply the relocations to the pre-link compact unwind section
 // before converting it to post-link form. There should only be absolute
 // relocations here: since we are not emitting the pre-link CU section, there
 // is no source address to make a relative location meaningful.
 template <class Ptr>
 static void
 relocateCompactUnwind(ConcatOutputSection *compactUnwindSection,
                       std::vector<CompactUnwindEntry<Ptr>> &cuVector) {
-  for (const InputSection *isec : compactUnwindSection->inputs) {
+  for (const ConcatInputSection *isec : compactUnwindSection->inputs) {
     assert(isec->parent == compactUnwindSection);
 
     uint8_t *buf =
-        reinterpret_cast<uint8_t *>(cuVector.data()) + isec->outSecFileOff;
+        reinterpret_cast<uint8_t *>(cuVector.data()) + isec->outSecOff;
     memcpy(buf, isec->data.data(), isec->data.size());
 
     for (const Reloc &r : isec->relocs) {
-      uint64_t referentVA = 0;
+      uint64_t referentVA = TombstoneValue<Ptr>;
      if (auto *referentSym = r.referent.dyn_cast<Symbol *>()) {
         if (!isa<Undefined>(referentSym)) {
-          assert(referentSym->isInGot());
           if (auto *defined = dyn_cast<Defined>(referentSym))
             checkTextSegment(defined->isec);
           // At this point in the link, we may not yet know the final address of
           // the GOT, so we just encode the index. We make it a 1-based index so
           // that we can distinguish the null pointer case.
           referentVA = referentSym->gotIndex + 1;
         }
-      } else if (auto *referentIsec = r.referent.dyn_cast<InputSection *>()) {
-        checkTextSegment(referentIsec);
-        if (referentIsec->shouldOmitFromOutput())
-          referentVA = UINT64_MAX; // Tombstone value
-        else
-          referentVA = referentIsec->getVA() + r.addend;
+      } else {
+        auto *referentIsec = r.referent.get<InputSection *>();
+        ConcatInputSection *concatIsec = checkTextSegment(referentIsec);
+        if (!concatIsec->shouldOmitFromOutput())
+          referentVA = referentIsec->getVA(r.addend);
       }
-
       writeAddress(buf + r.offset, referentVA, r.length);
     }
   }
 }
 
 // There should only be a handful of unique personality pointers, so we can
 // encode them as 2-bit indices into a small array.
 template <class Ptr>
-void encodePersonalities(
-    const std::vector<CompactUnwindEntry<Ptr> *> &cuPtrVector,
-    std::vector<uint32_t> &personalities) {
+static void
+encodePersonalities(const std::vector<CompactUnwindEntry<Ptr> *> &cuPtrVector,
+                    std::vector<uint32_t> &personalities) {
   for (CompactUnwindEntry<Ptr> *cu : cuPtrVector) {
     if (cu->personality == 0)
       continue;
     // Linear search is fast enough for a small array.
     auto it = find(personalities, cu->personality);
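Note the `gotIndex + 1` in the hunk above: the GOT's final address is not known yet, so the code stores a GOT slot index rather than a VA, biased by one so that a stored 0 still unambiguously means "no personality pointer". A toy round trip of that encoding (the function names and the fixed GOT base are hypothetical):

#include <cassert>
#include <cstdint>

// Encode: store gotIndex + 1 so that 0 still means "no personality".
uint64_t encodePersonalitySlot(bool hasPersonality, uint32_t gotIndex) {
  return hasPersonality ? uint64_t{gotIndex} + 1 : 0;
}

// Decode: once the GOT base VA is known, turn the index back into an address.
uint64_t decodePersonalitySlot(uint64_t slot, uint64_t gotBaseVA,
                               unsigned wordSize) {
  if (slot == 0)
    return 0; // null personality pointer
  return gotBaseVA + (slot - 1) * wordSize;
}

int main() {
  uint64_t slot = encodePersonalitySlot(true, 0);
  assert(slot == 1); // GOT index 0 remains distinguishable from null
  assert(decodePersonalitySlot(slot, 0x100004000, 8) == 0x100004000);
  assert(decodePersonalitySlot(0, 0x100004000, 8) == 0);
}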
@@ -269,10 +304,69 @@
       static_cast<compact_unwind_encoding_t>(UNWIND_PERSONALITY_MASK));
   }
   if (personalities.size() > 3)
     error("too many personalities (" + std::to_string(personalities.size()) +
           ") for compact unwind to encode");
+}
+
+// __unwind_info stores unwind data for address ranges. If several
+// adjacent functions have the same unwind encoding, LSDA, and personality
+// function, they share one unwind entry. For this to work, functions without
+// unwind info need explicit "no unwind info" unwind entries -- else the
+// unwinder would think they have the unwind info of the closest function
+// with unwind info right before in the image.
+template <class Ptr>
+static void addEntriesForFunctionsWithoutUnwindInfo(
+    std::vector<CompactUnwindEntry<Ptr>> &cuVector) {
+  DenseSet<Ptr> hasUnwindInfo;
+  for (CompactUnwindEntry<Ptr> &cuEntry : cuVector)
+    if (cuEntry.functionAddress != TombstoneValue<Ptr>)
+      hasUnwindInfo.insert(cuEntry.functionAddress);
+
+  // Add explicit "has no unwind info" entries for all global and local symbols
+  // without unwind info.
+  auto markNoUnwindInfo = [&cuVector, &hasUnwindInfo](const Defined *d) {
+    if (d->isLive() && d->isec && isCodeSection(d->isec)) {
+      Ptr ptr = d->getVA();
+      if (!hasUnwindInfo.count(ptr))
+        cuVector.push_back({ptr, 0, 0, 0, 0});
+    }
+  };
+  for (Symbol *sym : symtab->getSymbols())
+    if (auto *d = dyn_cast<Defined>(sym))
+      markNoUnwindInfo(d);
+  for (const InputFile *file : inputFiles)
+    if (auto *objFile = dyn_cast<ObjFile>(file))
+      for (Symbol *sym : objFile->symbols)
+        if (auto *d = dyn_cast_or_null<Defined>(sym))
+          if (!d->isExternal())
+            markNoUnwindInfo(d);
+}
+
+static bool canFoldEncoding(compact_unwind_encoding_t encoding) {
+  // From compact_unwind_encoding.h:
+  //  UNWIND_X86_64_MODE_STACK_IND:
+  //   A "frameless" (RBP not used as frame pointer) function large constant
+  //   stack size. This case is like the previous, except the stack size is too
+  //   large to encode in the compact unwind encoding. Instead it requires that
+  //   the function contains "subq $nnnnnnnn,RSP" in its prolog. The compact
+  //   encoding contains the offset to the nnnnnnnn value in the function in
+  //   UNWIND_X86_64_FRAMELESS_STACK_SIZE.
+  // Since this means the unwinder has to look at the `subq` in the function
+  // of the unwind info's unwind address, two functions that have identical
+  // unwind info can't be folded if it's using this encoding since both
+  // entries need unique addresses.
+  static_assert(UNWIND_X86_64_MODE_MASK == UNWIND_X86_MODE_MASK, "");
+  static_assert(UNWIND_X86_64_MODE_STACK_IND == UNWIND_X86_MODE_STACK_IND, "");
+  if ((target->cpuType == CPU_TYPE_X86_64 || target->cpuType == CPU_TYPE_X86) &&
+      (encoding & UNWIND_X86_64_MODE_MASK) == UNWIND_X86_64_MODE_STACK_IND) {
+    // FIXME: Consider passing in the two function addresses and getting
+    // their two stack sizes off the `subq` and only returning false if they're
+    // actually different.
+    return false;
+  }
+  return true;
 }
 
 // Scan the __LD,__compact_unwind entries and compute the space needs of
 // __TEXT,__unwind_info and __TEXT,__eh_frame
 template <class Ptr> void UnwindInfoSectionImpl<Ptr>::finalize() {
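Why the explicit "no unwind info" entries added above matter: final __unwind_info entries describe address ranges, not individual functions, so a PC is attributed to the nearest preceding entry. A toy model of that first-level lookup, showing the misattribution the sentinel entries prevent (addresses and encodings made up):

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <vector>

// Toy model of first-level lookup: each entry covers [functionOffset, next).
struct ToyEntry {
  uint32_t functionOffset;
  uint32_t encoding; // 0 == "no unwind info"
};

uint32_t lookupEncoding(const std::vector<ToyEntry> &table, uint32_t pc) {
  // Find the last entry whose functionOffset <= pc.
  auto it = std::upper_bound(
      table.begin(), table.end(), pc,
      [](uint32_t pc, const ToyEntry &e) { return pc < e.functionOffset; });
  assert(it != table.begin());
  return std::prev(it)->encoding;
}

int main() {
  // _foo at 0x1000 has encoding 0xE, _bar at 0x1040 has no unwind info,
  // _baz at 0x1080 has encoding 0xE again.
  std::vector<ToyEntry> withSentinel = {{0x1000, 0xE}, {0x1040, 0}, {0x1080, 0xE}};
  std::vector<ToyEntry> withoutSentinel = {{0x1000, 0xE}, {0x1080, 0xE}};
  assert(lookupEncoding(withSentinel, 0x1050) == 0); // correct: no unwind info
  assert(lookupEncoding(withoutSentinel, 0x1050) == 0xE); // wrong: _foo's info
}

Note also that the two 0xE entries can no longer be folded across the sentinel, which is why these entries must be added before the folding pass in finalize().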
@@ -292,34 +386,43 @@
   size_t cuCount =
       compactUnwindSection->getSize() / sizeof(CompactUnwindEntry<Ptr>);
   cuVector.resize(cuCount);
   relocateCompactUnwind(compactUnwindSection, cuVector);
 
+  addEntriesForFunctionsWithoutUnwindInfo(cuVector);
+
   // Rather than sort & fold the 32-byte entries directly, we create a
   // vector of pointers to entries and sort & fold that instead.
-  cuPtrVector.reserve(cuCount);
+  cuPtrVector.reserve(cuVector.size());
   for (CompactUnwindEntry<Ptr> &cuEntry : cuVector)
     cuPtrVector.emplace_back(&cuEntry);
   llvm::sort(cuPtrVector, [](const CompactUnwindEntry<Ptr> *a,
                              const CompactUnwindEntry<Ptr> *b) {
     return a->functionAddress < b->functionAddress;
   });
 
-  // Dead-stripped functions get a functionAddress of UINT64_MAX in
+  // Dead-stripped functions get a functionAddress of TombstoneValue in
   // relocateCompactUnwind(). Filter them out here.
   // FIXME: This doesn't yet collect associated data like LSDAs kept
   // alive only by a now-removed CompactUnwindEntry or other comdat-like
   // data (`kindNoneGroupSubordinate*` in ld64).
   CompactUnwindEntry<Ptr> tombstone;
-  tombstone.functionAddress = static_cast<Ptr>(UINT64_MAX);
+  tombstone.functionAddress = TombstoneValue<Ptr>;
   cuPtrVector.erase(
       std::lower_bound(cuPtrVector.begin(), cuPtrVector.end(), &tombstone,
                        [](const CompactUnwindEntry<Ptr> *a,
                           const CompactUnwindEntry<Ptr> *b) {
                          return a->functionAddress < b->functionAddress;
                        }),
       cuPtrVector.end());
+
+  // If there are no entries left after adding explicit "no unwind info"
+  // entries and removing entries for dead-stripped functions, don't write
+  // an __unwind_info section at all.
+  assert(allEntriesAreOmitted == cuPtrVector.empty());
+  if (cuPtrVector.empty())
+    return;
 
   // Fold adjacent entries with matching encoding+personality+lsda
   // We use three iterators on the same cuPtrVector to fold in-situ:
   // (1) `foldBegin` is the first of a potential sequence of matching entries
   // (2) `foldEnd` is the first non-matching entry after `foldBegin`.
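The tombstone filtering in the hunk above works because TombstoneValue is the maximum Ptr value: after sorting by functionAddress, every dead-stripped entry sits at the tail, so a single lower_bound plus erase removes them all. The same trick in isolation:

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <limits>
#include <vector>

int main() {
  constexpr uint64_t kTombstone = std::numeric_limits<uint64_t>::max();
  // Addresses of live functions, plus two dead-stripped (tombstoned) ones.
  std::vector<uint64_t> addrs = {0x1040, kTombstone, 0x1000, kTombstone};

  // Sorting moves every tombstone to the tail, since no real address can
  // compare greater than the maximum value...
  std::sort(addrs.begin(), addrs.end());
  // ...so one lower_bound finds the first tombstone and one erase drops
  // them all, just as finalize() does on cuPtrVector.
  addrs.erase(std::lower_bound(addrs.begin(), addrs.end(), kTombstone),
              addrs.end());

  assert((addrs == std::vector<uint64_t>{0x1000, 0x1040}));
}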
@@ -330,11 +433,12 @@
   for (auto foldBegin = cuPtrVector.begin(); foldBegin < cuPtrVector.end();) {
     auto foldEnd = foldBegin;
     while (++foldEnd < cuPtrVector.end() &&
            (*foldBegin)->encoding == (*foldEnd)->encoding &&
            (*foldBegin)->personality == (*foldEnd)->personality &&
-           (*foldBegin)->lsda == (*foldEnd)->lsda)
+           (*foldBegin)->lsda == (*foldEnd)->lsda &&
+           canFoldEncoding((*foldEnd)->encoding))
       ;
     *foldWrite++ = *foldBegin;
     foldBegin = foldEnd;
   }
   cuPtrVector.erase(foldWrite, cuPtrVector.end());
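The fold loop above is essentially a hand-rolled std::unique with a custom equivalence predicate: `foldBegin`/`foldEnd` delimit each run of matching entries while `foldWrite` compacts the survivors in place. A generic, runnable version of the same three-iterator scheme:

#include <cassert>
#include <vector>

// In-situ fold of adjacent equivalent elements, mirroring the foldBegin /
// foldEnd / foldWrite scheme above.
template <class T, class Eq>
void foldAdjacent(std::vector<T> &v, Eq eq) {
  auto foldWrite = v.begin();
  for (auto foldBegin = v.begin(); foldBegin < v.end();) {
    auto foldEnd = foldBegin;
    while (++foldEnd < v.end() && eq(*foldBegin, *foldEnd))
      ; // extend the run of foldable entries
    *foldWrite++ = *foldBegin; // keep one representative per run
    foldBegin = foldEnd;
  }
  v.erase(foldWrite, v.end());
}

int main() {
  std::vector<int> v = {1, 1, 2, 2, 2, 3, 1};
  foldAdjacent(v, [](int a, int b) { return a == b; });
  assert((v == std::vector<int>{1, 2, 3, 1})); // only adjacent runs fold
}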
@@ -445,10 +549,12 @@
 
 // All inputs are relocated and output addresses are known, so write!
 
 template <class Ptr>
 void UnwindInfoSectionImpl<Ptr>::writeTo(uint8_t *buf) const {
+  assert(!cuPtrVector.empty() && "call only if there is unwind info");
+
   // section header
   auto *uip = reinterpret_cast<unwind_info_section_header *>(buf);
   uip->version = 1;
   uip->commonEncodingsArraySectionOffset = sizeof(unwind_info_section_header);
   uip->commonEncodingsArrayCount = commonEncodings.size();
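The header fields being filled in here come from Apple's <mach-o/compact_unwind_encoding.h>. For reference, a sketch of that struct (reproduced from memory; consult the SDK header for the authoritative definition):

#include <cstdint>
typedef uint32_t compact_unwind_encoding_t;

// All offsets are relative to the start of the __unwind_info section.
struct unwind_info_section_header {
  uint32_t version; // UNWIND_SECTION_VERSION == 1
  uint32_t commonEncodingsArraySectionOffset;
  uint32_t commonEncodingsArrayCount;
  uint32_t personalityArraySectionOffset;
  uint32_t personalityArrayCount;
  uint32_t indexSectionOffset; // first-level index entries
  uint32_t indexCount;
  // Followed in the section by the common-encodings array, the personality
  // array (indexed by the 2-bit personality field of an encoding), the
  // first-level index, and the LSDA index.
};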
@@ -485,12 +591,14 @@
                          sizeof(unwind_info_section_header_lsda_index_entry);
     iep++;
     l2PagesOffset += SECOND_LEVEL_PAGE_BYTES;
   }
   // Level-1 sentinel
-  const CompactUnwindEntry<Ptr> &cuEnd = cuVector.back();
-  iep->functionOffset = cuEnd.functionAddress + cuEnd.functionLength;
+  const CompactUnwindEntry<Ptr> &cuEnd = *cuPtrVector.back();
+  assert(cuEnd.functionAddress != TombstoneValue<Ptr>);
+  iep->functionOffset =
+      cuEnd.functionAddress - in.header->addr + cuEnd.functionLength;
   iep->secondLevelPagesSectionOffset = 0;
   iep->lsdaIndexArraySectionOffset =
       lsdaOffset +
       lsdaEntries.size() * sizeof(unwind_info_section_header_lsda_index_entry);
   iep++;
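The sentinel written in this last hunk is the extra level-1 entry whose functionOffset is the first byte past the last covered function, so the unwinder treats PCs at or beyond it as uncovered; the new code also switches the offset to image-relative form. The arithmetic, with made-up numbers:

#include <cstdint>

int main() {
  // Hypothetical values: image base, plus the last function's VA and length.
  uint64_t imageBase = 0x100000000;  // in.header->addr
  uint64_t lastFuncVA = 0x100001000; // cuEnd.functionAddress
  uint64_t lastFuncLen = 0x40;       // cuEnd.functionLength
  // Mirrors: cuEnd.functionAddress - in.header->addr + cuEnd.functionLength
  uint32_t sentinelOffset =
      static_cast<uint32_t>(lastFuncVA - imageBase + lastFuncLen); // 0x1040
  (void)sentinelOffset;
  return 0;
}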