CbC_llvm: comparison of lib/ExecutionEngine/JITLink/JITLinkGeneric.cpp @ 148:63bd29f05246 ("merged")

author:   Shinji KONO <kono@ie.u-ryukyu.ac.jp>
date:     Wed, 14 Aug 2019 19:46:37 +0900
parents:  c2174574ed3a
children: (none)
comparing 146:3fc4d5c3e21e with 148:63bd29f05246:
//===--------- JITLinkGeneric.cpp - Generic JIT linker utilities ----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Generic JITLinker utility class.
//
//===----------------------------------------------------------------------===//

#include "JITLinkGeneric.h"
#include "EHFrameSupportImpl.h"

#include "llvm/Support/BinaryStreamReader.h"
#include "llvm/Support/MemoryBuffer.h"

#define DEBUG_TYPE "jitlink"

namespace llvm {
namespace jitlink {

JITLinkerBase::~JITLinkerBase() {}

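// Phase 1 of the link: build and prune the atom graph, lay atoms out into
// segments, allocate working memory, then kick off an asynchronous lookup of
// external symbols. linkPhase2 runs as the continuation of that lookup.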
void JITLinkerBase::linkPhase1(std::unique_ptr<JITLinkerBase> Self) {

  // Build the atom graph.
  if (auto GraphOrErr = buildGraph(Ctx->getObjectBuffer()))
    G = std::move(*GraphOrErr);
  else
    return Ctx->notifyFailed(GraphOrErr.takeError());
  assert(G && "Graph should have been created by buildGraph above");

  // Prune and optimize the graph.
  if (auto Err = runPasses(Passes.PrePrunePasses, *G))
    return Ctx->notifyFailed(std::move(Err));

  LLVM_DEBUG({
    dbgs() << "Atom graph \"" << G->getName() << "\" pre-pruning:\n";
    dumpGraph(dbgs());
  });

  prune(*G);

  LLVM_DEBUG({
    dbgs() << "Atom graph \"" << G->getName() << "\" post-pruning:\n";
    dumpGraph(dbgs());
  });

  // Run post-pruning passes.
  if (auto Err = runPasses(Passes.PostPrunePasses, *G))
    return Ctx->notifyFailed(std::move(Err));

  // Sort atoms into segments.
  layOutAtoms();

  // Allocate memory for segments.
  if (auto Err = allocateSegments(Layout))
    return Ctx->notifyFailed(std::move(Err));

  // Notify client that the defined atoms have been assigned addresses.
  Ctx->notifyResolved(*G);

  auto ExternalSymbols = getExternalSymbolNames();

  // We're about to hand off ownership of ourself to the continuation. Grab a
  // pointer to the context so that we can call it to initiate the lookup.
  //
  // FIXME: Once callee expressions are defined to be sequenced before argument
  // expressions (c++17) we can simplify all this to:
  //
  // Ctx->lookup(std::move(UnresolvedExternals),
  //             [Self=std::move(Self)](Expected<AsyncLookupResult> Result) {
  //               Self->linkPhase2(std::move(Self), std::move(Result));
  //             });
  //
  // FIXME: Use move capture once we have c++14.
  auto *TmpCtx = Ctx.get();
  auto *UnownedSelf = Self.release();
  auto Phase2Continuation =
      [UnownedSelf](Expected<AsyncLookupResult> LookupResult) {
        std::unique_ptr<JITLinkerBase> Self(UnownedSelf);
        UnownedSelf->linkPhase2(std::move(Self), std::move(LookupResult));
      };
  TmpCtx->lookup(std::move(ExternalSymbols), std::move(Phase2Continuation));
}

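// Phase 2 of the link: record the addresses returned by the symbol lookup,
// copy atom content into working memory and apply fixups, then finalize the
// allocation asynchronously. linkPhase3 runs as the finalize continuation.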
void JITLinkerBase::linkPhase2(std::unique_ptr<JITLinkerBase> Self,
                               Expected<AsyncLookupResult> LR) {
  // If the lookup failed, bail out.
  if (!LR)
    return deallocateAndBailOut(LR.takeError());

  // Assign addresses to external atoms.
  applyLookupResult(*LR);

  LLVM_DEBUG({
    dbgs() << "Atom graph \"" << G->getName() << "\" before copy-and-fixup:\n";
    dumpGraph(dbgs());
  });

  // Copy atom content to working memory and fix up.
  if (auto Err = copyAndFixUpAllAtoms(Layout, *Alloc))
    return deallocateAndBailOut(std::move(Err));

  LLVM_DEBUG({
    dbgs() << "Atom graph \"" << G->getName() << "\" after copy-and-fixup:\n";
    dumpGraph(dbgs());
  });

  if (auto Err = runPasses(Passes.PostFixupPasses, *G))
    return deallocateAndBailOut(std::move(Err));

  // FIXME: Use move capture once we have c++14.
  auto *UnownedSelf = Self.release();
  auto Phase3Continuation = [UnownedSelf](Error Err) {
    std::unique_ptr<JITLinkerBase> Self(UnownedSelf);
    UnownedSelf->linkPhase3(std::move(Self), std::move(Err));
  };

  Alloc->finalizeAsync(std::move(Phase3Continuation));
}

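// Phase 3 of the link: if finalization failed, deallocate and report the
// error; otherwise hand the finalized allocation to the context.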
void JITLinkerBase::linkPhase3(std::unique_ptr<JITLinkerBase> Self, Error Err) {
  if (Err)
    return deallocateAndBailOut(std::move(Err));
  Ctx->notifyFinalized(std::move(Alloc));
}

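// Run each pass over the graph in order, stopping at the first failure.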
Error JITLinkerBase::runPasses(AtomGraphPassList &Passes, AtomGraph &G) {
  for (auto &P : Passes)
    if (auto Err = P(G))
      return Err;
  return Error::success();
}

void JITLinkerBase::layOutAtoms() {
  // Group sections by protections, and whether or not they're zero-fill.
  for (auto &S : G->sections()) {

    // Skip empty sections.
    if (S.atoms_empty())
      continue;

    auto &SL = Layout[S.getProtectionFlags()];
    if (S.isZeroFill())
      SL.ZeroFillSections.push_back(SegmentLayout::SectionLayout(S));
    else
      SL.ContentSections.push_back(SegmentLayout::SectionLayout(S));
  }

  // Sort sections within the layout by ordinal.
  {
    auto CompareByOrdinal = [](const SegmentLayout::SectionLayout &LHS,
                               const SegmentLayout::SectionLayout &RHS) {
      return LHS.S->getSectionOrdinal() < RHS.S->getSectionOrdinal();
    };
    for (auto &KV : Layout) {
      auto &SL = KV.second;
      std::sort(SL.ContentSections.begin(), SL.ContentSections.end(),
                CompareByOrdinal);
      std::sort(SL.ZeroFillSections.begin(), SL.ZeroFillSections.end(),
                CompareByOrdinal);
    }
  }

  // Add atoms to the sections.
  for (auto &KV : Layout) {
    auto &SL = KV.second;
    for (auto *SIList : {&SL.ContentSections, &SL.ZeroFillSections}) {
      for (auto &SI : *SIList) {
        // First build the set of layout-heads (i.e. "heads" of layout-next
        // chains) by copying the section atoms, then eliminating any that
        // appear as layout-next targets.
        DenseSet<DefinedAtom *> LayoutHeads;
        for (auto *DA : SI.S->atoms())
          LayoutHeads.insert(DA);

        for (auto *DA : SI.S->atoms())
          if (DA->hasLayoutNext())
            LayoutHeads.erase(&DA->getLayoutNext());

        // Next, sort the layout heads by address order.
        std::vector<DefinedAtom *> OrderedLayoutHeads;
        OrderedLayoutHeads.reserve(LayoutHeads.size());
        for (auto *DA : LayoutHeads)
          OrderedLayoutHeads.push_back(DA);

        // Now sort the list of layout heads by address.
        std::sort(OrderedLayoutHeads.begin(), OrderedLayoutHeads.end(),
                  [](const DefinedAtom *LHS, const DefinedAtom *RHS) {
                    return LHS->getAddress() < RHS->getAddress();
                  });

        // Now populate the SI.Atoms field by appending each of the chains.
        for (auto *DA : OrderedLayoutHeads) {
          SI.Atoms.push_back(DA);
          while (DA->hasLayoutNext()) {
            auto &Next = DA->getLayoutNext();
            SI.Atoms.push_back(&Next);
            DA = &Next;
          }
        }
      }
    }
  }

  LLVM_DEBUG({
    dbgs() << "Segment ordering:\n";
    for (auto &KV : Layout) {
      dbgs() << "  Segment "
             << static_cast<sys::Memory::ProtectionFlags>(KV.first) << ":\n";
      auto &SL = KV.second;
      for (auto &SIEntry :
           {std::make_pair(&SL.ContentSections, "content sections"),
            std::make_pair(&SL.ZeroFillSections, "zero-fill sections")}) {
        auto &SIList = *SIEntry.first;
        dbgs() << "    " << SIEntry.second << ":\n";
        for (auto &SI : SIList) {
          dbgs() << "      " << SI.S->getName() << ":\n";
          for (auto *DA : SI.Atoms)
            dbgs() << "        " << *DA << "\n";
        }
      }
    }
  });
}

Error JITLinkerBase::allocateSegments(const SegmentLayoutMap &Layout) {

  // Compute segment sizes and allocate memory.
  LLVM_DEBUG(dbgs() << "JIT linker requesting: { ");
  JITLinkMemoryManager::SegmentsRequestMap Segments;
  for (auto &KV : Layout) {
    auto &Prot = KV.first;
    auto &SegLayout = KV.second;

    // Calculate segment content size.
    size_t SegContentSize = 0;
    for (auto &SI : SegLayout.ContentSections) {
      assert(!SI.S->atoms_empty() && "Sections in layout must not be empty");
      assert(!SI.Atoms.empty() && "Section layouts must not be empty");

      // Bump to section alignment before processing atoms.
      SegContentSize = alignTo(SegContentSize, SI.S->getAlignment());

      for (auto *DA : SI.Atoms) {
        SegContentSize = alignTo(SegContentSize, DA->getAlignment());
        SegContentSize += DA->getSize();
      }
    }

    // Get segment content alignment.
    unsigned SegContentAlign = 1;
    if (!SegLayout.ContentSections.empty()) {
      auto &FirstContentSection = SegLayout.ContentSections.front();
      SegContentAlign =
          std::max(FirstContentSection.S->getAlignment(),
                   FirstContentSection.Atoms.front()->getAlignment());
    }

    // Calculate segment zero-fill size.
    uint64_t SegZeroFillSize = 0;
    for (auto &SI : SegLayout.ZeroFillSections) {
      assert(!SI.S->atoms_empty() && "Sections in layout must not be empty");
      assert(!SI.Atoms.empty() && "Section layouts must not be empty");

      // Bump to section alignment before processing atoms.
      SegZeroFillSize = alignTo(SegZeroFillSize, SI.S->getAlignment());

      for (auto *DA : SI.Atoms) {
        SegZeroFillSize = alignTo(SegZeroFillSize, DA->getAlignment());
        SegZeroFillSize += DA->getSize();
      }
    }

    // Calculate segment zero-fill alignment.
    uint32_t SegZeroFillAlign = 1;

    if (!SegLayout.ZeroFillSections.empty()) {
      auto &FirstZeroFillSection = SegLayout.ZeroFillSections.front();
      SegZeroFillAlign =
          std::max(FirstZeroFillSection.S->getAlignment(),
                   FirstZeroFillSection.Atoms.front()->getAlignment());
    }

    if (SegContentSize == 0)
      SegContentAlign = SegZeroFillAlign;

    if (SegContentAlign % SegZeroFillAlign != 0)
      return make_error<JITLinkError>("First content atom alignment does not "
                                      "accommodate first zero-fill atom "
                                      "alignment");

    Segments[Prot] = {SegContentSize, SegContentAlign, SegZeroFillSize,
                      SegZeroFillAlign};

    LLVM_DEBUG({
      dbgs() << (&KV == &*Layout.begin() ? "" : "; ")
             << static_cast<sys::Memory::ProtectionFlags>(Prot) << ": "
             << SegContentSize << " content bytes (alignment "
             << SegContentAlign << ") + " << SegZeroFillSize
             << " zero-fill bytes (alignment " << SegZeroFillAlign << ")";
    });
  }
  LLVM_DEBUG(dbgs() << " }\n");

  if (auto AllocOrErr = Ctx->getMemoryManager().allocate(Segments))
    Alloc = std::move(*AllocOrErr);
  else
    return AllocOrErr.takeError();

  LLVM_DEBUG({
    dbgs() << "JIT linker got working memory:\n";
    for (auto &KV : Layout) {
      auto Prot = static_cast<sys::Memory::ProtectionFlags>(KV.first);
      dbgs() << "  " << Prot << ": "
             << (const void *)Alloc->getWorkingMemory(Prot).data() << "\n";
    }
  });

  // Update atom target addresses.
  for (auto &KV : Layout) {
    auto &Prot = KV.first;
    auto &SL = KV.second;

    JITTargetAddress AtomTargetAddr =
        Alloc->getTargetMemory(static_cast<sys::Memory::ProtectionFlags>(Prot));

    for (auto *SIList : {&SL.ContentSections, &SL.ZeroFillSections})
      for (auto &SI : *SIList) {
        AtomTargetAddr = alignTo(AtomTargetAddr, SI.S->getAlignment());
        for (auto *DA : SI.Atoms) {
          AtomTargetAddr = alignTo(AtomTargetAddr, DA->getAlignment());
          DA->setAddress(AtomTargetAddr);
          AtomTargetAddr += DA->getSize();
        }
      }
  }

  return Error::success();
}

DenseSet<StringRef> JITLinkerBase::getExternalSymbolNames() const {
  // Identify unresolved external atoms.
  DenseSet<StringRef> UnresolvedExternals;
  for (auto *DA : G->external_atoms()) {
    assert(DA->getAddress() == 0 &&
           "External has already been assigned an address");
    assert(DA->getName() != StringRef() && DA->getName() != "" &&
           "Externals must be named");
    UnresolvedExternals.insert(DA->getName());
  }
  return UnresolvedExternals;
}

void JITLinkerBase::applyLookupResult(AsyncLookupResult Result) {
  for (auto &KV : Result) {
    Atom &A = G->getAtomByName(KV.first);
    assert(A.getAddress() == 0 && "Atom already resolved");
    A.setAddress(KV.second.getAddress());
  }

  LLVM_DEBUG({
    dbgs() << "Externals after applying lookup result:\n";
    for (auto *A : G->external_atoms())
      dbgs() << "  " << A->getName() << ": "
             << formatv("{0:x16}", A->getAddress()) << "\n";
  });
  assert(llvm::all_of(G->external_atoms(),
                      [](Atom *A) { return A->getAddress() != 0; }) &&
         "All atoms should have been resolved by this point");
}

void JITLinkerBase::deallocateAndBailOut(Error Err) {
  assert(Err && "Should not be bailing out on success value");
  assert(Alloc && "can not call deallocateAndBailOut before allocation");
  Ctx->notifyFailed(joinErrors(std::move(Err), Alloc->deallocate()));
}

void JITLinkerBase::dumpGraph(raw_ostream &OS) {
  assert(G && "Graph is not set yet");
  G->dump(dbgs(), [this](Edge::Kind K) { return getEdgeKindName(K); });
}

void prune(AtomGraph &G) {
  std::vector<DefinedAtom *> Worklist;
  DenseMap<DefinedAtom *, std::vector<Edge *>> EdgesToUpdate;

  // Build the initial worklist from all atoms initially live.
  for (auto *DA : G.defined_atoms()) {
    if (!DA->isLive() || DA->shouldDiscard())
      continue;

    for (auto &E : DA->edges()) {
      if (!E.getTarget().isDefined())
        continue;

      auto &EDT = static_cast<DefinedAtom &>(E.getTarget());

      if (EDT.shouldDiscard())
        EdgesToUpdate[&EDT].push_back(&E);
      else if (E.isKeepAlive() && !EDT.isLive())
        Worklist.push_back(&EDT);
    }
  }

  // Propagate live flags to all atoms reachable from the initial live set.
  while (!Worklist.empty()) {
    DefinedAtom &NextLive = *Worklist.back();
    Worklist.pop_back();

    assert(!NextLive.shouldDiscard() &&
           "should-discard nodes should never make it into the worklist");

    // If this atom has already been marked as live, or is marked to be
    // discarded, then skip it.
    if (NextLive.isLive())
      continue;

    // Otherwise set it as live and add any non-live atoms that it points to
    // to the worklist.
    NextLive.setLive(true);

    for (auto &E : NextLive.edges()) {
      if (!E.getTarget().isDefined())
        continue;

      auto &EDT = static_cast<DefinedAtom &>(E.getTarget());

      if (EDT.shouldDiscard())
        EdgesToUpdate[&EDT].push_back(&E);
      else if (E.isKeepAlive() && !EDT.isLive())
        Worklist.push_back(&EDT);
    }
  }

  // Collect atoms to remove, then remove them from the graph.
  std::vector<DefinedAtom *> AtomsToRemove;
  for (auto *DA : G.defined_atoms())
    if (DA->shouldDiscard() || !DA->isLive())
      AtomsToRemove.push_back(DA);

  LLVM_DEBUG(dbgs() << "Pruning atoms:\n");
  for (auto *DA : AtomsToRemove) {
    LLVM_DEBUG(dbgs() << "  " << *DA << "... ");

    // Check whether we need to replace this atom with an external atom.
    //
    // We replace if all of the following hold:
    //   (1) The atom is marked should-discard,
    //   (2) it has live edges (i.e. edges from live atoms) pointing to it.
    //
    // Otherwise we simply delete the atom.

    G.removeDefinedAtom(*DA);

    auto EdgesToUpdateItr = EdgesToUpdate.find(DA);
    if (EdgesToUpdateItr != EdgesToUpdate.end()) {
      auto &ExternalReplacement = G.addExternalAtom(DA->getName());
      for (auto *EdgeToUpdate : EdgesToUpdateItr->second)
        EdgeToUpdate->setTarget(ExternalReplacement);
      LLVM_DEBUG(dbgs() << "replaced with " << ExternalReplacement << "\n");
    } else
      LLVM_DEBUG(dbgs() << "deleted\n");
  }

  // Finally, discard any absolute symbols that were marked should-discard.
  {
    std::vector<Atom *> AbsoluteAtomsToRemove;
    for (auto *A : G.absolute_atoms())
      if (A->shouldDiscard() || A->isLive())
        AbsoluteAtomsToRemove.push_back(A);
    for (auto *A : AbsoluteAtomsToRemove)
      G.removeAbsoluteAtom(*A);
  }
}

} // end namespace jitlink
} // end namespace llvm
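The phase methods above hand ownership of the linker object to each asynchronous continuation by releasing the unique_ptr, capturing the raw pointer, and re-wrapping it inside the callback (the workaround flagged by the "Use move capture once we have c++14" FIXMEs). Below is a minimal, self-contained sketch of that hand-off pattern, separate from the file above; the names Task, phase2, and runAsync are hypothetical and exist only for illustration.

#include <functional>
#include <iostream>
#include <memory>

// Hypothetical stand-in for JITLinkerBase: phase2 takes back ownership of the
// object via its unique_ptr parameter, mirroring linkPhase2 above.
struct Task {
  void phase2(std::unique_ptr<Task> Self) {
    std::cout << "phase2 owns the task\n";
    // Self destroys the Task when it goes out of scope.
  }
};

// Hypothetical async driver; here it simply runs the continuation inline.
static void runAsync(std::function<void()> Continuation) { Continuation(); }

int main() {
  std::unique_ptr<Task> T(new Task());

  // C++11-compatible hand-off, as in linkPhase1/linkPhase2: release the
  // unique_ptr, capture the raw pointer by value, and re-wrap it inside the
  // continuation. With C++14 generalized lambda capture this could instead be
  // written as [T = std::move(T)], provided the continuation type can hold a
  // move-only callable.
  auto *UnownedT = T.release();
  runAsync([UnownedT]() {
    std::unique_ptr<Task> OwnedT(UnownedT);
    UnownedT->phase2(std::move(OwnedT));
  });
  return 0;
}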