comparison: lib/Transforms/IPO/GlobalOpt.cpp @ 0:95c75e76d11b (LLVM 3.4)

author:   Kaito Tokumori <e105711@ie.u-ryukyu.ac.jp>
date:     Thu, 12 Dec 2013 13:56:28 +0900
parents:  (none)
children: e4204d083e25
comparing revisions -1:000000000000 and 0:95c75e76d11b
//===- GlobalOpt.cpp - Optimize Global Variables --------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass transforms simple global variables that never have their address
// taken.  If obviously true, it marks read/write globals as constant, deletes
// variables only stored to, etc.
//
//===----------------------------------------------------------------------===//

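// A sketch of the kind of rewrite this pass performs (hypothetical IR, not
// from any particular test case): given a non-escaping global with a single
// constant store,
//
//   @flag = internal global i32 0
//   define void @init() { store i32 1, i32* @flag  ret void }
//
// the pass can fold loads of @flag to the stored value and, once no uses
// remain, delete the store and the global itself.
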
#define DEBUG_TYPE "globalopt"
#include "llvm/Transforms/IPO.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/Pass.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Transforms/Utils/GlobalStatus.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
#include <algorithm>
using namespace llvm;

STATISTIC(NumMarked     , "Number of globals marked constant");
STATISTIC(NumUnnamed    , "Number of globals marked unnamed_addr");
STATISTIC(NumSRA        , "Number of aggregate globals broken into scalars");
STATISTIC(NumHeapSRA    , "Number of heap objects SRA'd");
STATISTIC(NumSubstitute, "Number of globals with initializers stored into them");
STATISTIC(NumDeleted    , "Number of globals deleted");
STATISTIC(NumFnDeleted  , "Number of functions deleted");
STATISTIC(NumGlobUses   , "Number of global uses devirtualized");
STATISTIC(NumLocalized  , "Number of globals localized");
STATISTIC(NumShrunkToBool  , "Number of global vars shrunk to booleans");
STATISTIC(NumFastCallFns   , "Number of functions converted to fastcc");
STATISTIC(NumCtorsEvaluated, "Number of static ctors evaluated");
STATISTIC(NumNestRemoved   , "Number of nest attributes removed");
STATISTIC(NumAliasesResolved, "Number of global aliases resolved");
STATISTIC(NumAliasesRemoved, "Number of global aliases eliminated");
STATISTIC(NumCXXDtorsRemoved, "Number of global C++ destructors removed");

namespace {
struct GlobalOpt : public ModulePass {
  virtual void getAnalysisUsage(AnalysisUsage &AU) const {
    AU.addRequired<TargetLibraryInfo>();
  }
  static char ID; // Pass identification, replacement for typeid
  GlobalOpt() : ModulePass(ID) {
    initializeGlobalOptPass(*PassRegistry::getPassRegistry());
  }

  bool runOnModule(Module &M);

private:
  GlobalVariable *FindGlobalCtors(Module &M);
  bool OptimizeFunctions(Module &M);
  bool OptimizeGlobalVars(Module &M);
  bool OptimizeGlobalAliases(Module &M);
  bool OptimizeGlobalCtorsList(GlobalVariable *&GCL);
  bool ProcessGlobal(GlobalVariable *GV, Module::global_iterator &GVI);
  bool ProcessInternalGlobal(GlobalVariable *GV, Module::global_iterator &GVI,
                             const GlobalStatus &GS);
  bool OptimizeEmptyGlobalCXXDtors(Function *CXAAtExitFn);

  DataLayout *TD;
  TargetLibraryInfo *TLI;
};
}

char GlobalOpt::ID = 0;
INITIALIZE_PASS_BEGIN(GlobalOpt, "globalopt",
                      "Global Variable Optimizer", false, false)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo)
INITIALIZE_PASS_END(GlobalOpt, "globalopt",
                    "Global Variable Optimizer", false, false)

ModulePass *llvm::createGlobalOptimizerPass() { return new GlobalOpt(); }

/// isLeakCheckerRoot - Is this global variable possibly used by a leak checker
/// as a root?  If so, we might not really want to eliminate the stores to it.
static bool isLeakCheckerRoot(GlobalVariable *GV) {
  // A global variable is a root if it is a pointer, or could plausibly contain
  // a pointer.  There are two challenges; one is that we could have a struct
  // that has an inner member which is a pointer.  We recurse through the type
  // to
  // detect these (up to a point).  The other is that we may actually be a union
  // of a pointer and another type, and so our LLVM type is an integer which
  // gets converted into a pointer, or our type is an [i8 x #] with a pointer
  // potentially contained here.

  if (GV->hasPrivateLinkage())
    return false;

  SmallVector<Type *, 4> Types;
  Types.push_back(cast<PointerType>(GV->getType())->getElementType());

  unsigned Limit = 20;
  do {
    Type *Ty = Types.pop_back_val();
    switch (Ty->getTypeID()) {
    default: break;
    case Type::PointerTyID: return true;
    case Type::ArrayTyID:
    case Type::VectorTyID: {
      SequentialType *STy = cast<SequentialType>(Ty);
      Types.push_back(STy->getElementType());
      break;
    }
    case Type::StructTyID: {
      StructType *STy = cast<StructType>(Ty);
      if (STy->isOpaque()) return true;
      for (StructType::element_iterator I = STy->element_begin(),
           E = STy->element_end(); I != E; ++I) {
        Type *InnerTy = *I;
        if (isa<PointerType>(InnerTy)) return true;
        if (isa<CompositeType>(InnerTy))
          Types.push_back(InnerTy);
      }
      break;
    }
    }
    if (--Limit == 0) return true;
  } while (!Types.empty());
  return false;
}

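// For example (hypothetical globals, illustration only):
//   @cache = internal global i8* null      ; pointer-typed: treated as a root
//   @count = internal global i32 0         ; cannot hold a pointer: not a root
// A struct global with a pointer member anywhere inside it is also a root.
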
/// Given a value that is stored to a global but never read, determine whether
/// it's safe to remove the store and the chain of computation that feeds the
/// store.
static bool IsSafeComputationToRemove(Value *V, const TargetLibraryInfo *TLI) {
  do {
    if (isa<Constant>(V))
      return true;
    if (!V->hasOneUse())
      return false;
    if (isa<LoadInst>(V) || isa<InvokeInst>(V) || isa<Argument>(V) ||
        isa<GlobalValue>(V))
      return false;
    if (isAllocationFn(V, TLI))
      return true;

    Instruction *I = cast<Instruction>(V);
    if (I->mayHaveSideEffects())
      return false;
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) {
      if (!GEP->hasAllConstantIndices())
        return false;
    } else if (I->getNumOperands() != 1) {
      return false;
    }

    V = I->getOperand(0);
  } while (1);
}

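// E.g. (illustrative IR): for a store that is never read,
//   %buf = call i8* @malloc(i64 16)
//   %p   = getelementptr i8* %buf, i64 8
//   store i8* %p, i8** @root
// the stored value %p is safe to remove: it is a single-use GEP with constant
// indices whose chain bottoms out in an allocation call.
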
/// CleanupPointerRootUsers - This GV is a pointer root.  Loop over all users
/// of the global and clean up any that obviously don't assign the global a
/// value that isn't dynamically allocated.
///
static bool CleanupPointerRootUsers(GlobalVariable *GV,
                                    const TargetLibraryInfo *TLI) {
  // A brief explanation of leak checkers.  The goal is to find bugs where
  // pointers are forgotten, causing an accumulating growth in memory
  // usage over time.  The common strategy for leak checkers is to whitelist the
  // memory pointed to by globals at exit.  This is popular because it also
  // solves another problem where the main thread of a C++ program may shut down
  // before other threads that are still expecting to use those globals.  To
  // handle that case, we expect the program may create a singleton and never
  // destroy it.

  bool Changed = false;

  // If Dead[n].first is the only use of a malloc result, we can delete its
  // chain of computation and the store to the global in Dead[n].second.
  SmallVector<std::pair<Instruction *, Instruction *>, 32> Dead;

  // Constants can't be pointers to dynamically allocated memory.
  for (Value::use_iterator UI = GV->use_begin(), E = GV->use_end();
       UI != E;) {
    User *U = *UI++;
    if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
      Value *V = SI->getValueOperand();
      if (isa<Constant>(V)) {
        Changed = true;
        SI->eraseFromParent();
      } else if (Instruction *I = dyn_cast<Instruction>(V)) {
        if (I->hasOneUse())
          Dead.push_back(std::make_pair(I, SI));
      }
    } else if (MemSetInst *MSI = dyn_cast<MemSetInst>(U)) {
      if (isa<Constant>(MSI->getValue())) {
        Changed = true;
        MSI->eraseFromParent();
      } else if (Instruction *I = dyn_cast<Instruction>(MSI->getValue())) {
        if (I->hasOneUse())
          Dead.push_back(std::make_pair(I, MSI));
      }
    } else if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(U)) {
      GlobalVariable *MemSrc = dyn_cast<GlobalVariable>(MTI->getSource());
      if (MemSrc && MemSrc->isConstant()) {
        Changed = true;
        MTI->eraseFromParent();
      } else if (Instruction *I = dyn_cast<Instruction>(MemSrc)) {
        if (I->hasOneUse())
          Dead.push_back(std::make_pair(I, MTI));
      }
    } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(U)) {
      if (CE->use_empty()) {
        CE->destroyConstant();
        Changed = true;
      }
    } else if (Constant *C = dyn_cast<Constant>(U)) {
      if (isSafeToDestroyConstant(C)) {
        C->destroyConstant();
        // This could have invalidated UI, start over from scratch.
        Dead.clear();
        CleanupPointerRootUsers(GV, TLI);
        return true;
      }
    }
  }

  for (int i = 0, e = Dead.size(); i != e; ++i) {
    if (IsSafeComputationToRemove(Dead[i].first, TLI)) {
      Dead[i].second->eraseFromParent();
      Instruction *I = Dead[i].first;
      do {
        if (isAllocationFn(I, TLI))
          break;
        Instruction *J = dyn_cast<Instruction>(I->getOperand(0));
        if (!J)
          break;
        I->eraseFromParent();
        I = J;
      } while (1);
      I->eraseFromParent();
    }
  }

  return Changed;
}

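// As an illustration (hypothetical IR): given
//   %m = call i8* @malloc(i64 8)
//   store i8* %m, i8** @root
// where the store is %m's only use, the pair (%m, store) is queued in Dead;
// the store is erased first, then the allocation chain is unwound bottom-up.
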
/// CleanupConstantGlobalUsers - We just marked GV constant.  Loop over all
/// users of the global, cleaning up the obvious ones.  This is largely just a
/// quick scan over the use list to clean up the easy and obvious cruft.  This
/// returns true if it made a change.
static bool CleanupConstantGlobalUsers(Value *V, Constant *Init,
                                       DataLayout *TD, TargetLibraryInfo *TLI) {
  bool Changed = false;
  SmallVector<User*, 8> WorkList(V->use_begin(), V->use_end());
  while (!WorkList.empty()) {
    User *U = WorkList.pop_back_val();

    if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
      if (Init) {
        // Replace the load with the initializer.
        LI->replaceAllUsesWith(Init);
        LI->eraseFromParent();
        Changed = true;
      }
    } else if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
      // Store must be unreachable or storing Init into the global.
      SI->eraseFromParent();
      Changed = true;
    } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(U)) {
      if (CE->getOpcode() == Instruction::GetElementPtr) {
        Constant *SubInit = 0;
        if (Init)
          SubInit = ConstantFoldLoadThroughGEPConstantExpr(Init, CE);
        Changed |= CleanupConstantGlobalUsers(CE, SubInit, TD, TLI);
      } else if (CE->getOpcode() == Instruction::BitCast &&
                 CE->getType()->isPointerTy()) {
        // Pointer cast, delete any stores and memsets to the global.
        Changed |= CleanupConstantGlobalUsers(CE, 0, TD, TLI);
      }

      if (CE->use_empty()) {
        CE->destroyConstant();
        Changed = true;
      }
    } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(U)) {
      // Do not transform "gepinst (gep constexpr (GV))" here, because forming
      // "gepconstexpr (gep constexpr (GV))" will cause the two gep's to fold
      // and will invalidate our notion of what Init is.
      Constant *SubInit = 0;
      if (!isa<ConstantExpr>(GEP->getOperand(0))) {
        ConstantExpr *CE =
          dyn_cast_or_null<ConstantExpr>(ConstantFoldInstruction(GEP, TD, TLI));
        if (Init && CE && CE->getOpcode() == Instruction::GetElementPtr)
          SubInit = ConstantFoldLoadThroughGEPConstantExpr(Init, CE);

        // If the initializer is an all-null value and we have an inbounds GEP,
        // we already know what the result of any load from that GEP is.
        // TODO: Handle splats.
        if (Init && isa<ConstantAggregateZero>(Init) && GEP->isInBounds())
          SubInit = Constant::getNullValue(GEP->getType()->getElementType());
      }
      Changed |= CleanupConstantGlobalUsers(GEP, SubInit, TD, TLI);

      if (GEP->use_empty()) {
        GEP->eraseFromParent();
        Changed = true;
      }
    } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(U)) { // memset/cpy/mv
      if (MI->getRawDest() == V) {
        MI->eraseFromParent();
        Changed = true;
      }

    } else if (Constant *C = dyn_cast<Constant>(U)) {
      // If we have a chain of dead constantexprs or other things dangling from
      // us, and if they are all dead, nuke them without remorse.
      if (isSafeToDestroyConstant(C)) {
        C->destroyConstant();
        CleanupConstantGlobalUsers(V, Init, TD, TLI);
        return true;
      }
    }
  }
  return Changed;
}

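// Sketch of the effect (hypothetical IR): once @x is marked constant,
//   @x = internal constant i32 7
//   %v = load i32* @x          ; folded to i32 7
//   store i32 9, i32* @x       ; must be unreachable, so it is erased
// and dead GEP constantexprs hanging off @x are destroyed along the way.
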
/// isSafeSROAElementUse - Return true if the specified instruction is a safe
/// user of a derived expression from a global that we want to SROA.
static bool isSafeSROAElementUse(Value *V) {
  // We might have a dead and dangling constant hanging off of here.
  if (Constant *C = dyn_cast<Constant>(V))
    return isSafeToDestroyConstant(C);

  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) return false;

  // Loads are ok.
  if (isa<LoadInst>(I)) return true;

  // Stores *to* the pointer are ok.
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->getOperand(0) != V;

  // Otherwise, it must be a GEP.
  GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I);
  if (GEPI == 0) return false;

  if (GEPI->getNumOperands() < 3 || !isa<Constant>(GEPI->getOperand(1)) ||
      !cast<Constant>(GEPI->getOperand(1))->isNullValue())
    return false;

  for (Value::use_iterator I = GEPI->use_begin(), E = GEPI->use_end();
       I != E; ++I)
    if (!isSafeSROAElementUse(*I))
      return false;
  return true;
}


/// IsUserOfGlobalSafeForSRA - U is a direct user of the specified global value.
/// Look at it and its uses and decide whether it is safe to SROA this global.
///
static bool IsUserOfGlobalSafeForSRA(User *U, GlobalValue *GV) {
  // The user of the global must be a GEP Inst or a ConstantExpr GEP.
  if (!isa<GetElementPtrInst>(U) &&
      (!isa<ConstantExpr>(U) ||
       cast<ConstantExpr>(U)->getOpcode() != Instruction::GetElementPtr))
    return false;

  // Check to see if this ConstantExpr GEP is SRA'able.  In particular, we
  // don't like < 3 operand CE's, and we don't like non-constant integer
  // indices.  This enforces that all uses are 'gep GV, 0, C, ...' for some
  // value of C.
  if (U->getNumOperands() < 3 || !isa<Constant>(U->getOperand(1)) ||
      !cast<Constant>(U->getOperand(1))->isNullValue() ||
      !isa<ConstantInt>(U->getOperand(2)))
    return false;

  gep_type_iterator GEPI = gep_type_begin(U), E = gep_type_end(U);
  ++GEPI;  // Skip over the pointer index.

  // If this is a use of an array allocation, do a bit more checking for sanity.
  if (ArrayType *AT = dyn_cast<ArrayType>(*GEPI)) {
    uint64_t NumElements = AT->getNumElements();
    ConstantInt *Idx = cast<ConstantInt>(U->getOperand(2));

    // Check to make sure that index falls within the array.  If not,
    // something funny is going on, so we won't do the optimization.
    //
    if (Idx->getZExtValue() >= NumElements)
      return false;

    // We cannot scalar repl this level of the array unless any array
    // sub-indices are in-range constants.  In particular, consider:
    // A[0][i].  We cannot know that the user isn't doing invalid things like
    // allowing i to index an out-of-range subscript that accesses A[1].
    //
    // Scalar replacing *just* the outer index of the array is probably not
    // going to be a win anyway, so just give up.
    for (++GEPI;  // Skip array index.
         GEPI != E;
         ++GEPI) {
      uint64_t NumElements;
      if (ArrayType *SubArrayTy = dyn_cast<ArrayType>(*GEPI))
        NumElements = SubArrayTy->getNumElements();
      else if (VectorType *SubVectorTy = dyn_cast<VectorType>(*GEPI))
        NumElements = SubVectorTy->getNumElements();
      else {
        assert((*GEPI)->isStructTy() &&
               "Indexed GEP type is not array, vector, or struct!");
        continue;
      }

      ConstantInt *IdxVal = dyn_cast<ConstantInt>(GEPI.getOperand());
      if (!IdxVal || IdxVal->getZExtValue() >= NumElements)
        return false;
    }
  }

  for (Value::use_iterator I = U->use_begin(), E = U->use_end(); I != E; ++I)
    if (!isSafeSROAElementUse(*I))
      return false;
  return true;
}

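// Concretely (illustration only): with
//   @a = internal global [4 x i32] zeroinitializer
// a use like 'getelementptr [4 x i32]* @a, i32 0, i32 2' is SRA-safe, while a
// GEP whose first index is nonzero, whose element index is non-constant, or
// whose index is out of range is rejected.
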
/// GlobalUsersSafeToSRA - Look at all uses of the global and decide whether it
/// is safe for us to perform this transformation.
///
static bool GlobalUsersSafeToSRA(GlobalValue *GV) {
  for (Value::use_iterator UI = GV->use_begin(), E = GV->use_end();
       UI != E; ++UI) {
    if (!IsUserOfGlobalSafeForSRA(*UI, GV))
      return false;
  }
  return true;
}


/// SRAGlobal - Perform scalar replacement of aggregates on the specified global
/// variable.  This opens the door for other optimizations by exposing the
/// behavior of the program in a more fine-grained way.  We have determined that
/// this transformation is safe already.  We return the first global variable we
/// insert so that the caller can reprocess it.
static GlobalVariable *SRAGlobal(GlobalVariable *GV, const DataLayout &TD) {
  // Make sure this global only has simple uses that we can SRA.
  if (!GlobalUsersSafeToSRA(GV))
    return 0;

  assert(GV->hasLocalLinkage() && !GV->isConstant());
  Constant *Init = GV->getInitializer();
  Type *Ty = Init->getType();

  std::vector<GlobalVariable*> NewGlobals;
  Module::GlobalListType &Globals = GV->getParent()->getGlobalList();

  // Get the alignment of the global, either explicit or target-specific.
  unsigned StartAlignment = GV->getAlignment();
  if (StartAlignment == 0)
    StartAlignment = TD.getABITypeAlignment(GV->getType());

  if (StructType *STy = dyn_cast<StructType>(Ty)) {
    NewGlobals.reserve(STy->getNumElements());
    const StructLayout &Layout = *TD.getStructLayout(STy);
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      Constant *In = Init->getAggregateElement(i);
      assert(In && "Couldn't get element of initializer?");
      GlobalVariable *NGV = new GlobalVariable(STy->getElementType(i), false,
                                               GlobalVariable::InternalLinkage,
                                               In, GV->getName()+"."+Twine(i),
                                               GV->getThreadLocalMode(),
                                              GV->getType()->getAddressSpace());
      Globals.insert(GV, NGV);
      NewGlobals.push_back(NGV);

      // Calculate the known alignment of the field.  If the original aggregate
      // had 256 byte alignment for example, something might depend on that:
      // propagate info to each field.
      uint64_t FieldOffset = Layout.getElementOffset(i);
      unsigned NewAlign = (unsigned)MinAlign(StartAlignment, FieldOffset);
      if (NewAlign > TD.getABITypeAlignment(STy->getElementType(i)))
        NGV->setAlignment(NewAlign);
    }
  } else if (SequentialType *STy = dyn_cast<SequentialType>(Ty)) {
    unsigned NumElements = 0;
    if (ArrayType *ATy = dyn_cast<ArrayType>(STy))
      NumElements = ATy->getNumElements();
    else
      NumElements = cast<VectorType>(STy)->getNumElements();

    if (NumElements > 16 && GV->hasNUsesOrMore(16))
      return 0; // It's not worth it.
    NewGlobals.reserve(NumElements);

    uint64_t EltSize = TD.getTypeAllocSize(STy->getElementType());
    unsigned EltAlign = TD.getABITypeAlignment(STy->getElementType());
    for (unsigned i = 0, e = NumElements; i != e; ++i) {
      Constant *In = Init->getAggregateElement(i);
      assert(In && "Couldn't get element of initializer?");

      GlobalVariable *NGV = new GlobalVariable(STy->getElementType(), false,
                                               GlobalVariable::InternalLinkage,
                                               In, GV->getName()+"."+Twine(i),
                                               GV->getThreadLocalMode(),
                                              GV->getType()->getAddressSpace());
      Globals.insert(GV, NGV);
      NewGlobals.push_back(NGV);

      // Calculate the known alignment of the field.  If the original aggregate
      // had 256 byte alignment for example, something might depend on that:
      // propagate info to each field.
      unsigned NewAlign = (unsigned)MinAlign(StartAlignment, EltSize*i);
      if (NewAlign > EltAlign)
        NGV->setAlignment(NewAlign);
    }
  }

  if (NewGlobals.empty())
    return 0;

  DEBUG(dbgs() << "PERFORMING GLOBAL SRA ON: " << *GV);

  Constant *NullInt =Constant::getNullValue(Type::getInt32Ty(GV->getContext()));

  // Loop over all of the uses of the global, replacing the constantexpr geps
  // with smaller constantexpr geps or direct references.
  while (!GV->use_empty()) {
    User *GEP = GV->use_back();
    assert(((isa<ConstantExpr>(GEP) &&
             cast<ConstantExpr>(GEP)->getOpcode()==Instruction::GetElementPtr)||
            isa<GetElementPtrInst>(GEP)) && "NonGEP CE's are not SRAable!");

    // Ignore operand 1, which has to be zero or else the program is quite
    // broken (undefined).  Get operand 2, which is the structure or array
    // index.
    unsigned Val = cast<ConstantInt>(GEP->getOperand(2))->getZExtValue();
    if (Val >= NewGlobals.size()) Val = 0; // Out of bound array access.

    Value *NewPtr = NewGlobals[Val];

    // Form a shorter GEP if needed.
    if (GEP->getNumOperands() > 3) {
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(GEP)) {
        SmallVector<Constant*, 8> Idxs;
        Idxs.push_back(NullInt);
        for (unsigned i = 3, e = CE->getNumOperands(); i != e; ++i)
          Idxs.push_back(CE->getOperand(i));
        NewPtr = ConstantExpr::getGetElementPtr(cast<Constant>(NewPtr), Idxs);
      } else {
        GetElementPtrInst *GEPI = cast<GetElementPtrInst>(GEP);
        SmallVector<Value*, 8> Idxs;
        Idxs.push_back(NullInt);
        for (unsigned i = 3, e = GEPI->getNumOperands(); i != e; ++i)
          Idxs.push_back(GEPI->getOperand(i));
        NewPtr = GetElementPtrInst::Create(NewPtr, Idxs,
                                           GEPI->getName()+"."+Twine(Val),GEPI);
      }
    }
    GEP->replaceAllUsesWith(NewPtr);

    if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(GEP))
      GEPI->eraseFromParent();
    else
      cast<ConstantExpr>(GEP)->destroyConstant();
  }

  // Delete the old global, now that it is dead.
  Globals.erase(GV);
  ++NumSRA;

  // Loop over the new globals array deleting any globals that are obviously
  // dead.  This can arise due to scalarization of a structure or an array that
  // has elements that are dead.
  unsigned FirstGlobal = 0;
  for (unsigned i = 0, e = NewGlobals.size(); i != e; ++i)
    if (NewGlobals[i]->use_empty()) {
      Globals.erase(NewGlobals[i]);
      if (FirstGlobal == i) ++FirstGlobal;
    }

  return FirstGlobal != NewGlobals.size() ? NewGlobals[FirstGlobal] : 0;
}

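// Net effect, sketched on hypothetical IR:
//   @s = internal global { i32, i8* } zeroinitializer
// is split into @s.0 (i32) and @s.1 (i8*), and every 'gep @s, 0, N, ...' use
// is rewritten to reference @s.N directly, with alignment propagated from the
// original aggregate.
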
/// AllUsesOfValueWillTrapIfNull - Return true if all users of the specified
/// value will trap if the value is dynamically null.  PHIs keeps track of any
/// phi nodes we've seen to avoid reprocessing them.
static bool AllUsesOfValueWillTrapIfNull(const Value *V,
                                         SmallPtrSet<const PHINode*, 8> &PHIs) {
  for (Value::const_use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;
       ++UI) {
    const User *U = *UI;

    if (isa<LoadInst>(U)) {
      // Will trap.
    } else if (const StoreInst *SI = dyn_cast<StoreInst>(U)) {
      if (SI->getOperand(0) == V) {
        //cerr << "NONTRAPPING USE: " << *U;
        return false;  // Storing the value.
      }
    } else if (const CallInst *CI = dyn_cast<CallInst>(U)) {
      if (CI->getCalledValue() != V) {
        //cerr << "NONTRAPPING USE: " << *U;
        return false;  // Not calling the ptr
      }
    } else if (const InvokeInst *II = dyn_cast<InvokeInst>(U)) {
      if (II->getCalledValue() != V) {
        //cerr << "NONTRAPPING USE: " << *U;
        return false;  // Not calling the ptr
      }
    } else if (const BitCastInst *CI = dyn_cast<BitCastInst>(U)) {
      if (!AllUsesOfValueWillTrapIfNull(CI, PHIs)) return false;
    } else if (const GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(U)) {
      if (!AllUsesOfValueWillTrapIfNull(GEPI, PHIs)) return false;
    } else if (const PHINode *PN = dyn_cast<PHINode>(U)) {
      // If we've already seen this phi node, ignore it, it has already been
      // checked.
      if (PHIs.insert(PN) && !AllUsesOfValueWillTrapIfNull(PN, PHIs))
        return false;
    } else if (isa<ICmpInst>(U) &&
               isa<ConstantPointerNull>(UI->getOperand(1))) {
      // Ignore icmp X, null
    } else {
      //cerr << "NONTRAPPING USE: " << *U;
      return false;
    }
  }
  return true;
}

/// AllUsesOfLoadedValueWillTrapIfNull - Return true if all uses of any loads
/// from GV will trap if the loaded value is null.  Note that this also permits
/// comparisons of the loaded value against null, as a special case.
static bool AllUsesOfLoadedValueWillTrapIfNull(const GlobalVariable *GV) {
  for (Value::const_use_iterator UI = GV->use_begin(), E = GV->use_end();
       UI != E; ++UI) {
    const User *U = *UI;

    if (const LoadInst *LI = dyn_cast<LoadInst>(U)) {
      SmallPtrSet<const PHINode*, 8> PHIs;
      if (!AllUsesOfValueWillTrapIfNull(LI, PHIs))
        return false;
    } else if (isa<StoreInst>(U)) {
      // Ignore stores to the global.
    } else {
      // We don't know or understand this user, bail out.
      //cerr << "UNKNOWN USER OF GLOBAL!: " << *U;
      return false;
    }
  }
  return true;
}

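// Qualifying pattern, for illustration: if every use of
//   %p = load i32** @ptr
// is a dereference of %p, a call through %p, or an 'icmp %p, null', then a
// null %p can only reach a trapping instruction; the rewrite below relies on
// exactly this property.
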
static bool OptimizeAwayTrappingUsesOfValue(Value *V, Constant *NewV) {
  bool Changed = false;
  for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E; ) {
    Instruction *I = cast<Instruction>(*UI++);
    if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
      LI->setOperand(0, NewV);
      Changed = true;
    } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
      if (SI->getOperand(1) == V) {
        SI->setOperand(1, NewV);
        Changed = true;
      }
    } else if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
      CallSite CS(I);
      if (CS.getCalledValue() == V) {
        // Calling through the pointer!  Turn into a direct call, but be careful
        // that the pointer is not also being passed as an argument.
        CS.setCalledFunction(NewV);
        Changed = true;
        bool PassedAsArg = false;
        for (unsigned i = 0, e = CS.arg_size(); i != e; ++i)
          if (CS.getArgument(i) == V) {
            PassedAsArg = true;
            CS.setArgument(i, NewV);
          }

        if (PassedAsArg) {
          // Being passed as an argument also.  Be careful to not invalidate UI!
          UI = V->use_begin();
        }
      }
    } else if (CastInst *CI = dyn_cast<CastInst>(I)) {
      Changed |= OptimizeAwayTrappingUsesOfValue(CI,
                                ConstantExpr::getCast(CI->getOpcode(),
                                                      NewV, CI->getType()));
      if (CI->use_empty()) {
        Changed = true;
        CI->eraseFromParent();
      }
    } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
      // Should handle GEP here.
      SmallVector<Constant*, 8> Idxs;
      Idxs.reserve(GEPI->getNumOperands()-1);
      for (User::op_iterator i = GEPI->op_begin() + 1, e = GEPI->op_end();
           i != e; ++i)
        if (Constant *C = dyn_cast<Constant>(*i))
          Idxs.push_back(C);
        else
          break;
      if (Idxs.size() == GEPI->getNumOperands()-1)
        Changed |= OptimizeAwayTrappingUsesOfValue(GEPI,
                          ConstantExpr::getGetElementPtr(NewV, Idxs));
      if (GEPI->use_empty()) {
        Changed = true;
        GEPI->eraseFromParent();
      }
    }
  }

  return Changed;
}


/// OptimizeAwayTrappingUsesOfLoads - The specified global has only one non-null
/// value stored into it.  If there are uses of the loaded value that would trap
/// if the loaded value is dynamically null, then we know that they cannot be
/// reached with a null value, so we can optimize away the load.
static bool OptimizeAwayTrappingUsesOfLoads(GlobalVariable *GV, Constant *LV,
                                            DataLayout *TD,
                                            TargetLibraryInfo *TLI) {
  bool Changed = false;

  // Keep track of whether we are able to remove all the uses of the global
  // other than the store that defines it.
  bool AllNonStoreUsesGone = true;

  // Replace all uses of loads with uses of uses of the stored value.
  for (Value::use_iterator GUI = GV->use_begin(), E = GV->use_end(); GUI != E;){
    User *GlobalUser = *GUI++;
    if (LoadInst *LI = dyn_cast<LoadInst>(GlobalUser)) {
      Changed |= OptimizeAwayTrappingUsesOfValue(LI, LV);
      // If we were able to delete all uses of the loads
      if (LI->use_empty()) {
        LI->eraseFromParent();
        Changed = true;
      } else {
        AllNonStoreUsesGone = false;
      }
    } else if (isa<StoreInst>(GlobalUser)) {
      // Ignore the store that stores "LV" to the global.
      assert(GlobalUser->getOperand(1) == GV &&
             "Must be storing *to* the global");
    } else {
      AllNonStoreUsesGone = false;

      // If we get here we could have other crazy uses that are transitively
      // loaded.
      assert((isa<PHINode>(GlobalUser) || isa<SelectInst>(GlobalUser) ||
              isa<ConstantExpr>(GlobalUser) || isa<CmpInst>(GlobalUser) ||
              isa<BitCastInst>(GlobalUser) ||
              isa<GetElementPtrInst>(GlobalUser)) &&
             "Only expect load and stores!");
    }
  }

  if (Changed) {
    DEBUG(dbgs() << "OPTIMIZED LOADS FROM STORED ONCE POINTER: " << *GV);
    ++NumGlobUses;
  }

  // If we nuked all of the loads, then none of the stores are needed either,
  // nor is the global.
  if (AllNonStoreUsesGone) {
    if (isLeakCheckerRoot(GV)) {
      Changed |= CleanupPointerRootUsers(GV, TLI);
    } else {
      Changed = true;
      CleanupConstantGlobalUsers(GV, 0, TD, TLI);
    }
    if (GV->use_empty()) {
      DEBUG(dbgs() << "  *** GLOBAL NOW DEAD!\n");
      Changed = true;
      GV->eraseFromParent();
      ++NumDeleted;
    }
  }
  return Changed;
}

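// Putting it together (hypothetical IR): if @fp is only ever assigned @impl,
//   store void()* @impl, void()** @fp
//   %f = load void()** @fp
//   call void %f()
// the indirect call becomes 'call void @impl()'; once every load is gone, the
// defining store and @fp itself can be deleted as well.
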
/// ConstantPropUsersOf - Walk the use list of V, constant folding all of the
/// instructions that are foldable.
static void ConstantPropUsersOf(Value *V,
                                DataLayout *TD, TargetLibraryInfo *TLI) {
  for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E; )
    if (Instruction *I = dyn_cast<Instruction>(*UI++))
      if (Constant *NewC = ConstantFoldInstruction(I, TD, TLI)) {
        I->replaceAllUsesWith(NewC);

        // Advance UI to the next non-I use to avoid invalidating it!
        // Instructions could multiply use V.
        while (UI != E && *UI == I)
          ++UI;
        I->eraseFromParent();
      }
}

/// OptimizeGlobalAddressOfMalloc - This function takes the specified global
/// variable, and transforms the program as if it always contained the result of
/// the specified malloc.  Because it is always the result of the specified
/// malloc, there is no reason to actually DO the malloc.  Instead, turn the
/// malloc into a global, and any loads of GV as uses of the new global.
static GlobalVariable *OptimizeGlobalAddressOfMalloc(GlobalVariable *GV,
                                                     CallInst *CI,
                                                     Type *AllocTy,
                                                     ConstantInt *NElements,
                                                     DataLayout *TD,
                                                     TargetLibraryInfo *TLI) {
  DEBUG(errs() << "PROMOTING GLOBAL: " << *GV << " CALL = " << *CI << '\n');

  Type *GlobalType;
  if (NElements->getZExtValue() == 1)
    GlobalType = AllocTy;
  else
    // If we have an array allocation, the global variable is of an array.
    GlobalType = ArrayType::get(AllocTy, NElements->getZExtValue());

  // Create the new global variable.  The contents of the malloc'd memory are
  // undefined, so initialize with an undef value.
  GlobalVariable *NewGV = new GlobalVariable(*GV->getParent(),
                                             GlobalType, false,
                                             GlobalValue::InternalLinkage,
                                             UndefValue::get(GlobalType),
                                             GV->getName()+".body",
                                             GV,
                                             GV->getThreadLocalMode());

  // If there are bitcast users of the malloc (which is typical, usually we have
  // a malloc + bitcast) then replace them with uses of the new global.  Update
  // other users to use the global as well.
  BitCastInst *TheBC = 0;
  while (!CI->use_empty()) {
    Instruction *User = cast<Instruction>(CI->use_back());
    if (BitCastInst *BCI = dyn_cast<BitCastInst>(User)) {
      if (BCI->getType() == NewGV->getType()) {
        BCI->replaceAllUsesWith(NewGV);
        BCI->eraseFromParent();
      } else {
        BCI->setOperand(0, NewGV);
      }
    } else {
      if (TheBC == 0)
        TheBC = new BitCastInst(NewGV, CI->getType(), "newgv", CI);
      User->replaceUsesOfWith(CI, TheBC);
    }
  }

  Constant *RepValue = NewGV;
  if (NewGV->getType() != GV->getType()->getElementType())
    RepValue = ConstantExpr::getBitCast(RepValue,
                                        GV->getType()->getElementType());

  // If there is a comparison against null, we will insert a global bool to
  // keep track of whether the global was initialized yet or not.
  GlobalVariable *InitBool =
    new GlobalVariable(Type::getInt1Ty(GV->getContext()), false,
                       GlobalValue::InternalLinkage,
                       ConstantInt::getFalse(GV->getContext()),
                       GV->getName()+".init", GV->getThreadLocalMode());
  bool InitBoolUsed = false;

  // Loop over all uses of GV, processing them in turn.
  while (!GV->use_empty()) {
    if (StoreInst *SI = dyn_cast<StoreInst>(GV->use_back())) {
      // The global is initialized when the store to it occurs.
      new StoreInst(ConstantInt::getTrue(GV->getContext()), InitBool, false, 0,
                    SI->getOrdering(), SI->getSynchScope(), SI);
      SI->eraseFromParent();
      continue;
    }

    LoadInst *LI = cast<LoadInst>(GV->use_back());
    while (!LI->use_empty()) {
      Use &LoadUse = LI->use_begin().getUse();
      if (!isa<ICmpInst>(LoadUse.getUser())) {
        LoadUse = RepValue;
        continue;
      }

      ICmpInst *ICI = cast<ICmpInst>(LoadUse.getUser());
      // Replace the cmp X, 0 with a use of the bool value.
      // Sink the load to where the compare was, if atomic rules allow us to.
      Value *LV = new LoadInst(InitBool, InitBool->getName()+".val", false, 0,
                               LI->getOrdering(), LI->getSynchScope(),
                               LI->isUnordered() ? (Instruction*)ICI : LI);
      InitBoolUsed = true;
      switch (ICI->getPredicate()) {
      default: llvm_unreachable("Unknown ICmp Predicate!");
      case ICmpInst::ICMP_ULT:
      case ICmpInst::ICMP_SLT:   // X < null -> always false
        LV = ConstantInt::getFalse(GV->getContext());
        break;
      case ICmpInst::ICMP_ULE:
      case ICmpInst::ICMP_SLE:
      case ICmpInst::ICMP_EQ:
        LV = BinaryOperator::CreateNot(LV, "notinit", ICI);
        break;
      case ICmpInst::ICMP_NE:
      case ICmpInst::ICMP_UGE:
      case ICmpInst::ICMP_SGE:
      case ICmpInst::ICMP_UGT:
      case ICmpInst::ICMP_SGT:
        break;  // no change.
      }
      ICI->replaceAllUsesWith(LV);
      ICI->eraseFromParent();
    }
    LI->eraseFromParent();
  }

  // If the initialization boolean was used, insert it, otherwise delete it.
  if (!InitBoolUsed) {
    while (!InitBool->use_empty())  // Delete initializations
      cast<StoreInst>(InitBool->use_back())->eraseFromParent();
    delete InitBool;
  } else
    GV->getParent()->getGlobalList().insert(GV, InitBool);

  // Now the GV is dead, nuke it and the malloc..
  GV->eraseFromParent();
  CI->eraseFromParent();

  // To further other optimizations, loop over all users of NewGV and try to
  // constant prop them.  This will promote GEP instructions with constant
  // indices into GEP constant-exprs, which will allow global-opt to hack on it.
  ConstantPropUsersOf(NewGV, TD, TLI);
  if (RepValue != NewGV)
    ConstantPropUsersOf(RepValue, TD, TLI);

  return NewGV;
}

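// Overall shape of the transformation (illustrative IR):
//   @p = internal global i32* null       ; only ever assigned one malloc result
// becomes
//   @p.body = internal global i32 undef
//   @p.init = internal global i1 false   ; kept only if @p was compared to null
// with loads of @p replaced by @p.body and null-compares by loads of @p.init.
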
/// ValueIsOnlyUsedLocallyOrStoredToOneGlobal - Scan the use-list of V checking
/// to make sure that there are no complex uses of V.  We permit simple things
/// like dereferencing the pointer, but not storing through the address, unless
/// it is to the specified global.
static bool ValueIsOnlyUsedLocallyOrStoredToOneGlobal(const Instruction *V,
                                                      const GlobalVariable *GV,
                                        SmallPtrSet<const PHINode*, 8> &PHIs) {
  for (Value::const_use_iterator UI = V->use_begin(), E = V->use_end();
       UI != E; ++UI) {
    const Instruction *Inst = cast<Instruction>(*UI);

    if (isa<LoadInst>(Inst) || isa<CmpInst>(Inst)) {
      continue; // Fine, ignore.
    }

    if (const StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      if (SI->getOperand(0) == V && SI->getOperand(1) != GV)
        return false;  // Storing the pointer itself... bad.
      continue; // Otherwise, storing through it, or storing into GV... fine.
    }

    // Must index into the array and into the struct.
    if (isa<GetElementPtrInst>(Inst) && Inst->getNumOperands() >= 3) {
      if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(Inst, GV, PHIs))
        return false;
      continue;
    }

    if (const PHINode *PN = dyn_cast<PHINode>(Inst)) {
      // PHIs are ok if all uses are ok.  Don't infinitely recurse through PHI
      // cycles.
      if (PHIs.insert(PN))
        if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(PN, GV, PHIs))
          return false;
      continue;
    }

    if (const BitCastInst *BCI = dyn_cast<BitCastInst>(Inst)) {
      if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(BCI, GV, PHIs))
        return false;
      continue;
    }

    return false;
  }
  return true;
}

/// ReplaceUsesOfMallocWithGlobal - The Alloc pointer is stored into GV
/// somewhere.  Transform all uses of the allocation into loads from the
/// global and uses of the resultant pointer.  Further, delete the store into
/// GV.  This assumes that these values pass the
/// 'ValueIsOnlyUsedLocallyOrStoredToOneGlobal' predicate.
static void ReplaceUsesOfMallocWithGlobal(Instruction *Alloc,
                                          GlobalVariable *GV) {
  while (!Alloc->use_empty()) {
    Instruction *U = cast<Instruction>(*Alloc->use_begin());
    Instruction *InsertPt = U;
    if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
      // If this is the store of the allocation into the global, remove it.
      if (SI->getOperand(1) == GV) {
        SI->eraseFromParent();
        continue;
      }
    } else if (PHINode *PN = dyn_cast<PHINode>(U)) {
      // Insert the load in the corresponding predecessor, not right before the
      // PHI.
      InsertPt = PN->getIncomingBlock(Alloc->use_begin())->getTerminator();
    } else if (isa<BitCastInst>(U)) {
      // Must be bitcast between the malloc and store to initialize the global.
      ReplaceUsesOfMallocWithGlobal(U, GV);
      U->eraseFromParent();
      continue;
    } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(U)) {
      // If this is a "GEP bitcast" and the user is a store to the global, then
      // just process it as a bitcast.
      if (GEPI->hasAllZeroIndices() && GEPI->hasOneUse())
        if (StoreInst *SI = dyn_cast<StoreInst>(GEPI->use_back()))
          if (SI->getOperand(1) == GV) {
            // Must be bitcast GEP between the malloc and store to initialize
            // the global.
            ReplaceUsesOfMallocWithGlobal(GEPI, GV);
            GEPI->eraseFromParent();
            continue;
          }
    }

    // Insert a load from the global, and use it instead of the malloc.
    Value *NL = new LoadInst(GV, GV->getName()+".val", InsertPt);
    U->replaceUsesOfWith(Alloc, NL);
  }
}

/// LoadUsesSimpleEnoughForHeapSRA - Verify that all uses of V (a load, or a phi
/// of a load) are simple enough to perform heap SRA on.  This permits GEP's
/// that index through the array and struct field, icmps of null, and PHIs.
static bool LoadUsesSimpleEnoughForHeapSRA(const Value *V,
                        SmallPtrSet<const PHINode*, 32> &LoadUsingPHIs,
                        SmallPtrSet<const PHINode*, 32> &LoadUsingPHIsPerLoad) {
  // We permit two users of the load: setcc comparing against the null
  // pointer, and a getelementptr of a specific form.
  for (Value::const_use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;
       ++UI) {
    const Instruction *User = cast<Instruction>(*UI);

    // Comparison against null is ok.
    if (const ICmpInst *ICI = dyn_cast<ICmpInst>(User)) {
      if (!isa<ConstantPointerNull>(ICI->getOperand(1)))
        return false;
      continue;
    }

    // getelementptr is also ok, but only a simple form.
    if (const GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(User)) {
      // Must index into the array and into the struct.
      if (GEPI->getNumOperands() < 3)
        return false;

      // Otherwise the GEP is ok.
      continue;
    }

    if (const PHINode *PN = dyn_cast<PHINode>(User)) {
      if (!LoadUsingPHIsPerLoad.insert(PN))
        // This means some phi nodes are dependent on each other.
        // Avoid infinite looping!
        return false;
      if (!LoadUsingPHIs.insert(PN))
        // If we have already analyzed this PHI, then it is safe.
        continue;

      // Make sure all uses of the PHI are simple enough to transform.
      if (!LoadUsesSimpleEnoughForHeapSRA(PN,
                                          LoadUsingPHIs, LoadUsingPHIsPerLoad))
        return false;

      continue;
    }

    // Otherwise we don't know what this is, not ok.
    return false;
  }

  return true;
}


/// AllGlobalLoadUsesSimpleEnoughForHeapSRA - If all users of values loaded from
/// GV are simple enough to perform HeapSRA, return true.
static bool AllGlobalLoadUsesSimpleEnoughForHeapSRA(const GlobalVariable *GV,
                                                    Instruction *StoredVal) {
  SmallPtrSet<const PHINode*, 32> LoadUsingPHIs;
  SmallPtrSet<const PHINode*, 32> LoadUsingPHIsPerLoad;
  for (Value::const_use_iterator UI = GV->use_begin(), E = GV->use_end();
       UI != E; ++UI)
    if (const LoadInst *LI = dyn_cast<LoadInst>(*UI)) {
      if (!LoadUsesSimpleEnoughForHeapSRA(LI, LoadUsingPHIs,
                                          LoadUsingPHIsPerLoad))
        return false;
      LoadUsingPHIsPerLoad.clear();
    }

  // If we reach here, we know that all uses of the loads and transitive uses
  // (through PHI nodes) are simple enough to transform.  However, we don't know
  // that all inputs to the PHI nodes are in the same equivalence sets.
  // Check to verify that all operands of the PHIs are either PHIs that can be
  // transformed, loads from GV, or MI itself.
  for (SmallPtrSet<const PHINode*, 32>::const_iterator I = LoadUsingPHIs.begin()
       , E = LoadUsingPHIs.end(); I != E; ++I) {
    const PHINode *PN = *I;
    for (unsigned op = 0, e = PN->getNumIncomingValues(); op != e; ++op) {
      Value *InVal = PN->getIncomingValue(op);

      // PHI of the stored value itself is ok.
      if (InVal == StoredVal) continue;

      if (const PHINode *InPN = dyn_cast<PHINode>(InVal)) {
        // One of the PHIs in our set is (optimistically) ok.
        if (LoadUsingPHIs.count(InPN))
          continue;
        return false;
      }

      // Load from GV is ok.
      if (const LoadInst *LI = dyn_cast<LoadInst>(InVal))
        if (LI->getOperand(0) == GV)
          continue;

      // UNDEF? NULL?

      // Anything else is rejected.
      return false;
    }
  }

  return true;
}

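// In heap-SRA terms (hypothetical IR): for a malloc'd array of
// '{ i32, float }' stored once to @g, the checks above accept loads of @g
// whose only uses are 'gep %ld, i64 %i, i32 FieldNo', compares against null,
// and PHIs over such loads; GetHeapSROAValue below then builds the per-field
// replacement values lazily.
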
static Value *GetHeapSROAValue(Value *V, unsigned FieldNo,
               DenseMap<Value*, std::vector<Value*> > &InsertedScalarizedValues,
                   std::vector<std::pair<PHINode*, unsigned> > &PHIsToRewrite) {
  std::vector<Value*> &FieldVals = InsertedScalarizedValues[V];

  if (FieldNo >= FieldVals.size())
    FieldVals.resize(FieldNo+1);

  // If we already have this value, just reuse the previously scalarized
  // version.
  if (Value *FieldVal = FieldVals[FieldNo])
    return FieldVal;

  // Depending on what instruction this is, we have several cases.
  Value *Result;
  if (LoadInst *LI = dyn_cast<LoadInst>(V)) {
    // This is a scalarized version of the load from the global.  Just create
    // a new Load of the scalarized global.
    Result = new LoadInst(GetHeapSROAValue(LI->getOperand(0), FieldNo,
                                           InsertedScalarizedValues,
                                           PHIsToRewrite),
                          LI->getName()+".f"+Twine(FieldNo), LI);
  } else if (PHINode *PN = dyn_cast<PHINode>(V)) {
    // PN's type is pointer to struct.  Make a new PHI of pointer to struct
    // field.
    StructType *ST = cast<StructType>(PN->getType()->getPointerElementType());

    PHINode *NewPN =
      PHINode::Create(PointerType::getUnqual(ST->getElementType(FieldNo)),
                      PN->getNumIncomingValues(),
                      PN->getName()+".f"+Twine(FieldNo), PN);
    Result = NewPN;
    PHIsToRewrite.push_back(std::make_pair(PN, FieldNo));
  } else {
    llvm_unreachable("Unknown usable value");
  }

  return FieldVals[FieldNo] = Result;
}

/// RewriteHeapSROALoadUser - Given a load instruction and a value derived from
/// the load, rewrite the derived value to use the HeapSRoA'd load.
static void RewriteHeapSROALoadUser(Instruction *LoadUser,
               DenseMap<Value*, std::vector<Value*> > &InsertedScalarizedValues,
                   std::vector<std::pair<PHINode*, unsigned> > &PHIsToRewrite) {
  // If this is a comparison against null, handle it.
  if (ICmpInst *SCI = dyn_cast<ICmpInst>(LoadUser)) {
    assert(isa<ConstantPointerNull>(SCI->getOperand(1)));
    // If we have a setcc of the loaded pointer, we can use a setcc of any
    // field.
    Value *NPtr = GetHeapSROAValue(SCI->getOperand(0), 0,
                                   InsertedScalarizedValues, PHIsToRewrite);

    Value *New = new ICmpInst(SCI, SCI->getPredicate(), NPtr,
                              Constant::getNullValue(NPtr->getType()),
                              SCI->getName());
    SCI->replaceAllUsesWith(New);
    SCI->eraseFromParent();
    return;
  }

  // Handle 'getelementptr Ptr, Idx, i32 FieldNo ...'
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(LoadUser)) {
    assert(GEPI->getNumOperands() >= 3 && isa<ConstantInt>(GEPI->getOperand(2))
           && "Unexpected GEPI!");

    // Load the pointer for this field.
    unsigned FieldNo = cast<ConstantInt>(GEPI->getOperand(2))->getZExtValue();
    Value *NewPtr = GetHeapSROAValue(GEPI->getOperand(0), FieldNo,
                                     InsertedScalarizedValues, PHIsToRewrite);

    // Create the new GEP idx vector.
    SmallVector<Value*, 8> GEPIdx;
    GEPIdx.push_back(GEPI->getOperand(1));
    GEPIdx.append(GEPI->op_begin()+3, GEPI->op_end());

    Value *NGEPI = GetElementPtrInst::Create(NewPtr, GEPIdx,
                                             GEPI->getName(), GEPI);
    GEPI->replaceAllUsesWith(NGEPI);
    GEPI->eraseFromParent();
    return;
  }

  // Recursively transform the users of PHI nodes.  This will lazily create the
  // PHIs that are needed for individual elements.  Keep track of what PHIs we
  // see in InsertedScalarizedValues so that we don't get infinite loops (very
  // antisocial).  If the PHI is already in InsertedScalarizedValues, it has
  // already been seen first by another load, so its uses have already been
  // processed.
  PHINode *PN = cast<PHINode>(LoadUser);
  if (!InsertedScalarizedValues.insert(std::make_pair(PN,
                                              std::vector<Value*>())).second)
    return;

  // If this is the first time we've seen this PHI, recursively process all
  // users.
  for (Value::use_iterator UI = PN->use_begin(), E = PN->use_end(); UI != E; ) {
    Instruction *User = cast<Instruction>(*UI++);
    RewriteHeapSROALoadUser(User, InsertedScalarizedValues, PHIsToRewrite);
  }
}

/// RewriteUsesOfLoadForHeapSRoA - We are performing Heap SRoA on a global.  Ptr
/// is a value loaded from the global.  Eliminate all uses of Ptr, making them
/// use FieldGlobals instead.  All uses of loaded values satisfy
/// AllGlobalLoadUsesSimpleEnoughForHeapSRA.
static void RewriteUsesOfLoadForHeapSRoA(LoadInst *Load,
               DenseMap<Value*, std::vector<Value*> > &InsertedScalarizedValues,
                   std::vector<std::pair<PHINode*, unsigned> > &PHIsToRewrite) {
  for (Value::use_iterator UI = Load->use_begin(), E = Load->use_end();
       UI != E; ) {
    Instruction *User = cast<Instruction>(*UI++);
    RewriteHeapSROALoadUser(User, InsertedScalarizedValues, PHIsToRewrite);
  }

  if (Load->use_empty()) {
    Load->eraseFromParent();
    InsertedScalarizedValues.erase(Load);
  }
}

/// PerformHeapAllocSRoA - CI is an allocation of an array of structures.  Break
/// it up into multiple allocations of arrays of the fields.
static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, CallInst *CI,
                                            Value *NElems, DataLayout *TD,
                                            const TargetLibraryInfo *TLI) {
  DEBUG(dbgs() << "SROA HEAP ALLOC: " << *GV << " MALLOC = " << *CI << '\n');
  Type *MAT = getMallocAllocatedType(CI, TLI);
  StructType *STy = cast<StructType>(MAT);

  // There is guaranteed to be at least one use of the malloc (storing
  // it into GV).  If there are other uses, change them to be uses of
  // the global to simplify later code.  This also deletes the store
  // into GV.
  ReplaceUsesOfMallocWithGlobal(CI, GV);

  // Okay, at this point, there are no users of the malloc.  Insert N
  // new mallocs at the same place as CI, and N globals.
  std::vector<Value*> FieldGlobals;
  std::vector<Value*> FieldMallocs;

  for (unsigned FieldNo = 0, e = STy->getNumElements(); FieldNo != e;++FieldNo){
    Type *FieldTy = STy->getElementType(FieldNo);
    PointerType *PFieldTy = PointerType::getUnqual(FieldTy);

    GlobalVariable *NGV =
      new GlobalVariable(*GV->getParent(),
                         PFieldTy, false, GlobalValue::InternalLinkage,
                         Constant::getNullValue(PFieldTy),
                         GV->getName() + ".f" + Twine(FieldNo), GV,
                         GV->getThreadLocalMode());
    FieldGlobals.push_back(NGV);

    unsigned TypeSize = TD->getTypeAllocSize(FieldTy);
    if (StructType *ST = dyn_cast<StructType>(FieldTy))
      TypeSize = TD->getStructLayout(ST)->getSizeInBytes();
    Type *IntPtrTy = TD->getIntPtrType(CI->getType());
    Value *NMI = CallInst::CreateMalloc(CI, IntPtrTy, FieldTy,
                                        ConstantInt::get(IntPtrTy, TypeSize),
                                        NElems, 0,
                                        CI->getName() + ".f" + Twine(FieldNo));
    FieldMallocs.push_back(NMI);
    new StoreInst(NMI, NGV, CI);
  }

  // The tricky aspect of this transformation is handling the case when malloc
  // fails.  In the original code, malloc failing would set the result pointer
  // of malloc to null.  In this case, some mallocs could succeed and others
  // could fail.  As such, we emit code that looks like this:
  //    F0 = malloc(field0)
  //    F1 = malloc(field1)
  //    F2 = malloc(field2)
  //    if (F0 == 0 || F1 == 0 || F2 == 0) {
  //      if (F0) { free(F0); F0 = 0; }
  //      if (F1) { free(F1); F1 = 0; }
  //      if (F2) { free(F2); F2 = 0; }
  //    }
  // The malloc can also fail if its argument is too large.
  Constant *ConstantZero = ConstantInt::get(CI->getArgOperand(0)->getType(), 0);
  Value *RunningOr = new ICmpInst(CI, ICmpInst::ICMP_SLT, CI->getArgOperand(0),
                                  ConstantZero, "isneg");
  for (unsigned i = 0, e = FieldMallocs.size(); i != e; ++i) {
    Value *Cond = new ICmpInst(CI, ICmpInst::ICMP_EQ, FieldMallocs[i],
                             Constant::getNullValue(FieldMallocs[i]->getType()),
                               "isnull");
    RunningOr = BinaryOperator::CreateOr(RunningOr, Cond, "tmp", CI);
  }

  // Split the basic block at the old malloc.
  BasicBlock *OrigBB = CI->getParent();
  BasicBlock *ContBB = OrigBB->splitBasicBlock(CI, "malloc_cont");

  // Create the block to check the first condition.  Put all these blocks at the
  // end of the function as they are unlikely to be executed.
  BasicBlock *NullPtrBlock = BasicBlock::Create(OrigBB->getContext(),
                                                "malloc_ret_null",
                                                OrigBB->getParent());

  // Remove the uncond branch from OrigBB to ContBB, turning it into a cond
  // branch on RunningOr.
  OrigBB->getTerminator()->eraseFromParent();
  BranchInst::Create(NullPtrBlock, ContBB, RunningOr, OrigBB);

  // Within the NullPtrBlock, we need to emit a comparison and branch for each
  // pointer, because some may be null while others are not.
  for (unsigned i = 0, e = FieldGlobals.size(); i != e; ++i) {
    Value *GVVal = new LoadInst(FieldGlobals[i], "tmp", NullPtrBlock);
    Value *Cmp = new ICmpInst(*NullPtrBlock, ICmpInst::ICMP_NE, GVVal,
                              Constant::getNullValue(GVVal->getType()));
    BasicBlock *FreeBlock = BasicBlock::Create(Cmp->getContext(), "free_it",
                                               OrigBB->getParent());
    BasicBlock *NextBlock = BasicBlock::Create(Cmp->getContext(), "next",
                                               OrigBB->getParent());
    Instruction *BI = BranchInst::Create(FreeBlock, NextBlock,
                                         Cmp, NullPtrBlock);

    // Fill in FreeBlock.
    CallInst::CreateFree(GVVal, BI);
    new StoreInst(Constant::getNullValue(GVVal->getType()), FieldGlobals[i],
                  FreeBlock);
    BranchInst::Create(NextBlock, FreeBlock);

    NullPtrBlock = NextBlock;
  }

  BranchInst::Create(ContBB, NullPtrBlock);

  // CI is no longer needed, remove it.
  CI->eraseFromParent();

1377 /// InsertedScalarizedValues - As we process loads, if we can't immediately | |
1378 /// update all uses of a load, keep track of the scalarized values that have | |
1379 /// been inserted for it. | |
1380 DenseMap<Value*, std::vector<Value*> > InsertedScalarizedValues; | |
1381 InsertedScalarizedValues[GV] = FieldGlobals; | |
1382 | |
1383 std::vector<std::pair<PHINode*, unsigned> > PHIsToRewrite; | |
1384 | |
1385 // Okay, the malloc site is completely handled. All of the uses of GV are now | |
1386 // loads, and all uses of those loads are simple. Rewrite them to use loads | |
1387 // of the per-field globals instead. | |
1388 for (Value::use_iterator UI = GV->use_begin(), E = GV->use_end(); UI != E;) { | |
1389 Instruction *User = cast<Instruction>(*UI++); | |
1390 | |
1391 if (LoadInst *LI = dyn_cast<LoadInst>(User)) { | |
1392 RewriteUsesOfLoadForHeapSRoA(LI, InsertedScalarizedValues, PHIsToRewrite); | |
1393 continue; | |
1394 } | |
1395 | |
1396 // Must be a store of null. | |
1397 StoreInst *SI = cast<StoreInst>(User); | |
1398 assert(isa<ConstantPointerNull>(SI->getOperand(0)) && | |
1399 "Unexpected heap-sra user!"); | |
1400 | |
1401 // Insert a store of null into each global. | |
1402 for (unsigned i = 0, e = FieldGlobals.size(); i != e; ++i) { | |
1403 PointerType *PT = cast<PointerType>(FieldGlobals[i]->getType()); | |
1404 Constant *Null = Constant::getNullValue(PT->getElementType()); | |
1405 new StoreInst(Null, FieldGlobals[i], SI); | |
1406 } | |
1407 // Erase the original store. | |
1408 SI->eraseFromParent(); | |
1409 } | |
1410 | |
1411 // While we have PHIs that are interesting to rewrite, do it. | |
1412 while (!PHIsToRewrite.empty()) { | |
1413 PHINode *PN = PHIsToRewrite.back().first; | |
1414 unsigned FieldNo = PHIsToRewrite.back().second; | |
1415 PHIsToRewrite.pop_back(); | |
1416 PHINode *FieldPN = cast<PHINode>(InsertedScalarizedValues[PN][FieldNo]); | |
1417 assert(FieldPN->getNumIncomingValues() == 0 &&"Already processed this phi"); | |
1418 | |
1419 // Add all the incoming values. This can materialize more phis. | |
1420 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) { | |
1421 Value *InVal = PN->getIncomingValue(i); | |
1422 InVal = GetHeapSROAValue(InVal, FieldNo, InsertedScalarizedValues, | |
1423 PHIsToRewrite); | |
1424 FieldPN->addIncoming(InVal, PN->getIncomingBlock(i)); | |
1425 } | |
1426 } | |
1427 | |
1428 // Drop all inter-phi links and any loads that made it this far. | |
1429 for (DenseMap<Value*, std::vector<Value*> >::iterator | |
1430 I = InsertedScalarizedValues.begin(), E = InsertedScalarizedValues.end(); | |
1431 I != E; ++I) { | |
1432 if (PHINode *PN = dyn_cast<PHINode>(I->first)) | |
1433 PN->dropAllReferences(); | |
1434 else if (LoadInst *LI = dyn_cast<LoadInst>(I->first)) | |
1435 LI->dropAllReferences(); | |
1436 } | |
1437 | |
1438 // Delete all the phis and loads now that inter-references are dead. | |
1439 for (DenseMap<Value*, std::vector<Value*> >::iterator | |
1440 I = InsertedScalarizedValues.begin(), E = InsertedScalarizedValues.end(); | |
1441 I != E; ++I) { | |
1442 if (PHINode *PN = dyn_cast<PHINode>(I->first)) | |
1443 PN->eraseFromParent(); | |
1444 else if (LoadInst *LI = dyn_cast<LoadInst>(I->first)) | |
1445 LI->eraseFromParent(); | |
1446 } | |
1447 | |
1448 // The old global is now dead, remove it. | |
1449 GV->eraseFromParent(); | |
1450 | |
1451 ++NumHeapSRA; | |
1452 return cast<GlobalVariable>(FieldGlobals[0]); | |
1453 } | |
1454 | |
1455 /// TryToOptimizeStoreOfMallocToGlobal - This function is called when we see a | |
1456 /// pointer global variable with a single value stored into it, where that | |
1457 /// value is a malloc or a cast of a malloc. | |
1458 static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV, | |
1459 CallInst *CI, | |
1460 Type *AllocTy, | |
1461 AtomicOrdering Ordering, | |
1462 Module::global_iterator &GVI, | |
1463 DataLayout *TD, | |
1464 TargetLibraryInfo *TLI) { | |
1465 if (!TD) | |
1466 return false; | |
1467 | |
1468 // If this is a malloc of an abstract type, don't touch it. | |
1469 if (!AllocTy->isSized()) | |
1470 return false; | |
1471 | |
1472 // We can't optimize this global unless all uses of it are *known* to be | |
1473 // of the malloc value, not of the null initializer value (consider a use | |
1474 // that compares the global's value against zero to see if the malloc has | |
1475 // been reached). To do this, we check to see if all uses of the global | |
1476 // would trap if the global were null: this proves that they must all | |
1477 // happen after the malloc. | |
1478 if (!AllUsesOfLoadedValueWillTrapIfNull(GV)) | |
1479 return false; | |
1480 | |
1481 // We can't optimize this if the malloc itself is used in a complex way, | |
1482 // for example, being stored into multiple globals. This allows the | |
1483 // malloc only to be stored into the specified global and otherwise just | |
1484 // loaded, icmp'd, and GEP'd; these are all uses we know how to rewrite | |
1485 // in terms of the global. | |
1486 SmallPtrSet<const PHINode*, 8> PHIs; | |
1487 if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(CI, GV, PHIs)) | |
1488 return false; | |
1489 | |
1490 // If we have a global that is only initialized with a fixed size malloc, | |
1491 // transform the program to use global memory instead of malloc'd memory. | |
1492 // This eliminates dynamic allocation, avoids an indirection accessing the | |
1493 // data, and exposes the resultant global to further GlobalOpt. | |
1494 // We cannot optimize the malloc if we cannot determine the malloc array size. | |
1495 Value *NElems = getMallocArraySize(CI, TD, TLI, true); | |
1496 if (!NElems) | |
1497 return false; | |
1498 | |
1499 if (ConstantInt *NElements = dyn_cast<ConstantInt>(NElems)) | |
1500 // Restrict this transformation to only working on small allocations | |
1501 // (2048 bytes currently), as we don't want to introduce a 16M global or | |
1502 // something. | |
1503 if (NElements->getZExtValue() * TD->getTypeAllocSize(AllocTy) < 2048) { | |
1504 GVI = OptimizeGlobalAddressOfMalloc(GV, CI, AllocTy, NElements, TD, TLI); | |
1505 return true; | |
1506 } | |
1507 | |
1508 // If the allocation is an array of structures, consider transforming this | |
1509 // into multiple malloc'd arrays, one for each field. This is basically | |
1510 // SRoA for malloc'd memory. | |
1511 | |
1512 if (Ordering != NotAtomic) | |
1513 return false; | |
1514 | |
1515 // If this is an allocation of a fixed size array of structs, analyze as a | |
1516 // variable size array. malloc [100 x struct],1 -> malloc struct, 100 | |
1517 if (NElems == ConstantInt::get(CI->getArgOperand(0)->getType(), 1)) | |
1518 if (ArrayType *AT = dyn_cast<ArrayType>(AllocTy)) | |
1519 AllocTy = AT->getElementType(); | |
1520 | |
1521 StructType *AllocSTy = dyn_cast<StructType>(AllocTy); | |
1522 if (!AllocSTy) | |
1523 return false; | |
1524 | |
1525 // If the structure has an unreasonable number of fields, leave it | |
1526 // alone. | |
1527 if (AllocSTy->getNumElements() <= 16 && AllocSTy->getNumElements() != 0 && | |
1528 AllGlobalLoadUsesSimpleEnoughForHeapSRA(GV, CI)) { | |
1529 | |
1530 // If this is a fixed size array, transform the Malloc to be an alloc of | |
1531 // structs. malloc [100 x struct],1 -> malloc struct, 100 | |
1532 if (ArrayType *AT = dyn_cast<ArrayType>(getMallocAllocatedType(CI, TLI))) { | |
1533 Type *IntPtrTy = TD->getIntPtrType(CI->getType()); | |
1534 unsigned TypeSize = TD->getStructLayout(AllocSTy)->getSizeInBytes(); | |
1535 Value *AllocSize = ConstantInt::get(IntPtrTy, TypeSize); | |
1536 Value *NumElements = ConstantInt::get(IntPtrTy, AT->getNumElements()); | |
1537 Instruction *Malloc = CallInst::CreateMalloc(CI, IntPtrTy, AllocSTy, | |
1538 AllocSize, NumElements, | |
1539 0, CI->getName()); | |
1540 Instruction *Cast = new BitCastInst(Malloc, CI->getType(), "tmp", CI); | |
1541 CI->replaceAllUsesWith(Cast); | |
1542 CI->eraseFromParent(); | |
1543 if (BitCastInst *BCI = dyn_cast<BitCastInst>(Malloc)) | |
1544 CI = cast<CallInst>(BCI->getOperand(0)); | |
1545 else | |
1546 CI = cast<CallInst>(Malloc); | |
1547 } | |
1548 | |
1549 GVI = PerformHeapAllocSRoA(GV, CI, getMallocArraySize(CI, TD, TLI, true), | |
1550 TD, TLI); | |
1551 return true; | |
1552 } | |
1553 | |
1554 return false; | |
1555 } | |
1556 | |
1557 // OptimizeOnceStoredGlobal - Try to optimize globals based on the knowledge | |
1558 // that only one value (besides its initializer) is ever stored to the global. | |
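//
// For example (a sketch; @P is an invented name): given
//   @P = internal global i32* null
// if the single stored value is a constant, we can optimize away uses of the
// loaded value that would trap on null; if it is a malloc, we defer to
// TryToOptimizeStoreOfMallocToGlobal above.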
1559 static bool OptimizeOnceStoredGlobal(GlobalVariable *GV, Value *StoredOnceVal, | |
1560 AtomicOrdering Ordering, | |
1561 Module::global_iterator &GVI, | |
1562 DataLayout *TD, TargetLibraryInfo *TLI) { | |
1563 // Ignore no-op GEPs and bitcasts. | |
1564 StoredOnceVal = StoredOnceVal->stripPointerCasts(); | |
1565 | |
1566 // If we are dealing with a pointer global that is initialized to null and | |
1567 // only has one (non-null) value stored into it, then we can optimize any | |
1568 // users of the loaded value (often calls and loads) that would trap if the | |
1569 // value was null. | |
1570 if (GV->getInitializer()->getType()->isPointerTy() && | |
1571 GV->getInitializer()->isNullValue()) { | |
1572 if (Constant *SOVC = dyn_cast<Constant>(StoredOnceVal)) { | |
1573 if (GV->getInitializer()->getType() != SOVC->getType()) | |
1574 SOVC = ConstantExpr::getBitCast(SOVC, GV->getInitializer()->getType()); | |
1575 | |
1576 // Optimize away any trapping uses of the loaded value. | |
1577 if (OptimizeAwayTrappingUsesOfLoads(GV, SOVC, TD, TLI)) | |
1578 return true; | |
1579 } else if (CallInst *CI = extractMallocCall(StoredOnceVal, TLI)) { | |
1580 Type *MallocType = getMallocAllocatedType(CI, TLI); | |
1581 if (MallocType && | |
1582 TryToOptimizeStoreOfMallocToGlobal(GV, CI, MallocType, Ordering, GVI, | |
1583 TD, TLI)) | |
1584 return true; | |
1585 } | |
1586 } | |
1587 | |
1588 return false; | |
1589 } | |
1590 | |
1591 /// TryToShrinkGlobalToBoolean - At this point, we have learned that the only | |
1592 /// two values ever stored into GV are its initializer and OtherVal. See if we | |
1593 /// can shrink the global into a boolean and select between the two values | |
1594 /// whenever it is used. This exposes the values to other scalar optimizations. | |
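///
/// For illustration only (a sketch; names invented): if @G is an i32 global
/// initialized to 0 whose only other stored value is 42, it becomes
///   @G.b = internal global i1 false
/// stores of 42 become "store i1 true, i1* @G.b", and each load becomes
///   %b = load i1* @G.b
///   %v = select i1 %b, i32 42, i32 0   ; a zext instead when the pair is 0/1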
1595 static bool TryToShrinkGlobalToBoolean(GlobalVariable *GV, Constant *OtherVal) { | |
1596 Type *GVElType = GV->getType()->getElementType(); | |
1597 | |
1598 // If GVElType is already i1, it is already shrunk. If the type of the GV is | |
1599 // an FP value, pointer or vector, don't do this optimization because a select | |
1600 // between them is very expensive and unlikely to lead to later | |
1601 // simplification. In these cases, we typically end up with "cond ? v1 : v2" | |
1602 // where v1 and v2 both require constant pool loads, a big loss. | |
1603 if (GVElType == Type::getInt1Ty(GV->getContext()) || | |
1604 GVElType->isFloatingPointTy() || | |
1605 GVElType->isPointerTy() || GVElType->isVectorTy()) | |
1606 return false; | |
1607 | |
1608 // Walk the use list of the global, checking that all the uses are loads or | |
1609 // stores. If there is anything else, bail out. | |
1610 for (Value::use_iterator I = GV->use_begin(), E = GV->use_end(); I != E; ++I){ | |
1611 User *U = *I; | |
1612 if (!isa<LoadInst>(U) && !isa<StoreInst>(U)) | |
1613 return false; | |
1614 } | |
1615 | |
1616 DEBUG(dbgs() << " *** SHRINKING TO BOOL: " << *GV); | |
1617 | |
1618 // Create the new global, initializing it to false. | |
1619 GlobalVariable *NewGV = new GlobalVariable(Type::getInt1Ty(GV->getContext()), | |
1620 false, | |
1621 GlobalValue::InternalLinkage, | |
1622 ConstantInt::getFalse(GV->getContext()), | |
1623 GV->getName()+".b", | |
1624 GV->getThreadLocalMode(), | |
1625 GV->getType()->getAddressSpace()); | |
1626 GV->getParent()->getGlobalList().insert(GV, NewGV); | |
1627 | |
1628 Constant *InitVal = GV->getInitializer(); | |
1629 assert(InitVal->getType() != Type::getInt1Ty(GV->getContext()) && | |
1630 "No reason to shrink to bool!"); | |
1631 | |
1632 // If initialized to zero and storing one into the global, we can use a cast | |
1633 // instead of a select to synthesize the desired value. | |
1634 bool IsOneZero = false; | |
1635 if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal)) | |
1636 IsOneZero = InitVal->isNullValue() && CI->isOne(); | |
1637 | |
1638 while (!GV->use_empty()) { | |
1639 Instruction *UI = cast<Instruction>(GV->use_back()); | |
1640 if (StoreInst *SI = dyn_cast<StoreInst>(UI)) { | |
1641 // Change the store into a boolean store. | |
1642 bool StoringOther = SI->getOperand(0) == OtherVal; | |
1643 // Only do this if we weren't storing a loaded value. | |
1644 Value *StoreVal; | |
1645 if (StoringOther || SI->getOperand(0) == InitVal) { | |
1646 StoreVal = ConstantInt::get(Type::getInt1Ty(GV->getContext()), | |
1647 StoringOther); | |
1648 } else { | |
1649 // Otherwise, we are storing a previously loaded copy. To do this, | |
1650 // change the copy from copying the original value to just copying the | |
1651 // bool. | |
1652 Instruction *StoredVal = cast<Instruction>(SI->getOperand(0)); | |
1653 | |
1654 // If we've already replaced the input, StoredVal will be a cast or | |
1655 // select instruction. If not, it will be a load of the original | |
1656 // global. | |
1657 if (LoadInst *LI = dyn_cast<LoadInst>(StoredVal)) { | |
1658 assert(LI->getOperand(0) == GV && "Not a copy!"); | |
1659 // Insert a new load, to preserve the saved value. | |
1660 StoreVal = new LoadInst(NewGV, LI->getName()+".b", false, 0, | |
1661 LI->getOrdering(), LI->getSynchScope(), LI); | |
1662 } else { | |
1663 assert((isa<CastInst>(StoredVal) || isa<SelectInst>(StoredVal)) && | |
1664 "This is not a form that we understand!"); | |
1665 StoreVal = StoredVal->getOperand(0); | |
1666 assert(isa<LoadInst>(StoreVal) && "Not a load of NewGV!"); | |
1667 } | |
1668 } | |
1669 new StoreInst(StoreVal, NewGV, false, 0, | |
1670 SI->getOrdering(), SI->getSynchScope(), SI); | |
1671 } else { | |
1672 // Change the load into a load of bool then a select. | |
1673 LoadInst *LI = cast<LoadInst>(UI); | |
1674 LoadInst *NLI = new LoadInst(NewGV, LI->getName()+".b", false, 0, | |
1675 LI->getOrdering(), LI->getSynchScope(), LI); | |
1676 Value *NSI; | |
1677 if (IsOneZero) | |
1678 NSI = new ZExtInst(NLI, LI->getType(), "", LI); | |
1679 else | |
1680 NSI = SelectInst::Create(NLI, OtherVal, InitVal, "", LI); | |
1681 NSI->takeName(LI); | |
1682 LI->replaceAllUsesWith(NSI); | |
1683 } | |
1684 UI->eraseFromParent(); | |
1685 } | |
1686 | |
1687 // Retain the name of the old global variable. People who are debugging their | |
1688 // programs may expect these variables to be named the same. | |
1689 NewGV->takeName(GV); | |
1690 GV->eraseFromParent(); | |
1691 return true; | |
1692 } | |
1693 | |
1694 | |
1695 /// ProcessGlobal - Analyze the specified global variable and optimize it if | |
1696 /// possible. If we make a change, return true. | |
1697 bool GlobalOpt::ProcessGlobal(GlobalVariable *GV, | |
1698 Module::global_iterator &GVI) { | |
1699 if (!GV->isDiscardableIfUnused()) | |
1700 return false; | |
1701 | |
1702 // Do more involved optimizations if the global is internal. | |
1703 GV->removeDeadConstantUsers(); | |
1704 | |
1705 if (GV->use_empty()) { | |
1706 DEBUG(dbgs() << "GLOBAL DEAD: " << *GV); | |
1707 GV->eraseFromParent(); | |
1708 ++NumDeleted; | |
1709 return true; | |
1710 } | |
1711 | |
1712 if (!GV->hasLocalLinkage()) | |
1713 return false; | |
1714 | |
1715 GlobalStatus GS; | |
1716 | |
1717 if (GlobalStatus::analyzeGlobal(GV, GS)) | |
1718 return false; | |
1719 | |
1720 if (!GS.IsCompared && !GV->hasUnnamedAddr()) { | |
1721 GV->setUnnamedAddr(true); | |
1722 NumUnnamed++; | |
1723 } | |
1724 | |
1725 if (GV->isConstant() || !GV->hasInitializer()) | |
1726 return false; | |
1727 | |
1728 return ProcessInternalGlobal(GV, GVI, GS); | |
1729 } | |
1730 | |
1731 /// ProcessInternalGlobal - Analyze the specified global variable and optimize | |
1732 /// it if possible. If we make a change, return true. | |
1733 bool GlobalOpt::ProcessInternalGlobal(GlobalVariable *GV, | |
1734 Module::global_iterator &GVI, | |
1735 const GlobalStatus &GS) { | |
1736 // If this is a first-class global that has only one accessing function, | |
1737 // and that function is main (which we know is not recursive), we replace | |
1738 // the global with a local alloca in this function. | |
1739 // | |
1740 // NOTE: It doesn't make sense to promote non-single-value types since we | |
1741 // are just replacing static memory with stack memory. | |
1742 // | |
1743 // If the global is in a different address space, don't bring it to the stack. | |
1744 if (!GS.HasMultipleAccessingFunctions && | |
1745 GS.AccessingFunction && !GS.HasNonInstructionUser && | |
1746 GV->getType()->getElementType()->isSingleValueType() && | |
1747 GS.AccessingFunction->getName() == "main" && | |
1748 GS.AccessingFunction->hasExternalLinkage() && | |
1749 GV->getType()->getAddressSpace() == 0) { | |
1750 DEBUG(dbgs() << "LOCALIZING GLOBAL: " << *GV); | |
1751 Instruction &FirstI = const_cast<Instruction&>(*GS.AccessingFunction | |
1752 ->getEntryBlock().begin()); | |
1753 Type *ElemTy = GV->getType()->getElementType(); | |
1754 // FIXME: Pass Global's alignment when globals have alignment | |
1755 AllocaInst *Alloca = new AllocaInst(ElemTy, NULL, GV->getName(), &FirstI); | |
1756 if (!isa<UndefValue>(GV->getInitializer())) | |
1757 new StoreInst(GV->getInitializer(), Alloca, &FirstI); | |
1758 | |
1759 GV->replaceAllUsesWith(Alloca); | |
1760 GV->eraseFromParent(); | |
1761 ++NumLocalized; | |
1762 return true; | |
1763 } | |
1764 | |
1765 // If the global is never loaded (but may be stored to), it is dead. | |
1766 // Delete it now. | |
1767 if (!GS.IsLoaded) { | |
1768 DEBUG(dbgs() << "GLOBAL NEVER LOADED: " << *GV); | |
1769 | |
1770 bool Changed; | |
1771 if (isLeakCheckerRoot(GV)) { | |
1772 // Delete any constant stores to the global. | |
1773 Changed = CleanupPointerRootUsers(GV, TLI); | |
1774 } else { | |
1775 // Delete any stores we can find to the global. We may not be able to | |
1776 // make it completely dead though. | |
1777 Changed = CleanupConstantGlobalUsers(GV, GV->getInitializer(), TD, TLI); | |
1778 } | |
1779 | |
1780 // If the global is dead now, delete it. | |
1781 if (GV->use_empty()) { | |
1782 GV->eraseFromParent(); | |
1783 ++NumDeleted; | |
1784 Changed = true; | |
1785 } | |
1786 return Changed; | |
1787 | |
1788 } else if (GS.StoredType <= GlobalStatus::InitializerStored) { | |
1789 DEBUG(dbgs() << "MARKING CONSTANT: " << *GV << "\n"); | |
1790 GV->setConstant(true); | |
1791 | |
1792 // Clean up any obviously simplifiable users now. | |
1793 CleanupConstantGlobalUsers(GV, GV->getInitializer(), TD, TLI); | |
1794 | |
1795 // If the global is dead now, just nuke it. | |
1796 if (GV->use_empty()) { | |
1797 DEBUG(dbgs() << " *** Marking constant allowed us to simplify " | |
1798 << "all users and delete global!\n"); | |
1799 GV->eraseFromParent(); | |
1800 ++NumDeleted; | |
1801 } | |
1802 | |
1803 ++NumMarked; | |
1804 return true; | |
1805 } else if (!GV->getInitializer()->getType()->isSingleValueType()) { | |
1806 if (DataLayout *TD = getAnalysisIfAvailable<DataLayout>()) | |
1807 if (GlobalVariable *FirstNewGV = SRAGlobal(GV, *TD)) { | |
1808 GVI = FirstNewGV; // Don't skip the newly produced globals! | |
1809 return true; | |
1810 } | |
1811 } else if (GS.StoredType == GlobalStatus::StoredOnce) { | |
1812 // If the initial value for the global was an undef value, and if only | |
1813 // one other value was stored into it, we can just change the | |
1814 // initializer to be the stored value, then delete all stores to the | |
1815 // global. This allows us to mark it constant. | |
1816 if (Constant *SOVConstant = dyn_cast<Constant>(GS.StoredOnceValue)) | |
1817 if (isa<UndefValue>(GV->getInitializer())) { | |
1818 // Change the initial value here. | |
1819 GV->setInitializer(SOVConstant); | |
1820 | |
1821 // Clean up any obviously simplifiable users now. | |
1822 CleanupConstantGlobalUsers(GV, GV->getInitializer(), TD, TLI); | |
1823 | |
1824 if (GV->use_empty()) { | |
1825 DEBUG(dbgs() << " *** Substituting initializer allowed us to " | |
1826 << "simplify all users and delete global!\n"); | |
1827 GV->eraseFromParent(); | |
1828 ++NumDeleted; | |
1829 } else { | |
1830 GVI = GV; | |
1831 } | |
1832 ++NumSubstitute; | |
1833 return true; | |
1834 } | |
1835 | |
1836 // Try to optimize globals based on the knowledge that only one value | |
1837 // (besides its initializer) is ever stored to the global. | |
1838 if (OptimizeOnceStoredGlobal(GV, GS.StoredOnceValue, GS.Ordering, GVI, | |
1839 TD, TLI)) | |
1840 return true; | |
1841 | |
1842 // Otherwise, if the global was not a boolean, we can shrink it to be a | |
1843 // boolean. | |
1844 if (Constant *SOVConstant = dyn_cast<Constant>(GS.StoredOnceValue)) { | |
1845 if (GS.Ordering == NotAtomic) { | |
1846 if (TryToShrinkGlobalToBoolean(GV, SOVConstant)) { | |
1847 ++NumShrunkToBool; | |
1848 return true; | |
1849 } | |
1850 } | |
1851 } | |
1852 } | |
1853 | |
1854 return false; | |
1855 } | |
1856 | |
1857 /// ChangeCalleesToFastCall - Walk all of the direct calls of the specified | |
1858 /// function, changing them to FastCC. | |
1859 static void ChangeCalleesToFastCall(Function *F) { | |
1860 for (Value::use_iterator UI = F->use_begin(), E = F->use_end(); UI != E;++UI){ | |
1861 if (isa<BlockAddress>(*UI)) | |
1862 continue; | |
1863 CallSite User(cast<Instruction>(*UI)); | |
1864 User.setCallingConv(CallingConv::Fast); | |
1865 } | |
1866 } | |
1867 | |
1868 static AttributeSet StripNest(LLVMContext &C, const AttributeSet &Attrs) { | |
1869 for (unsigned i = 0, e = Attrs.getNumSlots(); i != e; ++i) { | |
1870 unsigned Index = Attrs.getSlotIndex(i); | |
1871 if (!Attrs.getSlotAttributes(i).hasAttribute(Index, Attribute::Nest)) | |
1872 continue; | |
1873 | |
1874 // There can be only one. | |
1875 return Attrs.removeAttribute(C, Index, Attribute::Nest); | |
1876 } | |
1877 | |
1878 return Attrs; | |
1879 } | |
1880 | |
1881 static void RemoveNestAttribute(Function *F) { | |
1882 F->setAttributes(StripNest(F->getContext(), F->getAttributes())); | |
1883 for (Value::use_iterator UI = F->use_begin(), E = F->use_end(); UI != E;++UI){ | |
1884 if (isa<BlockAddress>(*UI)) | |
1885 continue; | |
1886 CallSite User(cast<Instruction>(*UI)); | |
1887 User.setAttributes(StripNest(F->getContext(), User.getAttributes())); | |
1888 } | |
1889 } | |
1890 | |
1891 bool GlobalOpt::OptimizeFunctions(Module &M) { | |
1892 bool Changed = false; | |
1893 // Optimize functions. | |
1894 for (Module::iterator FI = M.begin(), E = M.end(); FI != E; ) { | |
1895 Function *F = FI++; | |
1896 // Functions without names cannot be referenced outside this module. | |
1897 if (!F->hasName() && !F->isDeclaration()) | |
1898 F->setLinkage(GlobalValue::InternalLinkage); | |
1899 F->removeDeadConstantUsers(); | |
1900 if (F->isDefTriviallyDead()) { | |
1901 F->eraseFromParent(); | |
1902 Changed = true; | |
1903 ++NumFnDeleted; | |
1904 } else if (F->hasLocalLinkage()) { | |
1905 if (F->getCallingConv() == CallingConv::C && !F->isVarArg() && | |
1906 !F->hasAddressTaken()) { | |
1907 // If this function has C calling conventions, is not a varargs | |
1908 // function, and is only called directly, promote it to use the Fast | |
1909 // calling convention. | |
1910 F->setCallingConv(CallingConv::Fast); | |
1911 ChangeCalleesToFastCall(F); | |
1912 ++NumFastCallFns; | |
1913 Changed = true; | |
1914 } | |
1915 | |
1916 if (F->getAttributes().hasAttrSomewhere(Attribute::Nest) && | |
1917 !F->hasAddressTaken()) { | |
1918 // The function is not used by a trampoline intrinsic, so it is safe | |
1919 // to remove the 'nest' attribute. | |
1920 RemoveNestAttribute(F); | |
1921 ++NumNestRemoved; | |
1922 Changed = true; | |
1923 } | |
1924 } | |
1925 } | |
1926 return Changed; | |
1927 } | |
1928 | |
1929 bool GlobalOpt::OptimizeGlobalVars(Module &M) { | |
1930 bool Changed = false; | |
1931 for (Module::global_iterator GVI = M.global_begin(), E = M.global_end(); | |
1932 GVI != E; ) { | |
1933 GlobalVariable *GV = GVI++; | |
1934 // Global variables without names cannot be referenced outside this module. | |
1935 if (!GV->hasName() && !GV->isDeclaration()) | |
1936 GV->setLinkage(GlobalValue::InternalLinkage); | |
1937 // Simplify the initializer. | |
1938 if (GV->hasInitializer()) | |
1939 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(GV->getInitializer())) { | |
1940 Constant *New = ConstantFoldConstantExpression(CE, TD, TLI); | |
1941 if (New && New != CE) | |
1942 GV->setInitializer(New); | |
1943 } | |
1944 | |
1945 Changed |= ProcessGlobal(GV, GVI); | |
1946 } | |
1947 return Changed; | |
1948 } | |
1949 | |
1950 /// FindGlobalCtors - Find the llvm.global_ctors list, verifying that all | |
1951 /// initializers have an init priority of 65535. | |
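///
/// A conforming list typically looks like this sketch (@init is an invented
/// name):
///   @llvm.global_ctors = appending global [1 x { i32, void ()* }]
///     [{ i32, void ()* } { i32 65535, void ()* @init }]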
1952 GlobalVariable *GlobalOpt::FindGlobalCtors(Module &M) { | |
1953 GlobalVariable *GV = M.getGlobalVariable("llvm.global_ctors"); | |
1954 if (GV == 0) return 0; | |
1955 | |
1956 // Verify that the initializer is simple enough for us to handle. We are | |
1957 // only allowed to optimize the initializer if it is unique. | |
1958 if (!GV->hasUniqueInitializer()) return 0; | |
1959 | |
1960 if (isa<ConstantAggregateZero>(GV->getInitializer())) | |
1961 return GV; | |
1962 ConstantArray *CA = cast<ConstantArray>(GV->getInitializer()); | |
1963 | |
1964 for (User::op_iterator i = CA->op_begin(), e = CA->op_end(); i != e; ++i) { | |
1965 if (isa<ConstantAggregateZero>(*i)) | |
1966 continue; | |
1967 ConstantStruct *CS = cast<ConstantStruct>(*i); | |
1968 if (isa<ConstantPointerNull>(CS->getOperand(1))) | |
1969 continue; | |
1970 | |
1971 // Must have a function or null ptr. | |
1972 if (!isa<Function>(CS->getOperand(1))) | |
1973 return 0; | |
1974 | |
1975 // Init priority must be standard. | |
1976 ConstantInt *CI = cast<ConstantInt>(CS->getOperand(0)); | |
1977 if (CI->getZExtValue() != 65535) | |
1978 return 0; | |
1979 } | |
1980 | |
1981 return GV; | |
1982 } | |
1983 | |
1984 /// ParseGlobalCtors - Given a llvm.global_ctors list that we can understand, | |
1985 /// return a list of the functions and null terminator as a vector. | |
1986 static std::vector<Function*> ParseGlobalCtors(GlobalVariable *GV) { | |
1987 if (GV->getInitializer()->isNullValue()) | |
1988 return std::vector<Function*>(); | |
1989 ConstantArray *CA = cast<ConstantArray>(GV->getInitializer()); | |
1990 std::vector<Function*> Result; | |
1991 Result.reserve(CA->getNumOperands()); | |
1992 for (User::op_iterator i = CA->op_begin(), e = CA->op_end(); i != e; ++i) { | |
1993 ConstantStruct *CS = cast<ConstantStruct>(*i); | |
1994 Result.push_back(dyn_cast<Function>(CS->getOperand(1))); | |
1995 } | |
1996 return Result; | |
1997 } | |
1998 | |
1999 /// InstallGlobalCtors - Given the llvm.global_ctors list and a new list of | |
2000 /// constructors, install the new array, returning the new global to use. | |
2001 static GlobalVariable *InstallGlobalCtors(GlobalVariable *GCL, | |
2002 const std::vector<Function*> &Ctors) { | |
2003 // If we made a change, reassemble the initializer list. | |
2004 Constant *CSVals[2]; | |
2005 CSVals[0] = ConstantInt::get(Type::getInt32Ty(GCL->getContext()), 65535); | |
2006 CSVals[1] = 0; | |
2007 | |
2008 StructType *StructTy = | |
2009 cast<StructType>(GCL->getType()->getElementType()->getArrayElementType()); | |
2010 | |
2011 // Create the new init list. | |
2012 std::vector<Constant*> CAList; | |
2013 for (unsigned i = 0, e = Ctors.size(); i != e; ++i) { | |
2014 if (Ctors[i]) { | |
2015 CSVals[1] = Ctors[i]; | |
2016 } else { | |
2017 Type *FTy = FunctionType::get(Type::getVoidTy(GCL->getContext()), | |
2018 false); | |
2019 PointerType *PFTy = PointerType::getUnqual(FTy); | |
2020 CSVals[1] = Constant::getNullValue(PFTy); | |
2021 CSVals[0] = ConstantInt::get(Type::getInt32Ty(GCL->getContext()), | |
2022 0x7fffffff); | |
2023 } | |
2024 CAList.push_back(ConstantStruct::get(StructTy, CSVals)); | |
2025 } | |
2026 | |
2027 // Create the array initializer. | |
2028 Constant *CA = ConstantArray::get(ArrayType::get(StructTy, | |
2029 CAList.size()), CAList); | |
2030 | |
2031 // If we didn't change the number of elements, don't create a new GV. | |
2032 if (CA->getType() == GCL->getInitializer()->getType()) { | |
2033 GCL->setInitializer(CA); | |
2034 return GCL; | |
2035 } | |
2036 | |
2037 // Create the new global and insert it next to the existing list. | |
2038 GlobalVariable *NGV = new GlobalVariable(CA->getType(), GCL->isConstant(), | |
2039 GCL->getLinkage(), CA, "", | |
2040 GCL->getThreadLocalMode()); | |
2041 GCL->getParent()->getGlobalList().insert(GCL, NGV); | |
2042 NGV->takeName(GCL); | |
2043 | |
2044 // Nuke the old list, replacing any uses with the new one. | |
2045 if (!GCL->use_empty()) { | |
2046 Constant *V = NGV; | |
2047 if (V->getType() != GCL->getType()) | |
2048 V = ConstantExpr::getBitCast(V, GCL->getType()); | |
2049 GCL->replaceAllUsesWith(V); | |
2050 } | |
2051 GCL->eraseFromParent(); | |
2052 | |
2053 if (Ctors.size()) | |
2054 return NGV; | |
2055 else | |
2056 return 0; | |
2057 } | |
2058 | |
2059 | |
2060 static inline bool | |
2061 isSimpleEnoughValueToCommit(Constant *C, | |
2062 SmallPtrSet<Constant*, 8> &SimpleConstants, | |
2063 const DataLayout *TD); | |
2064 | |
2065 | |
2066 /// isSimpleEnoughValueToCommit - Return true if the specified constant can be | |
2067 /// handled by the code generator. We don't want to generate something like: | |
2068 /// void *X = &X/42; | |
2069 /// because the code generator doesn't have a relocation that can handle that. | |
2070 /// | |
2071 /// This function should be called if C was not found (but just got inserted) | |
2072 /// in SimpleConstants to avoid having to rescan the same constants all the | |
2073 /// time. | |
2074 static bool isSimpleEnoughValueToCommitHelper(Constant *C, | |
2075 SmallPtrSet<Constant*, 8> &SimpleConstants, | |
2076 const DataLayout *TD) { | |
2077 // Simple integer, undef, constant aggregate zero, global addresses, etc are | |
2078 // all supported. | |
2079 if (C->getNumOperands() == 0 || isa<BlockAddress>(C) || | |
2080 isa<GlobalValue>(C)) | |
2081 return true; | |
2082 | |
2083 // Aggregate values are safe if all their elements are. | |
2084 if (isa<ConstantArray>(C) || isa<ConstantStruct>(C) || | |
2085 isa<ConstantVector>(C)) { | |
2086 for (unsigned i = 0, e = C->getNumOperands(); i != e; ++i) { | |
2087 Constant *Op = cast<Constant>(C->getOperand(i)); | |
2088 if (!isSimpleEnoughValueToCommit(Op, SimpleConstants, TD)) | |
2089 return false; | |
2090 } | |
2091 return true; | |
2092 } | |
2093 | |
2094 // We don't know exactly what relocations are allowed in constant expressions, | |
2095 // so we allow &global+constantoffset, which is safe and uniformly supported | |
2096 // across targets. | |
2097 ConstantExpr *CE = cast<ConstantExpr>(C); | |
2098 switch (CE->getOpcode()) { | |
2099 case Instruction::BitCast: | |
2100 // Bitcast is fine if the casted value is fine. | |
2101 return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, TD); | |
2102 | |
2103 case Instruction::IntToPtr: | |
2104 case Instruction::PtrToInt: | |
2105 // int <=> ptr is fine if the int type is the same size as the | |
2106 // pointer type. | |
2107 if (!TD || TD->getTypeSizeInBits(CE->getType()) != | |
2108 TD->getTypeSizeInBits(CE->getOperand(0)->getType())) | |
2109 return false; | |
2110 return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, TD); | |
2111 | |
2112 // GEP is fine if it is simple + constant offset. | |
2113 case Instruction::GetElementPtr: | |
2114 for (unsigned i = 1, e = CE->getNumOperands(); i != e; ++i) | |
2115 if (!isa<ConstantInt>(CE->getOperand(i))) | |
2116 return false; | |
2117 return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, TD); | |
2118 | |
2119 case Instruction::Add: | |
2120 // We allow simple+cst. | |
2121 if (!isa<ConstantInt>(CE->getOperand(1))) | |
2122 return false; | |
2123 return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants, TD); | |
2124 } | |
2125 return false; | |
2126 } | |
2127 | |
2128 static inline bool | |
2129 isSimpleEnoughValueToCommit(Constant *C, | |
2130 SmallPtrSet<Constant*, 8> &SimpleConstants, | |
2131 const DataLayout *TD) { | |
2132 // If we already checked this constant, we win. | |
2133 if (!SimpleConstants.insert(C)) return true; | |
2134 // Check the constant. | |
2135 return isSimpleEnoughValueToCommitHelper(C, SimpleConstants, TD); | |
2136 } | |
2137 | |
2138 | |
2139 /// isSimpleEnoughPointerToCommit - Return true if this constant is simple | |
2140 /// enough for us to understand. In particular, if it is a cast of anything | |
2141 /// other than one pointer type to another pointer type, we punt. | |
2142 /// We basically just support direct accesses to globals and GEPs of | |
2143 /// globals. This should be kept up to date with CommitValueTo. | |
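///
/// For example (a sketch), both of these are simple enough:
///   @G
///   getelementptr inbounds ([4 x i32]* @A, i32 0, i32 2)
/// while a ptrtoint or pointer-arithmetic expression is not.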
2144 static bool isSimpleEnoughPointerToCommit(Constant *C) { | |
2145 // Conservatively, avoid aggregate types. This is because we don't | |
2146 // want to worry about them partially overlapping other stores. | |
2147 if (!cast<PointerType>(C->getType())->getElementType()->isSingleValueType()) | |
2148 return false; | |
2149 | |
2150 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(C)) | |
2151 // Do not allow weak/*_odr/linkonce/dllimport/dllexport linkage or | |
2152 // external globals. | |
2153 return GV->hasUniqueInitializer(); | |
2154 | |
2155 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) { | |
2156 // Handle a constantexpr gep. | |
2157 if (CE->getOpcode() == Instruction::GetElementPtr && | |
2158 isa<GlobalVariable>(CE->getOperand(0)) && | |
2159 cast<GEPOperator>(CE)->isInBounds()) { | |
2160 GlobalVariable *GV = cast<GlobalVariable>(CE->getOperand(0)); | |
2161 // Do not allow weak/*_odr/linkonce/dllimport/dllexport linkage or | |
2162 // external globals. | |
2163 if (!GV->hasUniqueInitializer()) | |
2164 return false; | |
2165 | |
2166 // The first index must be zero. | |
2167 ConstantInt *CI = dyn_cast<ConstantInt>(*llvm::next(CE->op_begin())); | |
2168 if (!CI || !CI->isZero()) return false; | |
2169 | |
2170 // The remaining indices must be compile-time known integers within the | |
2171 // notional bounds of the corresponding static array types. | |
2172 if (!CE->isGEPWithNoNotionalOverIndexing()) | |
2173 return false; | |
2174 | |
2175 return ConstantFoldLoadThroughGEPConstantExpr(GV->getInitializer(), CE); | |
2176 | |
2177 // A constantexpr bitcast from a pointer to another pointer is a no-op, | |
2178 // and we know how to evaluate it by moving the bitcast from the pointer | |
2179 // operand to the value operand. | |
2180 } else if (CE->getOpcode() == Instruction::BitCast && | |
2181 isa<GlobalVariable>(CE->getOperand(0))) { | |
2182 // Do not allow weak/*_odr/linkonce/dllimport/dllexport linkage or | |
2183 // external globals. | |
2184 return cast<GlobalVariable>(CE->getOperand(0))->hasUniqueInitializer(); | |
2185 } | |
2186 } | |
2187 | |
2188 return false; | |
2189 } | |
2190 | |
2191 /// EvaluateStoreInto - Evaluate a piece of a constantexpr store into a global | |
2192 /// initializer. This returns 'Init' modified to reflect 'Val' stored into it. | |
2193 /// At this point, the GEP operands of Addr [0, OpNo) have been stepped into. | |
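///
/// For example (a sketch; @G is invented): committing i32 7 through
///   getelementptr inbounds ({ i32, [2 x i32] }* @G, i32 0, i32 1, i32 1)
/// rebuilds @G's initializer as a new struct constant whose second member is
/// an array constant with element 1 replaced by i32 7.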
2194 static Constant *EvaluateStoreInto(Constant *Init, Constant *Val, | |
2195 ConstantExpr *Addr, unsigned OpNo) { | |
2196 // Base case of the recursion. | |
2197 if (OpNo == Addr->getNumOperands()) { | |
2198 assert(Val->getType() == Init->getType() && "Type mismatch!"); | |
2199 return Val; | |
2200 } | |
2201 | |
2202 SmallVector<Constant*, 32> Elts; | |
2203 if (StructType *STy = dyn_cast<StructType>(Init->getType())) { | |
2204 // Break up the constant into its elements. | |
2205 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) | |
2206 Elts.push_back(Init->getAggregateElement(i)); | |
2207 | |
2208 // Replace the element that we are supposed to. | |
2209 ConstantInt *CU = cast<ConstantInt>(Addr->getOperand(OpNo)); | |
2210 unsigned Idx = CU->getZExtValue(); | |
2211 assert(Idx < STy->getNumElements() && "Struct index out of range!"); | |
2212 Elts[Idx] = EvaluateStoreInto(Elts[Idx], Val, Addr, OpNo+1); | |
2213 | |
2214 // Return the modified struct. | |
2215 return ConstantStruct::get(STy, Elts); | |
2216 } | |
2217 | |
2218 ConstantInt *CI = cast<ConstantInt>(Addr->getOperand(OpNo)); | |
2219 SequentialType *InitTy = cast<SequentialType>(Init->getType()); | |
2220 | |
2221 uint64_t NumElts; | |
2222 if (ArrayType *ATy = dyn_cast<ArrayType>(InitTy)) | |
2223 NumElts = ATy->getNumElements(); | |
2224 else | |
2225 NumElts = InitTy->getVectorNumElements(); | |
2226 | |
2227 // Break up the array into elements. | |
2228 for (uint64_t i = 0, e = NumElts; i != e; ++i) | |
2229 Elts.push_back(Init->getAggregateElement(i)); | |
2230 | |
2231 assert(CI->getZExtValue() < NumElts); | |
2232 Elts[CI->getZExtValue()] = | |
2233 EvaluateStoreInto(Elts[CI->getZExtValue()], Val, Addr, OpNo+1); | |
2234 | |
2235 if (Init->getType()->isArrayTy()) | |
2236 return ConstantArray::get(cast<ArrayType>(InitTy), Elts); | |
2237 return ConstantVector::get(Elts); | |
2238 } | |
2239 | |
2240 /// CommitValueTo - We have decided that Addr (which satisfies the predicate | |
2241 /// isSimpleEnoughPointerToCommit) should get Val as its value. Make it happen. | |
2242 static void CommitValueTo(Constant *Val, Constant *Addr) { | |
2243 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Addr)) { | |
2244 assert(GV->hasInitializer()); | |
2245 GV->setInitializer(Val); | |
2246 return; | |
2247 } | |
2248 | |
2249 ConstantExpr *CE = cast<ConstantExpr>(Addr); | |
2250 GlobalVariable *GV = cast<GlobalVariable>(CE->getOperand(0)); | |
2251 GV->setInitializer(EvaluateStoreInto(GV->getInitializer(), Val, CE, 2)); | |
2252 } | |
2253 | |
2254 namespace { | |
2255 | |
2256 /// Evaluator - This class evaluates LLVM IR, producing the Constant | |
2257 /// representing each SSA instruction. Changes to global variables are stored | |
2258 /// in a mapping that can be iterated over after the evaluation is complete. | |
2259 /// Once an evaluation call fails, the evaluation object should not be reused. | |
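///
/// A caller is expected to use it roughly like this sketch (error handling
/// omitted):
///   Evaluator Eval(TD, TLI);
///   Constant *RetVal = 0;
///   SmallVector<Constant*, 4> Args;   // empty for a static ctor
///   if (Eval.EvaluateFunction(F, RetVal, Args))
///     ;  // commit each (Ptr, Val) pair in Eval.getMutatedMemory(),
///        // e.g. via CommitValueTo(Val, Ptr).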
2260 class Evaluator { | |
2261 public: | |
2262 Evaluator(const DataLayout *TD, const TargetLibraryInfo *TLI) | |
2263 : TD(TD), TLI(TLI) { | |
2264 ValueStack.push_back(new DenseMap<Value*, Constant*>); | |
2265 } | |
2266 | |
2267 ~Evaluator() { | |
2268 DeleteContainerPointers(ValueStack); | |
2269 while (!AllocaTmps.empty()) { | |
2270 GlobalVariable *Tmp = AllocaTmps.back(); | |
2271 AllocaTmps.pop_back(); | |
2272 | |
2273 // If there are still users of the alloca, the program is doing something | |
2274 // silly, e.g. storing the address of the alloca somewhere and using it | |
2275 // later. Since this is undefined, we'll just make it be null. | |
2276 if (!Tmp->use_empty()) | |
2277 Tmp->replaceAllUsesWith(Constant::getNullValue(Tmp->getType())); | |
2278 delete Tmp; | |
2279 } | |
2280 } | |
2281 | |
2282 /// EvaluateFunction - Evaluate a call to function F, returning true if | |
2283 /// successful, false if we can't evaluate it. ActualArgs contains the actual | |
2284 /// argument values bound to the function's formal parameters. | |
2285 bool EvaluateFunction(Function *F, Constant *&RetVal, | |
2286 const SmallVectorImpl<Constant*> &ActualArgs); | |
2287 | |
2288 /// EvaluateBlock - Evaluate all instructions in block BB, returning true if | |
2289 /// successful, false if we can't evaluate it. NewBB returns the next BB that | |
2290 /// control flows into, or null upon return. | |
2291 bool EvaluateBlock(BasicBlock::iterator CurInst, BasicBlock *&NextBB); | |
2292 | |
2293 Constant *getVal(Value *V) { | |
2294 if (Constant *CV = dyn_cast<Constant>(V)) return CV; | |
2295 Constant *R = ValueStack.back()->lookup(V); | |
2296 assert(R && "Reference to an uncomputed value!"); | |
2297 return R; | |
2298 } | |
2299 | |
2300 void setVal(Value *V, Constant *C) { | |
2301 ValueStack.back()->operator[](V) = C; | |
2302 } | |
2303 | |
2304 const DenseMap<Constant*, Constant*> &getMutatedMemory() const { | |
2305 return MutatedMemory; | |
2306 } | |
2307 | |
2308 const SmallPtrSet<GlobalVariable*, 8> &getInvariants() const { | |
2309 return Invariants; | |
2310 } | |
2311 | |
2312 private: | |
2313 Constant *ComputeLoadResult(Constant *P); | |
2314 | |
2315 /// ValueStack - As we compute SSA register values, we store their contents | |
2316 /// here. The back of the vector contains the current function and the stack | |
2317 /// contains the values in the calling frames. | |
2318 SmallVector<DenseMap<Value*, Constant*>*, 4> ValueStack; | |
2319 | |
2320 /// CallStack - This is used to detect recursion. In pathological situations | |
2321 /// we could hit exponential behavior, but at least there is nothing | |
2322 /// unbounded. | |
2323 SmallVector<Function*, 4> CallStack; | |
2324 | |
2325 /// MutatedMemory - For each store we execute, we update this map. Loads | |
2326 /// check this to get the most up-to-date value. If evaluation is successful, | |
2327 /// this state is committed to the module's globals. | |
2328 DenseMap<Constant*, Constant*> MutatedMemory; | |
2329 | |
2330 /// AllocaTmps - To 'execute' an alloca, we create a temporary global variable | |
2331 /// to represent its body. This vector is needed so we can delete the | |
2332 /// temporary globals when we are done. | |
2333 SmallVector<GlobalVariable*, 32> AllocaTmps; | |
2334 | |
2335 /// Invariants - These global variables have been marked invariant by the | |
2336 /// static constructor. | |
2337 SmallPtrSet<GlobalVariable*, 8> Invariants; | |
2338 | |
2339 /// SimpleConstants - These are constants we have checked and know to be | |
2340 /// simple enough to live in a static initializer of a global. | |
2341 SmallPtrSet<Constant*, 8> SimpleConstants; | |
2342 | |
2343 const DataLayout *TD; | |
2344 const TargetLibraryInfo *TLI; | |
2345 }; | |
2346 | |
2347 } // anonymous namespace | |
2348 | |
2349 /// ComputeLoadResult - Return the value that would be computed by a load from | |
2350 /// P after the stores reflected by 'memory' have been performed. If we can't | |
2351 /// decide, return null. | |
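///
/// For example (a sketch): once "store i32 5, i32* @X" has been executed and
/// recorded in MutatedMemory, a load of @X yields i32 5 rather than @X's
/// static initializer.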
2352 Constant *Evaluator::ComputeLoadResult(Constant *P) { | |
2353 // If this memory location has been recently stored, use the stored value: it | |
2354 // is the most up-to-date. | |
2355 DenseMap<Constant*, Constant*>::const_iterator I = MutatedMemory.find(P); | |
2356 if (I != MutatedMemory.end()) return I->second; | |
2357 | |
2358 // Access it. | |
2359 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(P)) { | |
2360 if (GV->hasDefinitiveInitializer()) | |
2361 return GV->getInitializer(); | |
2362 return 0; | |
2363 } | |
2364 | |
2365 // Handle a constantexpr getelementptr. | |
2366 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(P)) | |
2367 if (CE->getOpcode() == Instruction::GetElementPtr && | |
2368 isa<GlobalVariable>(CE->getOperand(0))) { | |
2369 GlobalVariable *GV = cast<GlobalVariable>(CE->getOperand(0)); | |
2370 if (GV->hasDefinitiveInitializer()) | |
2371 return ConstantFoldLoadThroughGEPConstantExpr(GV->getInitializer(), CE); | |
2372 } | |
2373 | |
2374 return 0; // don't know how to evaluate. | |
2375 } | |
2376 | |
2377 /// EvaluateBlock - Evaluate all instructions in block BB, returning true if | |
2378 /// successful, false if we can't evaluate it. NewBB returns the next BB that | |
2379 /// control flows into, or null upon return. | |
2380 bool Evaluator::EvaluateBlock(BasicBlock::iterator CurInst, | |
2381 BasicBlock *&NextBB) { | |
2382 // This is the main evaluation loop. | |
2383 while (1) { | |
2384 Constant *InstResult = 0; | |
2385 | |
2386 DEBUG(dbgs() << "Evaluating Instruction: " << *CurInst << "\n"); | |
2387 | |
2388 if (StoreInst *SI = dyn_cast<StoreInst>(CurInst)) { | |
2389 if (!SI->isSimple()) { | |
2390 DEBUG(dbgs() << "Store is not simple! Can not evaluate.\n"); | |
2391 return false; // no volatile/atomic accesses. | |
2392 } | |
2393 Constant *Ptr = getVal(SI->getOperand(1)); | |
2394 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr)) { | |
2395 DEBUG(dbgs() << "Folding constant ptr expression: " << *Ptr); | |
2396 Ptr = ConstantFoldConstantExpression(CE, TD, TLI); | |
2397 DEBUG(dbgs() << "; To: " << *Ptr << "\n"); | |
2398 } | |
2399 if (!isSimpleEnoughPointerToCommit(Ptr)) { | |
2400 // If this is too complex for us to commit, reject it. | |
2401 DEBUG(dbgs() << "Pointer is too complex for us to evaluate store."); | |
2402 return false; | |
2403 } | |
2404 | |
2405 Constant *Val = getVal(SI->getOperand(0)); | |
2406 | |
2407 // If this might be too difficult for the backend to handle (e.g. the addr | |
2408 // of one global variable divided by another) then we can't commit it. | |
2409 if (!isSimpleEnoughValueToCommit(Val, SimpleConstants, TD)) { | |
2410 DEBUG(dbgs() << "Store value is too complex to evaluate store. " << *Val | |
2411 << "\n"); | |
2412 return false; | |
2413 } | |
2414 | |
2415 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr)) { | |
2416 if (CE->getOpcode() == Instruction::BitCast) { | |
2417 DEBUG(dbgs() << "Attempting to resolve bitcast on constant ptr.\n"); | |
2418 // If we're evaluating a store through a bitcast, then we need | |
2419 // to pull the bitcast off the pointer type and push it onto the | |
2420 // stored value. | |
2421 Ptr = CE->getOperand(0); | |
2422 | |
2423 Type *NewTy = cast<PointerType>(Ptr->getType())->getElementType(); | |
2424 | |
2425 // In order to push the bitcast onto the stored value, a bitcast | |
2426 // from NewTy to Val's type must be legal. If it's not, we can try | |
2427 // introspecting NewTy to find a legal conversion. | |
2428 while (!Val->getType()->canLosslesslyBitCastTo(NewTy)) { | |
2429 // If NewTy is a struct, we can convert the pointer to the struct | |
2430 // into a pointer to its first member. | |
2431 // FIXME: This could be extended to support arrays as well. | |
2432 if (StructType *STy = dyn_cast<StructType>(NewTy)) { | |
2433 NewTy = STy->getTypeAtIndex(0U); | |
2434 | |
2435 IntegerType *IdxTy = IntegerType::get(NewTy->getContext(), 32); | |
2436 Constant *IdxZero = ConstantInt::get(IdxTy, 0, false); | |
2437 Constant * const IdxList[] = {IdxZero, IdxZero}; | |
2438 | |
2439 Ptr = ConstantExpr::getGetElementPtr(Ptr, IdxList); | |
2440 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr)) | |
2441 Ptr = ConstantFoldConstantExpression(CE, TD, TLI); | |
2442 | |
2443 // If we can't improve the situation by introspecting NewTy, | |
2444 // we have to give up. | |
2445 } else { | |
2446 DEBUG(dbgs() << "Failed to bitcast constant ptr, can not " | |
2447 "evaluate.\n"); | |
2448 return false; | |
2449 } | |
2450 } | |
2451 | |
2452 // If we found compatible types, go ahead and push the bitcast | |
2453 // onto the stored value. | |
2454 Val = ConstantExpr::getBitCast(Val, NewTy); | |
2455 | |
2456 DEBUG(dbgs() << "Evaluated bitcast: " << *Val << "\n"); | |
2457 } | |
2458 } | |
2459 | |
2460 MutatedMemory[Ptr] = Val; | |
2461 } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(CurInst)) { | |
2462 InstResult = ConstantExpr::get(BO->getOpcode(), | |
2463 getVal(BO->getOperand(0)), | |
2464 getVal(BO->getOperand(1))); | |
2465 DEBUG(dbgs() << "Found a BinaryOperator! Simplifying: " << *InstResult | |
2466 << "\n"); | |
2467 } else if (CmpInst *CI = dyn_cast<CmpInst>(CurInst)) { | |
2468 InstResult = ConstantExpr::getCompare(CI->getPredicate(), | |
2469 getVal(CI->getOperand(0)), | |
2470 getVal(CI->getOperand(1))); | |
2471 DEBUG(dbgs() << "Found a CmpInst! Simplifying: " << *InstResult | |
2472 << "\n"); | |
2473 } else if (CastInst *CI = dyn_cast<CastInst>(CurInst)) { | |
2474 InstResult = ConstantExpr::getCast(CI->getOpcode(), | |
2475 getVal(CI->getOperand(0)), | |
2476 CI->getType()); | |
2477 DEBUG(dbgs() << "Found a Cast! Simplifying: " << *InstResult | |
2478 << "\n"); | |
2479 } else if (SelectInst *SI = dyn_cast<SelectInst>(CurInst)) { | |
2480 InstResult = ConstantExpr::getSelect(getVal(SI->getOperand(0)), | |
2481 getVal(SI->getOperand(1)), | |
2482 getVal(SI->getOperand(2))); | |
2483 DEBUG(dbgs() << "Found a Select! Simplifying: " << *InstResult | |
2484 << "\n"); | |
2485 } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(CurInst)) { | |
2486 Constant *P = getVal(GEP->getOperand(0)); | |
2487 SmallVector<Constant*, 8> GEPOps; | |
2488 for (User::op_iterator i = GEP->op_begin() + 1, e = GEP->op_end(); | |
2489 i != e; ++i) | |
2490 GEPOps.push_back(getVal(*i)); | |
2491 InstResult = | |
2492 ConstantExpr::getGetElementPtr(P, GEPOps, | |
2493 cast<GEPOperator>(GEP)->isInBounds()); | |
2494 DEBUG(dbgs() << "Found a GEP! Simplifying: " << *InstResult | |
2495 << "\n"); | |
2496 } else if (LoadInst *LI = dyn_cast<LoadInst>(CurInst)) { | |
2497 | |
2498 if (!LI->isSimple()) { | |
2499 DEBUG(dbgs() << "Found a Load! Not a simple load, can not evaluate.\n"); | |
2500 return false; // no volatile/atomic accesses. | |
2501 } | |
2502 | |
2503 Constant *Ptr = getVal(LI->getOperand(0)); | |
2504 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr)) { | |
2505 Ptr = ConstantFoldConstantExpression(CE, TD, TLI); | |
2506 DEBUG(dbgs() << "Found a constant pointer expression, constant " | |
2507 "folding: " << *Ptr << "\n"); | |
2508 } | |
2509 InstResult = ComputeLoadResult(Ptr); | |
2510 if (InstResult == 0) { | |
2511 DEBUG(dbgs() << "Failed to compute load result. Can not evaluate load." | |
2512 "\n"); | |
2513 return false; // Could not evaluate load. | |
2514 } | |
2515 | |
2516 DEBUG(dbgs() << "Evaluated load: " << *InstResult << "\n"); | |
2517 } else if (AllocaInst *AI = dyn_cast<AllocaInst>(CurInst)) { | |
2518 if (AI->isArrayAllocation()) { | |
2519 DEBUG(dbgs() << "Found an array alloca. Can not evaluate.\n"); | |
2520 return false; // Cannot handle array allocs. | |
2521 } | |
2522 Type *Ty = AI->getType()->getElementType(); | |
2523 AllocaTmps.push_back(new GlobalVariable(Ty, false, | |
2524 GlobalValue::InternalLinkage, | |
2525 UndefValue::get(Ty), | |
2526 AI->getName())); | |
2527 InstResult = AllocaTmps.back(); | |
2528 DEBUG(dbgs() << "Found an alloca. Result: " << *InstResult << "\n"); | |
2529 } else if (isa<CallInst>(CurInst) || isa<InvokeInst>(CurInst)) { | |
2530 CallSite CS(CurInst); | |
2531 | |
2532 // Debug info can safely be ignored here. | |
2533 if (isa<DbgInfoIntrinsic>(CS.getInstruction())) { | |
2534 DEBUG(dbgs() << "Ignoring debug info.\n"); | |
2535 ++CurInst; | |
2536 continue; | |
2537 } | |
2538 | |
2539 // Cannot handle inline asm. | |
2540 if (isa<InlineAsm>(CS.getCalledValue())) { | |
2541 DEBUG(dbgs() << "Found inline asm, can not evaluate.\n"); | |
2542 return false; | |
2543 } | |
2544 | |
2545 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction())) { | |
2546 if (MemSetInst *MSI = dyn_cast<MemSetInst>(II)) { | |
2547 if (MSI->isVolatile()) { | |
2548 DEBUG(dbgs() << "Can not optimize a volatile memset " << | |
2549 "intrinsic.\n"); | |
2550 return false; | |
2551 } | |
2552 Constant *Ptr = getVal(MSI->getDest()); | |
2553 Constant *Val = getVal(MSI->getValue()); | |
2554 Constant *DestVal = ComputeLoadResult(getVal(Ptr)); | |
2555 if (Val->isNullValue() && DestVal && DestVal->isNullValue()) { | |
2556 // This memset is a no-op. | |
2557 DEBUG(dbgs() << "Ignoring no-op memset.\n"); | |
2558 ++CurInst; | |
2559 continue; | |
2560 } | |
2561 } | |
2562 | |
2563 if (II->getIntrinsicID() == Intrinsic::lifetime_start || | |
2564 II->getIntrinsicID() == Intrinsic::lifetime_end) { | |
2565 DEBUG(dbgs() << "Ignoring lifetime intrinsic.\n"); | |
2566 ++CurInst; | |
2567 continue; | |
2568 } | |
2569 | |
2570 if (II->getIntrinsicID() == Intrinsic::invariant_start) { | |
2571 // We don't insert an entry into Values, as it doesn't have a | |
2572 // meaningful return value. | |
2573 if (!II->use_empty()) { | |
2574 DEBUG(dbgs() << "Found a used invariant_start. Can not evaluate.\n"); | |
2575 return false; | |
2576 } | |
2577 ConstantInt *Size = cast<ConstantInt>(II->getArgOperand(0)); | |
2578 Value *PtrArg = getVal(II->getArgOperand(1)); | |
2579 Value *Ptr = PtrArg->stripPointerCasts(); | |
2580 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Ptr)) { | |
2581 Type *ElemTy = cast<PointerType>(GV->getType())->getElementType(); | |
2582 if (TD && !Size->isAllOnesValue() && | |
2583 Size->getValue().getLimitedValue() >= | |
2584 TD->getTypeStoreSize(ElemTy)) { | |
2585 Invariants.insert(GV); | |
2586 DEBUG(dbgs() << "Found a global var that is an invariant: " << *GV | |
2587 << "\n"); | |
2588 } else { | |
2589 DEBUG(dbgs() << "Found a global var, but can not treat it as an " | |
2590 "invariant.\n"); | |
2591 } | |
2592 } | |
2593 // Continue even if we do nothing. | |
2594 ++CurInst; | |
2595 continue; | |
2596 } | |
2597 | |
2598 DEBUG(dbgs() << "Unknown intrinsic. Can not evaluate.\n"); | |
2599 return false; | |
2600 } | |
2601 | |
2602 // Resolve function pointers. | |
2603 Function *Callee = dyn_cast<Function>(getVal(CS.getCalledValue())); | |
2604 if (!Callee || Callee->mayBeOverridden()) { | |
2605 DEBUG(dbgs() << "Can not resolve function pointer.\n"); | |
2606 return false; // Cannot resolve. | |
2607 } | |
2608 | |
2609 SmallVector<Constant*, 8> Formals; | |
2610 for (User::op_iterator i = CS.arg_begin(), e = CS.arg_end(); i != e; ++i) | |
2611 Formals.push_back(getVal(*i)); | |
2612 | |
2613 if (Callee->isDeclaration()) { | |
2614 // If this is a function we can constant fold, do it. | |
2615 if (Constant *C = ConstantFoldCall(Callee, Formals, TLI)) { | |
2616 InstResult = C; | |
2617 DEBUG(dbgs() << "Constant folded function call. Result: " << | |
2618 *InstResult << "\n"); | |
2619 } else { | |
2620 DEBUG(dbgs() << "Can not constant fold function call.\n"); | |
2621 return false; | |
2622 } | |
2623 } else { | |
2624 if (Callee->getFunctionType()->isVarArg()) { | |
2625 DEBUG(dbgs() << "Can not constant fold vararg function call.\n"); | |
2626 return false; | |
2627 } | |
2628 | |
2629 Constant *RetVal = 0; | |
2630 // Execute the call, if successful, use the return value. | |
2631 ValueStack.push_back(new DenseMap<Value*, Constant*>); | |
2632 if (!EvaluateFunction(Callee, RetVal, Formals)) { | |
2633 DEBUG(dbgs() << "Failed to evaluate function.\n"); | |
2634 return false; | |
2635 } | |
2636 delete ValueStack.pop_back_val(); | |
2637 InstResult = RetVal; | |
2638 | |
2639 if (InstResult != NULL) { | |
2640 DEBUG(dbgs() << "Successfully evaluated function. Result: " << | |
2641 *InstResult << "\n\n"); | |
2642 } else { | |
2643 DEBUG(dbgs() << "Successfully evaluated function. Result: 0\n\n"); | |
2644 } | |
2645 } | |
    } else if (isa<TerminatorInst>(CurInst)) {
      DEBUG(dbgs() << "Found a terminator instruction.\n");

      if (BranchInst *BI = dyn_cast<BranchInst>(CurInst)) {
        if (BI->isUnconditional()) {
          NextBB = BI->getSuccessor(0);
        } else {
          ConstantInt *Cond =
            dyn_cast<ConstantInt>(getVal(BI->getCondition()));
          if (!Cond) return false;  // Cannot determine.

          NextBB = BI->getSuccessor(!Cond->getZExtValue());
        }
      } else if (SwitchInst *SI = dyn_cast<SwitchInst>(CurInst)) {
        ConstantInt *Val =
          dyn_cast<ConstantInt>(getVal(SI->getCondition()));
        if (!Val) return false;  // Cannot determine.
        NextBB = SI->findCaseValue(Val).getCaseSuccessor();
      } else if (IndirectBrInst *IBI = dyn_cast<IndirectBrInst>(CurInst)) {
        Value *Val = getVal(IBI->getAddress())->stripPointerCasts();
        if (BlockAddress *BA = dyn_cast<BlockAddress>(Val))
          NextBB = BA->getBasicBlock();
        else
          return false;  // Cannot determine.
      } else if (isa<ReturnInst>(CurInst)) {
        NextBB = 0;
      } else {
        // invoke, resume, unreachable.
        DEBUG(dbgs() << "Cannot handle terminator.\n");
        return false;  // Cannot handle this terminator.
      }

      // We succeeded at evaluating this block!
      DEBUG(dbgs() << "Successfully evaluated block.\n");
      return true;
    } else {
      // Did not know how to evaluate this!
      DEBUG(dbgs() << "Failed to evaluate block due to unhandled instruction.\n");
      return false;
    }

    if (!CurInst->use_empty()) {
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(InstResult))
        InstResult = ConstantFoldConstantExpression(CE, TD, TLI);

      setVal(CurInst, InstResult);
    }

    // If we just processed an invoke, we finished evaluating the block.
    if (InvokeInst *II = dyn_cast<InvokeInst>(CurInst)) {
      NextBB = II->getNormalDest();
      DEBUG(dbgs() << "Found an invoke instruction. Finished Block.\n\n");
      return true;
    }

    // Advance program counter.
    ++CurInst;
  }
}

/// EvaluateFunction - Evaluate a call to function F, returning true if
/// successful, false if we can't evaluate it. ActualArgs contains the actual
/// arguments to pass to the function.
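///
/// A note on the evaluation model: this is a symbolic execution of
/// straight-line IR. Each evaluated call gets a fresh scope pushed onto
/// ValueStack (done by the caller in EvaluateBlock), and stores are recorded
/// in the evaluator's mutated-memory map rather than applied to the module;
/// they are only committed if the whole constructor evaluates successfully.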
bool Evaluator::EvaluateFunction(Function *F, Constant *&RetVal,
                                 const SmallVectorImpl<Constant*> &ActualArgs) {
  // Check to see if this function is already executing (recursion). If so,
  // bail out. TODO: we might want to accept limited recursion.
  if (std::find(CallStack.begin(), CallStack.end(), F) != CallStack.end())
    return false;

  CallStack.push_back(F);

  // Initialize arguments to the incoming values specified.
  unsigned ArgNo = 0;
  for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end(); AI != E;
       ++AI, ++ArgNo)
    setVal(AI, ActualArgs[ArgNo]);

  // ExecutedBlocks - We only handle non-looping, non-recursive code. As such,
  // we can evaluate each basic block at most once. This set keeps track of
  // what we have executed so we can detect recursive cases etc.
  SmallPtrSet<BasicBlock*, 32> ExecutedBlocks;

  // CurBB - The current basic block we're evaluating.
  BasicBlock *CurBB = F->begin();

  BasicBlock::iterator CurInst = CurBB->begin();

  while (1) {
    BasicBlock *NextBB = 0;  // Initialized to avoid compiler warnings.
    DEBUG(dbgs() << "Trying to evaluate BB: " << *CurBB << "\n");

    if (!EvaluateBlock(CurInst, NextBB))
      return false;

    if (NextBB == 0) {
      // Successfully running until there's no next block means that we found
      // the return. Fill in the return value and pop the call stack.
      ReturnInst *RI = cast<ReturnInst>(CurBB->getTerminator());
      if (RI->getNumOperands())
        RetVal = getVal(RI->getOperand(0));
      CallStack.pop_back();
      return true;
    }

    // Okay, we succeeded in evaluating this control flow. See if we have
    // executed the new block before. If so, we have a looping function,
    // which we cannot evaluate in reasonable time.
    if (!ExecutedBlocks.insert(NextBB))
      return false;  // looped!

    // Okay, we have never been in this block before. Check to see if there
    // are any PHI nodes. If so, evaluate them with information about where
    // we came from.
    PHINode *PN = 0;
    for (CurInst = NextBB->begin();
         (PN = dyn_cast<PHINode>(CurInst)); ++CurInst)
      setVal(PN, getVal(PN->getIncomingValueForBlock(CurBB)));

    // Advance to the next block.
    CurBB = NextBB;
  }
}

/// EvaluateStaticConstructor - Evaluate the given static constructor function,
/// if we can. Return true if we can, false otherwise.
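///
/// For illustration (a hypothetical input; the names are invented), a ctor
/// such as:
///   @g = internal global i32 0
///   define internal void @init() {
///     store i32 42, i32* @g
///     ret void
///   }
/// can be fully evaluated; committing the result rewrites @g's initializer
/// to i32 42, after which the ctor entry can be dropped from the list.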
static bool EvaluateStaticConstructor(Function *F, const DataLayout *TD,
                                      const TargetLibraryInfo *TLI) {
  // Call the function.
  Evaluator Eval(TD, TLI);
  Constant *RetValDummy;
  bool EvalSuccess = Eval.EvaluateFunction(F, RetValDummy,
                                           SmallVector<Constant*, 0>());

  if (EvalSuccess) {
    // We succeeded at evaluation: commit the result.
    DEBUG(dbgs() << "FULLY EVALUATED GLOBAL CTOR FUNCTION '"
          << F->getName() << "' to " << Eval.getMutatedMemory().size()
          << " stores.\n");
    for (DenseMap<Constant*, Constant*>::const_iterator I =
           Eval.getMutatedMemory().begin(), E = Eval.getMutatedMemory().end();
         I != E; ++I)
      CommitValueTo(I->second, I->first);
    for (SmallPtrSet<GlobalVariable*, 8>::const_iterator I =
           Eval.getInvariants().begin(), E = Eval.getInvariants().end();
         I != E; ++I)
      (*I)->setConstant(true);
  }

  return EvalSuccess;
}

/// OptimizeGlobalCtorsList - Simplify and evaluate global ctors if possible.
/// Return true if anything changed.
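///
/// For reference, llvm.global_ctors is an appending global of
/// { priority, function } pairs, e.g. (illustrative):
///   @llvm.global_ctors = appending global [1 x { i32, void ()* }]
///       [{ i32, void ()* } { i32 65535, void ()* @init }]
/// A null function pointer acts as a terminator; see the pruning below.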
bool GlobalOpt::OptimizeGlobalCtorsList(GlobalVariable *&GCL) {
  std::vector<Function*> Ctors = ParseGlobalCtors(GCL);
  bool MadeChange = false;
  if (Ctors.empty()) return false;

  // Loop over global ctors, optimizing them when we can.
  for (unsigned i = 0; i != Ctors.size(); ++i) {
    Function *F = Ctors[i];
    // Found a null terminator in the middle of the list, prune off the rest
    // of the list.
    if (F == 0) {
      if (i != Ctors.size()-1) {
        Ctors.resize(i+1);
        MadeChange = true;
      }
      break;
    }
    DEBUG(dbgs() << "Optimizing Global Constructor: " << *F << "\n");

    // We cannot simplify external ctor functions.
    if (F->empty()) continue;

    // If we can evaluate the ctor at compile time, do so.
    if (EvaluateStaticConstructor(F, TD, TLI)) {
      Ctors.erase(Ctors.begin()+i);
      MadeChange = true;
      --i;
      ++NumCtorsEvaluated;
      continue;
    }
  }

  if (!MadeChange) return false;

  GCL = InstallGlobalCtors(GCL, Ctors);
  return true;
}

static int compareNames(Constant *const *A, Constant *const *B) {
  return (*A)->getName().compare((*B)->getName());
}

static void setUsedInitializer(GlobalVariable &V,
                               SmallPtrSet<GlobalValue *, 8> Init) {
  if (Init.empty()) {
    V.eraseFromParent();
    return;
  }

  SmallVector<llvm::Constant *, 8> UsedArray;
  PointerType *Int8PtrTy = Type::getInt8PtrTy(V.getContext());

  for (SmallPtrSet<GlobalValue *, 8>::iterator I = Init.begin(), E = Init.end();
       I != E; ++I) {
    Constant *Cast = llvm::ConstantExpr::getBitCast(*I, Int8PtrTy);
    UsedArray.push_back(Cast);
  }
  // Sort to get deterministic order.
  array_pod_sort(UsedArray.begin(), UsedArray.end(), compareNames);
  ArrayType *ATy = ArrayType::get(Int8PtrTy, UsedArray.size());

  Module *M = V.getParent();
  V.removeFromParent();
  GlobalVariable *NV =
      new GlobalVariable(*M, ATy, false, llvm::GlobalValue::AppendingLinkage,
                         llvm::ConstantArray::get(ATy, UsedArray), "");
  NV->takeName(&V);
  NV->setSection("llvm.metadata");
  delete &V;
}

namespace {
/// \brief An easy-to-access representation of llvm.used and llvm.compiler.used.
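/// Both arrays hold i8* casts of the referenced values and live in the
/// "llvm.metadata" section, e.g. (illustrative):
///   @llvm.used = appending global [1 x i8*]
///       [i8* bitcast (void ()* @f to i8*)], section "llvm.metadata"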
class LLVMUsed {
  SmallPtrSet<GlobalValue *, 8> Used;
  SmallPtrSet<GlobalValue *, 8> CompilerUsed;
  GlobalVariable *UsedV;
  GlobalVariable *CompilerUsedV;

public:
  LLVMUsed(Module &M) {
    UsedV = collectUsedGlobalVariables(M, Used, false);
    CompilerUsedV = collectUsedGlobalVariables(M, CompilerUsed, true);
  }
  typedef SmallPtrSet<GlobalValue *, 8>::iterator iterator;
  iterator usedBegin() { return Used.begin(); }
  iterator usedEnd() { return Used.end(); }
  iterator compilerUsedBegin() { return CompilerUsed.begin(); }
  iterator compilerUsedEnd() { return CompilerUsed.end(); }
  bool usedCount(GlobalValue *GV) const { return Used.count(GV); }
  bool compilerUsedCount(GlobalValue *GV) const {
    return CompilerUsed.count(GV);
  }
  bool usedErase(GlobalValue *GV) { return Used.erase(GV); }
  bool compilerUsedErase(GlobalValue *GV) { return CompilerUsed.erase(GV); }
  bool usedInsert(GlobalValue *GV) { return Used.insert(GV); }
  bool compilerUsedInsert(GlobalValue *GV) { return CompilerUsed.insert(GV); }

  void syncVariablesAndSets() {
    if (UsedV)
      setUsedInitializer(*UsedV, Used);
    if (CompilerUsedV)
      setUsedInitializer(*CompilerUsedV, CompilerUsed);
  }
};
}

static bool hasUseOtherThanLLVMUsed(GlobalAlias &GA, const LLVMUsed &U) {
  if (GA.use_empty())  // No use at all.
    return false;

  assert((!U.usedCount(&GA) || !U.compilerUsedCount(&GA)) &&
         "We should have removed the duplicated "
         "element from llvm.compiler.used");
  if (!GA.hasOneUse())
    // Strictly more than one use. Since GA can appear in at most one of the
    // two arrays (see the assert above), at least one use is not in llvm.used
    // or llvm.compiler.used.
    return true;

  // Exactly one use. Check if it is in llvm.used or llvm.compiler.used.
  return !U.usedCount(&GA) && !U.compilerUsedCount(&GA);
}

static bool hasMoreThanOneUseOtherThanLLVMUsed(GlobalValue &V,
                                               const LLVMUsed &U) {
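  // We look for at least two uses beyond the use lists themselves. If V is
  // named by llvm.used or llvm.compiler.used, the entry in that array accounts
  // for one additional use, so raise the threshold by one.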
  unsigned N = 2;
  assert((!U.usedCount(&V) || !U.compilerUsedCount(&V)) &&
         "We should have removed the duplicated "
         "element from llvm.compiler.used");
  if (U.usedCount(&V) || U.compilerUsedCount(&V))
    ++N;
  return V.hasNUsesOrMore(N);
}

static bool mayHaveOtherReferences(GlobalAlias &GA, const LLVMUsed &U) {
  if (!GA.hasLocalLinkage())
    return true;

  return U.usedCount(&GA) || U.compilerUsedCount(&GA);
}

static bool hasUsesToReplace(GlobalAlias &GA, LLVMUsed &U, bool &RenameTarget) {
  RenameTarget = false;
  bool Ret = false;
  if (hasUseOtherThanLLVMUsed(GA, U))
    Ret = true;

  // If the alias is externally visible, we may still be able to simplify it.
  if (!mayHaveOtherReferences(GA, U))
    return Ret;

  // If the aliasee has internal linkage, give it the name and linkage
  // of the alias, and delete the alias. This turns:
  //   define internal ... @f(...)
  //   @a = alias ... @f
  // into:
  //   define ... @a(...)
  Constant *Aliasee = GA.getAliasee();
  GlobalValue *Target = cast<GlobalValue>(Aliasee->stripPointerCasts());
  if (!Target->hasLocalLinkage())
    return Ret;

  // Do not perform the transform if multiple aliases potentially target the
  // aliasee. This check also ensures that it is safe to replace the section
  // and other attributes of the aliasee with those of the alias.
  if (hasMoreThanOneUseOtherThanLLVMUsed(*Target, U))
    return Ret;

  RenameTarget = true;
  return true;
}

bool GlobalOpt::OptimizeGlobalAliases(Module &M) {
  bool Changed = false;
  LLVMUsed Used(M);

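  // Keep each value in at most one of the two sets: llvm.used takes
  // precedence, so drop duplicates from the llvm.compiler.used set. The
  // helpers above assert this invariant.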
  for (SmallPtrSet<GlobalValue *, 8>::iterator I = Used.usedBegin(),
                                               E = Used.usedEnd();
       I != E; ++I)
    Used.compilerUsedErase(*I);

  for (Module::alias_iterator I = M.alias_begin(), E = M.alias_end();
       I != E;) {
    Module::alias_iterator J = I++;
    // Aliases without names cannot be referenced outside this module.
    if (!J->hasName() && !J->isDeclaration())
      J->setLinkage(GlobalValue::InternalLinkage);
    // If the alias may be overridden at link time, nothing can be done - bail
    // out.
    if (J->mayBeOverridden())
      continue;

    Constant *Aliasee = J->getAliasee();
    GlobalValue *Target = cast<GlobalValue>(Aliasee->stripPointerCasts());
    Target->removeDeadConstantUsers();

    // Make all users of the alias use the aliasee instead.
    bool RenameTarget;
    if (!hasUsesToReplace(*J, Used, RenameTarget))
      continue;

    J->replaceAllUsesWith(Aliasee);
    ++NumAliasesResolved;
    Changed = true;

    if (RenameTarget) {
      // Give the aliasee the name, linkage and other attributes of the alias.
      Target->takeName(J);
      Target->setLinkage(J->getLinkage());
      Target->GlobalValue::copyAttributesFrom(J);

      if (Used.usedErase(J))
        Used.usedInsert(Target);

      if (Used.compilerUsedErase(J))
        Used.compilerUsedInsert(Target);
    } else if (mayHaveOtherReferences(*J, Used))
      continue;

    // Delete the alias.
    M.getAliasList().erase(J);
    ++NumAliasesRemoved;
    Changed = true;
  }

  Used.syncVariablesAndSets();

  return Changed;
}

static Function *FindCXAAtExit(Module &M, TargetLibraryInfo *TLI) {
  if (!TLI->has(LibFunc::cxa_atexit))
    return 0;

  Function *Fn = M.getFunction(TLI->getName(LibFunc::cxa_atexit));

  if (!Fn)
    return 0;

  FunctionType *FTy = Fn->getFunctionType();

  // Checking that the function has the right return type, the right number of
  // parameters, and that the parameters all have pointer types should be
  // enough.
  if (!FTy->getReturnType()->isIntegerTy() ||
      FTy->getNumParams() != 3 ||
      !FTy->getParamType(0)->isPointerTy() ||
      !FTy->getParamType(1)->isPointerTy() ||
      !FTy->getParamType(2)->isPointerTy())
    return 0;

  return Fn;
}

/// cxxDtorIsEmpty - Returns whether the given function is an empty C++
/// destructor and can therefore be eliminated.
/// Note that we assume that other optimization passes have already simplified
/// the code, so we only look for a function with a single basic block, where
/// the only allowed instructions are 'ret', 'call' to an empty C++ dtor, and
/// other side-effect-free instructions.
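///
/// For example (illustrative IR; the mangled name is invented), a destructor
/// body such as:
///   define linkonce_odr void @_ZN3FooD1Ev(%struct.Foo* %this) {
///   entry:
///     ret void
///   }
/// is considered empty, as is one whose only calls are to other empty dtors.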
static bool cxxDtorIsEmpty(const Function &Fn,
                           SmallPtrSet<const Function *, 8> &CalledFunctions) {
  // FIXME: We could eliminate C++ destructors if they're readonly/readnone and
  // nounwind, but that doesn't seem worth doing.
  if (Fn.isDeclaration())
    return false;

  if (++Fn.begin() != Fn.end())
    return false;

  const BasicBlock &EntryBlock = Fn.getEntryBlock();
  for (BasicBlock::const_iterator I = EntryBlock.begin(), E = EntryBlock.end();
       I != E; ++I) {
    if (const CallInst *CI = dyn_cast<CallInst>(I)) {
      // Ignore debug intrinsics.
      if (isa<DbgInfoIntrinsic>(CI))
        continue;

      const Function *CalledFn = CI->getCalledFunction();

      if (!CalledFn)
        return false;

      SmallPtrSet<const Function *, 8> NewCalledFunctions(CalledFunctions);

      // Don't treat recursive functions as empty.
      if (!NewCalledFunctions.insert(CalledFn))
        return false;

      if (!cxxDtorIsEmpty(*CalledFn, NewCalledFunctions))
        return false;
    } else if (isa<ReturnInst>(*I))
      return true;  // We're done.
    else if (I->mayHaveSideEffects())
      return false;  // Destructor with side effects, bail.
  }

  return false;
}

bool GlobalOpt::OptimizeEmptyGlobalCXXDtors(Function *CXAAtExitFn) {
  /// Itanium C++ ABI p3.3.5:
  ///
  ///   After constructing a global (or local static) object, that will require
  ///   destruction on exit, a termination function is registered as follows:
  ///
  ///   extern "C" int __cxa_atexit ( void (*f)(void *), void *p, void *d );
  ///
  ///   This registration, e.g. __cxa_atexit(f,p,d), is intended to cause the
  ///   call f(p) when DSO d is unloaded, before all such termination calls
  ///   registered before this one. It returns zero if registration is
  ///   successful, nonzero on failure.

  // This pass will look for calls to __cxa_atexit where the registered
  // function is trivial, and remove them.
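  //
  // For example (illustrative operands), a registration such as:
  //   %r = call i32 @__cxa_atexit(
  //            void (i8*)* bitcast (void (%struct.Foo*)* @_ZN3FooD1Ev
  //                                 to void (i8*)*),
  //            i8* %obj, i8* @__dso_handle)
  // can be deleted once @_ZN3FooD1Ev is known to be empty; any use of the
  // returned i32 is replaced with 0 below.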
  bool Changed = false;

  for (Function::use_iterator I = CXAAtExitFn->use_begin(),
       E = CXAAtExitFn->use_end(); I != E;) {
    // We're only interested in calls. Theoretically, we could handle invoke
    // instructions as well, but neither llvm-gcc nor clang generate invokes
    // to __cxa_atexit.
    CallInst *CI = dyn_cast<CallInst>(*I++);
    if (!CI)
      continue;

    Function *DtorFn =
      dyn_cast<Function>(CI->getArgOperand(0)->stripPointerCasts());
    if (!DtorFn)
      continue;

    SmallPtrSet<const Function *, 8> CalledFunctions;
    if (!cxxDtorIsEmpty(*DtorFn, CalledFunctions))
      continue;

    // Just remove the call.
    CI->replaceAllUsesWith(Constant::getNullValue(CI->getType()));
    CI->eraseFromParent();

    ++NumCXXDtorsRemoved;

    Changed = true;
  }

  return Changed;
}

bool GlobalOpt::runOnModule(Module &M) {
  bool Changed = false;

  TD = getAnalysisIfAvailable<DataLayout>();
  TLI = &getAnalysis<TargetLibraryInfo>();

  // Try to find the llvm.global_ctors list.
  GlobalVariable *GlobalCtors = FindGlobalCtors(M);

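  // Iterate until we reach a fixed point: each transform below can expose new
  // opportunities for the others (for example, deleting a dead function can
  // leave a global variable unused).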
  bool LocalChange = true;
  while (LocalChange) {
    LocalChange = false;

    // Delete functions that are trivially dead, and convert eligible functions
    // from ccc to fastcc.
    LocalChange |= OptimizeFunctions(M);

    // Optimize global_ctors list.
    if (GlobalCtors)
      LocalChange |= OptimizeGlobalCtorsList(GlobalCtors);

    // Optimize non-address-taken globals.
    LocalChange |= OptimizeGlobalVars(M);

    // Resolve aliases, when possible.
    LocalChange |= OptimizeGlobalAliases(M);

    // Try to remove trivial global destructors if they haven't already been
    // removed.
    Function *CXAAtExitFn = FindCXAAtExit(M, TLI);
    if (CXAAtExitFn)
      LocalChange |= OptimizeEmptyGlobalCXXDtors(CXAAtExitFn);

    Changed |= LocalChange;
  }

  // TODO: Move all global ctors functions to the end of the module for code
  // layout.

  return Changed;
}