diff lib/Transforms/Scalar/LICM.cpp @ 121:803732b1fca8
LLVM 5.0
author   | kono
date     | Fri, 27 Oct 2017 17:07:41 +0900
parents  | 1172e4bd9c6f
children | 3a76565eade5
--- a/lib/Transforms/Scalar/LICM.cpp Fri Nov 25 19:14:25 2016 +0900 +++ b/lib/Transforms/Scalar/LICM.cpp Fri Oct 27 17:07:41 2017 +0900 @@ -41,8 +41,8 @@ #include "llvm/Analysis/Loads.h" #include "llvm/Analysis/LoopInfo.h" #include "llvm/Analysis/LoopPass.h" -#include "llvm/Analysis/LoopPassManager.h" #include "llvm/Analysis/MemoryBuiltins.h" +#include "llvm/Analysis/OptimizationRemarkEmitter.h" #include "llvm/Analysis/ScalarEvolution.h" #include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h" #include "llvm/Analysis/TargetLibraryInfo.h" @@ -61,6 +61,7 @@ #include "llvm/Support/Debug.h" #include "llvm/Support/raw_ostream.h" #include "llvm/Transforms/Scalar.h" +#include "llvm/Transforms/Scalar/LoopPassManager.h" #include "llvm/Transforms/Utils/Local.h" #include "llvm/Transforms/Utils/LoopUtils.h" #include "llvm/Transforms/Utils/SSAUpdater.h" @@ -76,22 +77,31 @@ STATISTIC(NumMovedCalls, "Number of call insts hoisted or sunk"); STATISTIC(NumPromoted, "Number of memory locations promoted to registers"); +/// Memory promotion is enabled by default. static cl::opt<bool> - DisablePromotion("disable-licm-promotion", cl::Hidden, + DisablePromotion("disable-licm-promotion", cl::Hidden, cl::init(false), cl::desc("Disable memory promotion in LICM pass")); +static cl::opt<uint32_t> MaxNumUsesTraversed( + "licm-max-num-uses-traversed", cl::Hidden, cl::init(8), + cl::desc("Max num uses visited for identifying load " + "invariance in loop using invariant start (default = 8)")); + static bool inSubLoop(BasicBlock *BB, Loop *CurLoop, LoopInfo *LI); static bool isNotUsedInLoop(const Instruction &I, const Loop *CurLoop, const LoopSafetyInfo *SafetyInfo); static bool hoist(Instruction &I, const DominatorTree *DT, const Loop *CurLoop, - const LoopSafetyInfo *SafetyInfo); + const LoopSafetyInfo *SafetyInfo, + OptimizationRemarkEmitter *ORE); static bool sink(Instruction &I, const LoopInfo *LI, const DominatorTree *DT, const Loop *CurLoop, AliasSetTracker *CurAST, - const LoopSafetyInfo *SafetyInfo); -static bool isSafeToExecuteUnconditionally(const Instruction &Inst, + const LoopSafetyInfo *SafetyInfo, + OptimizationRemarkEmitter *ORE); +static bool isSafeToExecuteUnconditionally(Instruction &Inst, const DominatorTree *DT, const Loop *CurLoop, const LoopSafetyInfo *SafetyInfo, + OptimizationRemarkEmitter *ORE, const Instruction *CtxI = nullptr); static bool pointerInvalidatedByLoop(Value *V, uint64_t Size, const AAMDNodes &AAInfo, @@ -104,7 +114,8 @@ namespace { struct LoopInvariantCodeMotion { bool runOnLoop(Loop *L, AliasAnalysis *AA, LoopInfo *LI, DominatorTree *DT, - TargetLibraryInfo *TLI, ScalarEvolution *SE, bool DeleteAST); + TargetLibraryInfo *TLI, ScalarEvolution *SE, + OptimizationRemarkEmitter *ORE, bool DeleteAST); DenseMap<Loop *, AliasSetTracker *> &getLoopToAliasSetMap() { return LoopToAliasSetMap; @@ -124,16 +135,27 @@ } bool runOnLoop(Loop *L, LPPassManager &LPM) override { - if (skipLoop(L)) + if (skipLoop(L)) { + // If we have run LICM on a previous loop but now we are skipping + // (because we've hit the opt-bisect limit), we need to clear the + // loop alias information. + for (auto <AS : LICM.getLoopToAliasSetMap()) + delete LTAS.second; + LICM.getLoopToAliasSetMap().clear(); return false; + } auto *SE = getAnalysisIfAvailable<ScalarEvolutionWrapperPass>(); + // For the old PM, we can't use OptimizationRemarkEmitter as an analysis + // pass. Function analyses need to be preserved across loop transformations + // but ORE cannot be preserved (see comment before the pass definition). 
+ OptimizationRemarkEmitter ORE(L->getHeader()->getParent()); return LICM.runOnLoop(L, &getAnalysis<AAResultsWrapperPass>().getAAResults(), &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(), &getAnalysis<DominatorTreeWrapperPass>().getDomTree(), &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(), - SE ? &SE->getSE() : nullptr, false); + SE ? &SE->getSE() : nullptr, &ORE, false); } /// This transformation requires natural loop information & requires that @@ -167,28 +189,27 @@ /// Simple Analysis hook. Delete loop L from alias set map. void deleteAnalysisLoop(Loop *L) override; }; -} +} // namespace -PreservedAnalyses LICMPass::run(Loop &L, LoopAnalysisManager &AM) { +PreservedAnalyses LICMPass::run(Loop &L, LoopAnalysisManager &AM, + LoopStandardAnalysisResults &AR, LPMUpdater &) { const auto &FAM = - AM.getResult<FunctionAnalysisManagerLoopProxy>(L).getManager(); + AM.getResult<FunctionAnalysisManagerLoopProxy>(L, AR).getManager(); Function *F = L.getHeader()->getParent(); - auto *AA = FAM.getCachedResult<AAManager>(*F); - auto *LI = FAM.getCachedResult<LoopAnalysis>(*F); - auto *DT = FAM.getCachedResult<DominatorTreeAnalysis>(*F); - auto *TLI = FAM.getCachedResult<TargetLibraryAnalysis>(*F); - auto *SE = FAM.getCachedResult<ScalarEvolutionAnalysis>(*F); - assert((AA && LI && DT && TLI && SE) && "Analyses for LICM not available"); + auto *ORE = FAM.getCachedResult<OptimizationRemarkEmitterAnalysis>(*F); + // FIXME: This should probably be optional rather than required. + if (!ORE) + report_fatal_error("LICM: OptimizationRemarkEmitterAnalysis not " + "cached at a higher level"); LoopInvariantCodeMotion LICM; - - if (!LICM.runOnLoop(&L, AA, LI, DT, TLI, SE, true)) + if (!LICM.runOnLoop(&L, &AR.AA, &AR.LI, &AR.DT, &AR.TLI, &AR.SE, ORE, true)) return PreservedAnalyses::all(); - // FIXME: There is no setPreservesCFG in the new PM. When that becomes - // available, it should be used here. - return getLoopPassPreservedAnalyses(); + auto PA = getLoopPassPreservedAnalyses(); + PA.preserveSet<CFGAnalyses>(); + return PA; } char LegacyLICMPass::ID = 0; @@ -210,7 +231,9 @@ bool LoopInvariantCodeMotion::runOnLoop(Loop *L, AliasAnalysis *AA, LoopInfo *LI, DominatorTree *DT, TargetLibraryInfo *TLI, - ScalarEvolution *SE, bool DeleteAST) { + ScalarEvolution *SE, + OptimizationRemarkEmitter *ORE, + bool DeleteAST) { bool Changed = false; assert(L->isLCSSAForm(*DT) && "Loop is not in LCSSA form."); @@ -236,31 +259,70 @@ // if (L->hasDedicatedExits()) Changed |= sinkRegion(DT->getNode(L->getHeader()), AA, LI, DT, TLI, L, - CurAST, &SafetyInfo); + CurAST, &SafetyInfo, ORE); if (Preheader) Changed |= hoistRegion(DT->getNode(L->getHeader()), AA, LI, DT, TLI, L, - CurAST, &SafetyInfo); + CurAST, &SafetyInfo, ORE); // Now that all loop invariants have been removed from the loop, promote any // memory references to scalars that we can. - if (!DisablePromotion && (Preheader || L->hasDedicatedExits())) { + // Don't sink stores from loops without dedicated block exits. Exits + // containing indirect branches are not transformed by loop simplify, + // make sure we catch that. An additional load may be generated in the + // preheader for SSA updater, so also avoid sinking when no preheader + // is available. + if (!DisablePromotion && Preheader && L->hasDedicatedExits()) { + // Figure out the loop exits and their insertion points SmallVector<BasicBlock *, 8> ExitBlocks; - SmallVector<Instruction *, 8> InsertPts; - PredIteratorCache PIC; + L->getUniqueExitBlocks(ExitBlocks); + + // We can't insert into a catchswitch. 
+ bool HasCatchSwitch = llvm::any_of(ExitBlocks, [](BasicBlock *Exit) { + return isa<CatchSwitchInst>(Exit->getTerminator()); + }); + + if (!HasCatchSwitch) { + SmallVector<Instruction *, 8> InsertPts; + InsertPts.reserve(ExitBlocks.size()); + for (BasicBlock *ExitBlock : ExitBlocks) + InsertPts.push_back(&*ExitBlock->getFirstInsertionPt()); + + PredIteratorCache PIC; + + bool Promoted = false; - // Loop over all of the alias sets in the tracker object. - for (AliasSet &AS : *CurAST) - Changed |= promoteLoopAccessesToScalars( - AS, ExitBlocks, InsertPts, PIC, LI, DT, TLI, L, CurAST, &SafetyInfo); + // Loop over all of the alias sets in the tracker object. + for (AliasSet &AS : *CurAST) { + // We can promote this alias set if it has a store, if it is a "Must" + // alias set, if the pointer is loop invariant, and if we are not + // eliminating any volatile loads or stores. + if (AS.isForwardingAliasSet() || !AS.isMod() || !AS.isMustAlias() || + AS.isVolatile() || !L->isLoopInvariant(AS.begin()->getValue())) + continue; + + assert( + !AS.empty() && + "Must alias set should have at least one pointer element in it!"); - // Once we have promoted values across the loop body we have to recursively - // reform LCSSA as any nested loop may now have values defined within the - // loop used in the outer loop. - // FIXME: This is really heavy handed. It would be a bit better to use an - // SSAUpdater strategy during promotion that was LCSSA aware and reformed - // it as it went. - if (Changed) { - formLCSSARecursively(*L, *DT, LI, SE); + SmallSetVector<Value *, 8> PointerMustAliases; + for (const auto &ASI : AS) + PointerMustAliases.insert(ASI.getValue()); + + Promoted |= promoteLoopAccessesToScalars(PointerMustAliases, ExitBlocks, + InsertPts, PIC, LI, DT, TLI, L, + CurAST, &SafetyInfo, ORE); + } + + // Once we have promoted values across the loop body we have to + // recursively reform LCSSA as any nested loop may now have values defined + // within the loop used in the outer loop. + // FIXME: This is really heavy handed. It would be a bit better to use an + // SSAUpdater strategy during promotion that was LCSSA aware and reformed + // it as it went. + if (Promoted) + formLCSSARecursively(*L, *DT, LI, SE); + + Changed |= Promoted; } } @@ -290,52 +352,51 @@ /// bool llvm::sinkRegion(DomTreeNode *N, AliasAnalysis *AA, LoopInfo *LI, DominatorTree *DT, TargetLibraryInfo *TLI, Loop *CurLoop, - AliasSetTracker *CurAST, LoopSafetyInfo *SafetyInfo) { + AliasSetTracker *CurAST, LoopSafetyInfo *SafetyInfo, + OptimizationRemarkEmitter *ORE) { // Verify inputs. assert(N != nullptr && AA != nullptr && LI != nullptr && DT != nullptr && CurLoop != nullptr && CurAST != nullptr && SafetyInfo != nullptr && "Unexpected input to sinkRegion"); - BasicBlock *BB = N->getBlock(); - // If this subregion is not in the top level loop at all, exit. - if (!CurLoop->contains(BB)) - return false; + // We want to visit children before parents. We will enque all the parents + // before their children in the worklist and process the worklist in reverse + // order. + SmallVector<DomTreeNode *, 16> Worklist = collectChildrenInLoop(N, CurLoop); - // We are processing blocks in reverse dfo, so process children first. 
bool Changed = false; - const std::vector<DomTreeNode *> &Children = N->getChildren(); - for (DomTreeNode *Child : Children) - Changed |= sinkRegion(Child, AA, LI, DT, TLI, CurLoop, CurAST, SafetyInfo); + for (DomTreeNode *DTN : reverse(Worklist)) { + BasicBlock *BB = DTN->getBlock(); + // Only need to process the contents of this block if it is not part of a + // subloop (which would already have been processed). + if (inSubLoop(BB, CurLoop, LI)) + continue; - // Only need to process the contents of this block if it is not part of a - // subloop (which would already have been processed). - if (inSubLoop(BB, CurLoop, LI)) - return Changed; - - for (BasicBlock::iterator II = BB->end(); II != BB->begin();) { - Instruction &I = *--II; + for (BasicBlock::iterator II = BB->end(); II != BB->begin();) { + Instruction &I = *--II; - // If the instruction is dead, we would try to sink it because it isn't used - // in the loop, instead, just delete it. - if (isInstructionTriviallyDead(&I, TLI)) { - DEBUG(dbgs() << "LICM deleting dead inst: " << I << '\n'); - ++II; - CurAST->deleteValue(&I); - I.eraseFromParent(); - Changed = true; - continue; - } + // If the instruction is dead, we would try to sink it because it isn't + // used in the loop, instead, just delete it. + if (isInstructionTriviallyDead(&I, TLI)) { + DEBUG(dbgs() << "LICM deleting dead inst: " << I << '\n'); + ++II; + CurAST->deleteValue(&I); + I.eraseFromParent(); + Changed = true; + continue; + } - // Check to see if we can sink this instruction to the exit blocks - // of the loop. We can do this if the all users of the instruction are - // outside of the loop. In this case, it doesn't even matter if the - // operands of the instruction are loop invariant. - // - if (isNotUsedInLoop(I, CurLoop, SafetyInfo) && - canSinkOrHoistInst(I, AA, DT, CurLoop, CurAST, SafetyInfo)) { - ++II; - Changed |= sink(I, LI, DT, CurLoop, CurAST, SafetyInfo); + // Check to see if we can sink this instruction to the exit blocks + // of the loop. We can do this if the all users of the instruction are + // outside of the loop. In this case, it doesn't even matter if the + // operands of the instruction are loop invariant. + // + if (isNotUsedInLoop(I, CurLoop, SafetyInfo) && + canSinkOrHoistInst(I, AA, DT, CurLoop, CurAST, SafetyInfo, ORE)) { + ++II; + Changed |= sink(I, LI, DT, CurLoop, CurAST, SafetyInfo, ORE); + } } } return Changed; @@ -348,54 +409,77 @@ /// bool llvm::hoistRegion(DomTreeNode *N, AliasAnalysis *AA, LoopInfo *LI, DominatorTree *DT, TargetLibraryInfo *TLI, Loop *CurLoop, - AliasSetTracker *CurAST, LoopSafetyInfo *SafetyInfo) { + AliasSetTracker *CurAST, LoopSafetyInfo *SafetyInfo, + OptimizationRemarkEmitter *ORE) { // Verify inputs. assert(N != nullptr && AA != nullptr && LI != nullptr && DT != nullptr && CurLoop != nullptr && CurAST != nullptr && SafetyInfo != nullptr && "Unexpected input to hoistRegion"); - BasicBlock *BB = N->getBlock(); + // We want to visit parents before children. We will enque all the parents + // before their children in the worklist and process the worklist in order. + SmallVector<DomTreeNode *, 16> Worklist = collectChildrenInLoop(N, CurLoop); - // If this subregion is not in the top level loop at all, exit. - if (!CurLoop->contains(BB)) - return false; - - // Only need to process the contents of this block if it is not part of a - // subloop (which would already have been processed). 
bool Changed = false; - if (!inSubLoop(BB, CurLoop, LI)) - for (BasicBlock::iterator II = BB->begin(), E = BB->end(); II != E;) { - Instruction &I = *II++; - // Try constant folding this instruction. If all the operands are - // constants, it is technically hoistable, but it would be better to just - // fold it. - if (Constant *C = ConstantFoldInstruction( - &I, I.getModule()->getDataLayout(), TLI)) { - DEBUG(dbgs() << "LICM folding inst: " << I << " --> " << *C << '\n'); - CurAST->copyValue(&I, C); - I.replaceAllUsesWith(C); - if (isInstructionTriviallyDead(&I, TLI)) { - CurAST->deleteValue(&I); + for (DomTreeNode *DTN : Worklist) { + BasicBlock *BB = DTN->getBlock(); + // Only need to process the contents of this block if it is not part of a + // subloop (which would already have been processed). + if (!inSubLoop(BB, CurLoop, LI)) + for (BasicBlock::iterator II = BB->begin(), E = BB->end(); II != E;) { + Instruction &I = *II++; + // Try constant folding this instruction. If all the operands are + // constants, it is technically hoistable, but it would be better to + // just fold it. + if (Constant *C = ConstantFoldInstruction( + &I, I.getModule()->getDataLayout(), TLI)) { + DEBUG(dbgs() << "LICM folding inst: " << I << " --> " << *C << '\n'); + CurAST->copyValue(&I, C); + I.replaceAllUsesWith(C); + if (isInstructionTriviallyDead(&I, TLI)) { + CurAST->deleteValue(&I); + I.eraseFromParent(); + } + Changed = true; + continue; + } + + // Attempt to remove floating point division out of the loop by + // converting it to a reciprocal multiplication. + if (I.getOpcode() == Instruction::FDiv && + CurLoop->isLoopInvariant(I.getOperand(1)) && + I.hasAllowReciprocal()) { + auto Divisor = I.getOperand(1); + auto One = llvm::ConstantFP::get(Divisor->getType(), 1.0); + auto ReciprocalDivisor = BinaryOperator::CreateFDiv(One, Divisor); + ReciprocalDivisor->setFastMathFlags(I.getFastMathFlags()); + ReciprocalDivisor->insertBefore(&I); + + auto Product = + BinaryOperator::CreateFMul(I.getOperand(0), ReciprocalDivisor); + Product->setFastMathFlags(I.getFastMathFlags()); + Product->insertAfter(&I); + I.replaceAllUsesWith(Product); I.eraseFromParent(); + + hoist(*ReciprocalDivisor, DT, CurLoop, SafetyInfo, ORE); + Changed = true; + continue; } - continue; + + // Try hoisting the instruction out to the preheader. We can only do + // this if all of the operands of the instruction are loop invariant and + // if it is safe to hoist the instruction. + // + if (CurLoop->hasLoopInvariantOperands(&I) && + canSinkOrHoistInst(I, AA, DT, CurLoop, CurAST, SafetyInfo, ORE) && + isSafeToExecuteUnconditionally( + I, DT, CurLoop, SafetyInfo, ORE, + CurLoop->getLoopPreheader()->getTerminator())) + Changed |= hoist(I, DT, CurLoop, SafetyInfo, ORE); } + } - // Try hoisting the instruction out to the preheader. We can only do this - // if all of the operands of the instruction are loop invariant and if it - // is safe to hoist the instruction. 
- // - if (CurLoop->hasLoopInvariantOperands(&I) && - canSinkOrHoistInst(I, AA, DT, CurLoop, CurAST, SafetyInfo) && - isSafeToExecuteUnconditionally( - I, DT, CurLoop, SafetyInfo, - CurLoop->getLoopPreheader()->getTerminator())) - Changed |= hoist(I, DT, CurLoop, SafetyInfo); - } - - const std::vector<DomTreeNode *> &Children = N->getChildren(); - for (DomTreeNode *Child : Children) - Changed |= hoistRegion(Child, AA, LI, DT, TLI, CurLoop, CurAST, SafetyInfo); return Changed; } @@ -416,7 +500,11 @@ SafetyInfo->MayThrow = SafetyInfo->HeaderMayThrow; // Iterate over loop instructions and compute safety info. - for (Loop::block_iterator BB = CurLoop->block_begin(), + // Skip header as it has been computed and stored in HeaderMayThrow. + // The first block in loopinfo.Blocks is guaranteed to be the header. + assert(Header == *CurLoop->getBlocks().begin() && + "First block must be header"); + for (Loop::block_iterator BB = std::next(CurLoop->block_begin()), BBE = CurLoop->block_end(); (BB != BBE) && !SafetyInfo->MayThrow; ++BB) for (BasicBlock::iterator I = (*BB)->begin(), E = (*BB)->end(); @@ -432,13 +520,70 @@ SafetyInfo->BlockColors = colorEHFunclets(*Fn); } +// Return true if LI is invariant within scope of the loop. LI is invariant if +// CurLoop is dominated by an invariant.start representing the same memory +// location and size as the memory location LI loads from, and also the +// invariant.start has no uses. +static bool isLoadInvariantInLoop(LoadInst *LI, DominatorTree *DT, + Loop *CurLoop) { + Value *Addr = LI->getOperand(0); + const DataLayout &DL = LI->getModule()->getDataLayout(); + const uint32_t LocSizeInBits = DL.getTypeSizeInBits( + cast<PointerType>(Addr->getType())->getElementType()); + + // if the type is i8 addrspace(x)*, we know this is the type of + // llvm.invariant.start operand + auto *PtrInt8Ty = PointerType::get(Type::getInt8Ty(LI->getContext()), + LI->getPointerAddressSpace()); + unsigned BitcastsVisited = 0; + // Look through bitcasts until we reach the i8* type (this is invariant.start + // operand type). + while (Addr->getType() != PtrInt8Ty) { + auto *BC = dyn_cast<BitCastInst>(Addr); + // Avoid traversing high number of bitcast uses. + if (++BitcastsVisited > MaxNumUsesTraversed || !BC) + return false; + Addr = BC->getOperand(0); + } + + unsigned UsesVisited = 0; + // Traverse all uses of the load operand value, to see if invariant.start is + // one of the uses, and whether it dominates the load instruction. + for (auto *U : Addr->users()) { + // Avoid traversing for Load operand with high number of users. + if (++UsesVisited > MaxNumUsesTraversed) + return false; + IntrinsicInst *II = dyn_cast<IntrinsicInst>(U); + // If there are escaping uses of invariant.start instruction, the load maybe + // non-invariant. + if (!II || II->getIntrinsicID() != Intrinsic::invariant_start || + !II->use_empty()) + continue; + unsigned InvariantSizeInBits = + cast<ConstantInt>(II->getArgOperand(0))->getSExtValue() * 8; + // Confirm the invariant.start location size contains the load operand size + // in bits. Also, the invariant.start should dominate the load, and we + // should not hoist the load out of a loop that contains this dominating + // invariant.start. 
+ if (LocSizeInBits <= InvariantSizeInBits && + DT->properlyDominates(II->getParent(), CurLoop->getHeader())) + return true; + } + + return false; +} + bool llvm::canSinkOrHoistInst(Instruction &I, AAResults *AA, DominatorTree *DT, Loop *CurLoop, AliasSetTracker *CurAST, - LoopSafetyInfo *SafetyInfo) { + LoopSafetyInfo *SafetyInfo, + OptimizationRemarkEmitter *ORE) { + // SafetyInfo is nullptr if we are checking for sinking from preheader to + // loop body. + const bool SinkingToLoopBody = !SafetyInfo; // Loads have extra constraints we have to verify before we can hoist them. if (LoadInst *LI = dyn_cast<LoadInst>(&I)) { if (!LI->isUnordered()) - return false; // Don't hoist volatile/atomic loads! + return false; // Don't sink/hoist volatile or ordered atomic loads! // Loads from constant memory are always safe to move, even if they end up // in the same alias set as something that ends up being modified. @@ -447,6 +592,13 @@ if (LI->getMetadata(LLVMContext::MD_invariant_load)) return true; + if (LI->isAtomic() && SinkingToLoopBody) + return false; // Don't sink unordered atomic loads to loop body. + + // This checks for an invariant.start dominating the load. + if (isLoadInvariantInLoop(LI, DT, CurLoop)) + return true; + // Don't hoist loads which have may-aliased stores in loop. uint64_t Size = 0; if (LI->getType()->isSized()) @@ -455,7 +607,19 @@ AAMDNodes AAInfo; LI->getAAMetadata(AAInfo); - return !pointerInvalidatedByLoop(LI->getOperand(0), Size, AAInfo, CurAST); + bool Invalidated = + pointerInvalidatedByLoop(LI->getOperand(0), Size, AAInfo, CurAST); + // Check loop-invariant address because this may also be a sinkable load + // whose address is not necessarily loop-invariant. + if (ORE && Invalidated && CurLoop->isLoopInvariant(LI->getPointerOperand())) + ORE->emit([&]() { + return OptimizationRemarkMissed( + DEBUG_TYPE, "LoadWithLoopInvariantAddressInvalidated", LI) + << "failed to move load with loop-invariant address " + "because the loop may invalidate its value"; + }); + + return !Invalidated; } else if (CallInst *CI = dyn_cast<CallInst>(&I)) { // Don't sink or hoist dbg info; it's legal, but not useful. if (isa<DbgInfoIntrinsic>(I)) @@ -508,9 +672,9 @@ !isa<InsertValueInst>(I)) return false; - // SafetyInfo is nullptr if we are checking for sinking from preheader to - // loop body. It will be always safe as there is no speculative execution. - if (!SafetyInfo) + // If we are checking for sinking from preheader to loop body it will be + // always safe as there is no speculative execution. + if (SinkingToLoopBody) return true; // TODO: Plumb the context instruction through to make hoisting and sinking @@ -649,8 +813,13 @@ /// static bool sink(Instruction &I, const LoopInfo *LI, const DominatorTree *DT, const Loop *CurLoop, AliasSetTracker *CurAST, - const LoopSafetyInfo *SafetyInfo) { + const LoopSafetyInfo *SafetyInfo, + OptimizationRemarkEmitter *ORE) { DEBUG(dbgs() << "LICM sinking instruction: " << I << "\n"); + ORE->emit([&]() { + return OptimizationRemark(DEBUG_TYPE, "InstSunk", &I) + << "sinking " << ore::NV("Inst", &I); + }); bool Changed = false; if (isa<LoadInst>(I)) ++NumMovedLoads; @@ -717,10 +886,15 @@ /// is safe to hoist, this instruction is called to do the dirty work. 
/// static bool hoist(Instruction &I, const DominatorTree *DT, const Loop *CurLoop, - const LoopSafetyInfo *SafetyInfo) { + const LoopSafetyInfo *SafetyInfo, + OptimizationRemarkEmitter *ORE) { auto *Preheader = CurLoop->getLoopPreheader(); DEBUG(dbgs() << "LICM hoisting to " << Preheader->getName() << ": " << I << "\n"); + ORE->emit([&]() { + return OptimizationRemark(DEBUG_TYPE, "Hoisted", &I) << "hoisting " + << ore::NV("Inst", &I); + }); // Metadata can be dependent on conditions we are hoisting above. // Conservatively strip all metadata on the instruction unless we were @@ -736,6 +910,14 @@ // Move the new node to the Preheader, before its terminator. I.moveBefore(Preheader->getTerminator()); + // Do not retain debug locations when we are moving instructions to different + // basic blocks, because we want to avoid jumpy line tables. Calls, however, + // need to retain their debug locs because they may be inlined. + // FIXME: How do we retain source locations without causing poor debugging + // behavior? + if (!isa<CallInst>(I)) + I.setDebugLoc(DebugLoc()); + if (isa<LoadInst>(I)) ++NumMovedLoads; else if (isa<CallInst>(I)) @@ -747,21 +929,36 @@ /// Only sink or hoist an instruction if it is not a trapping instruction, /// or if the instruction is known not to trap when moved to the preheader. /// or if it is a trapping instruction and is guaranteed to execute. -static bool isSafeToExecuteUnconditionally(const Instruction &Inst, +static bool isSafeToExecuteUnconditionally(Instruction &Inst, const DominatorTree *DT, const Loop *CurLoop, const LoopSafetyInfo *SafetyInfo, + OptimizationRemarkEmitter *ORE, const Instruction *CtxI) { if (isSafeToSpeculativelyExecute(&Inst, CtxI, DT)) return true; - return isGuaranteedToExecute(Inst, DT, CurLoop, SafetyInfo); + bool GuaranteedToExecute = + isGuaranteedToExecute(Inst, DT, CurLoop, SafetyInfo); + + if (!GuaranteedToExecute) { + auto *LI = dyn_cast<LoadInst>(&Inst); + if (LI && CurLoop->isLoopInvariant(LI->getPointerOperand())) + ORE->emit([&]() { + return OptimizationRemarkMissed( + DEBUG_TYPE, "LoadWithLoopInvariantAddressCondExecuted", LI) + << "failed to hoist load with loop-invariant address " + "because load is conditionally executed"; + }); + } + + return GuaranteedToExecute; } namespace { class LoopPromoter : public LoadAndStorePromoter { Value *SomePtr; // Designated pointer to store to. 
- SmallPtrSetImpl<Value *> &PointerMustAliases; + const SmallSetVector<Value *, 8> &PointerMustAliases; SmallVectorImpl<BasicBlock *> &LoopExitBlocks; SmallVectorImpl<Instruction *> &LoopInsertPts; PredIteratorCache &PredCache; @@ -769,6 +966,7 @@ LoopInfo &LI; DebugLoc DL; int Alignment; + bool UnorderedAtomic; AAMDNodes AATags; Value *maybeInsertLCSSAPHI(Value *V, BasicBlock *BB) const { @@ -788,14 +986,15 @@ public: LoopPromoter(Value *SP, ArrayRef<const Instruction *> Insts, SSAUpdater &S, - SmallPtrSetImpl<Value *> &PMA, + const SmallSetVector<Value *, 8> &PMA, SmallVectorImpl<BasicBlock *> &LEB, SmallVectorImpl<Instruction *> &LIP, PredIteratorCache &PIC, AliasSetTracker &ast, LoopInfo &li, DebugLoc dl, int alignment, - const AAMDNodes &AATags) + bool UnorderedAtomic, const AAMDNodes &AATags) : LoadAndStorePromoter(Insts, S), SomePtr(SP), PointerMustAliases(PMA), LoopExitBlocks(LEB), LoopInsertPts(LIP), PredCache(PIC), AST(ast), - LI(li), DL(std::move(dl)), Alignment(alignment), AATags(AATags) {} + LI(li), DL(std::move(dl)), Alignment(alignment), + UnorderedAtomic(UnorderedAtomic), AATags(AATags) {} bool isInstInList(Instruction *I, const SmallVectorImpl<Instruction *> &) const override { @@ -819,6 +1018,8 @@ Value *Ptr = maybeInsertLCSSAPHI(SomePtr, ExitBlock); Instruction *InsertPos = LoopInsertPts[i]; StoreInst *NewSI = new StoreInst(LiveInValue, Ptr, InsertPos); + if (UnorderedAtomic) + NewSI->setOrdering(AtomicOrdering::Unordered); NewSI->setAlignment(Alignment); NewSI->setDebugLoc(DL); if (AATags) @@ -832,7 +1033,31 @@ } void instructionDeleted(Instruction *I) const override { AST.deleteValue(I); } }; -} // end anon namespace + + +/// Return true iff we can prove that a caller of this function can not inspect +/// the contents of the provided object in a well defined program. +bool isKnownNonEscaping(Value *Object, const TargetLibraryInfo *TLI) { + if (isa<AllocaInst>(Object)) + // Since the alloca goes out of scope, we know the caller can't retain a + // reference to it and be well defined. Thus, we don't need to check for + // capture. + return true; + + // For all other objects we need to know that the caller can't possibly + // have gotten a reference to the object. There are two components of + // that: + // 1) Object can't be escaped by this function. This is what + // PointerMayBeCaptured checks. + // 2) Object can't have been captured at definition site. For this, we + // need to know the return value is noalias. At the moment, we use a + // weaker condition and handle only AllocLikeFunctions (which are + // known to be noalias). TODO + return isAllocLikeFn(Object, TLI) && + !PointerMayBeCaptured(Object, true, true); +} + +} // namespace /// Try to promote memory values to scalars by sinking stores out of the /// loop and moving loads to before the loop. We do this by looping over @@ -840,26 +1065,18 @@ /// loop invariant. /// bool llvm::promoteLoopAccessesToScalars( - AliasSet &AS, SmallVectorImpl<BasicBlock *> &ExitBlocks, + const SmallSetVector<Value *, 8> &PointerMustAliases, + SmallVectorImpl<BasicBlock *> &ExitBlocks, SmallVectorImpl<Instruction *> &InsertPts, PredIteratorCache &PIC, LoopInfo *LI, DominatorTree *DT, const TargetLibraryInfo *TLI, - Loop *CurLoop, AliasSetTracker *CurAST, LoopSafetyInfo *SafetyInfo) { + Loop *CurLoop, AliasSetTracker *CurAST, LoopSafetyInfo *SafetyInfo, + OptimizationRemarkEmitter *ORE) { // Verify inputs. 
assert(LI != nullptr && DT != nullptr && CurLoop != nullptr && CurAST != nullptr && SafetyInfo != nullptr && "Unexpected Input to promoteLoopAccessesToScalars"); - // We can promote this alias set if it has a store, if it is a "Must" alias - // set, if the pointer is loop invariant, and if we are not eliminating any - // volatile loads or stores. - if (AS.isForwardingAliasSet() || !AS.isMod() || !AS.isMustAlias() || - AS.isVolatile() || !CurLoop->isLoopInvariant(AS.begin()->getValue())) - return false; - - assert(!AS.empty() && - "Must alias set should have at least one pointer element in it!"); - - Value *SomePtr = AS.begin()->getValue(); + Value *SomePtr = *PointerMustAliases.begin(); BasicBlock *Preheader = CurLoop->getLoopPreheader(); // It isn't safe to promote a load/store from the loop if the load/store is @@ -874,66 +1091,70 @@ // is not safe, because *P may only be valid to access if 'c' is true. // // The safety property divides into two parts: - // 1) The memory may not be dereferenceable on entry to the loop. In this + // p1) The memory may not be dereferenceable on entry to the loop. In this // case, we can't insert the required load in the preheader. - // 2) The memory model does not allow us to insert a store along any dynamic + // p2) The memory model does not allow us to insert a store along any dynamic // path which did not originally have one. // - // It is safe to promote P if all uses are direct load/stores and if at - // least one is guaranteed to be executed. - bool GuaranteedToExecute = false; + // If at least one store is guaranteed to execute, both properties are + // satisfied, and promotion is legal. + // + // This, however, is not a necessary condition. Even if no store/load is + // guaranteed to execute, we can still establish these properties. + // We can establish (p1) by proving that hoisting the load into the preheader + // is safe (i.e. proving dereferenceability on all paths through the loop). We + // can use any access within the alias set to prove dereferenceability, + // since they're all must alias. + // + // There are two ways establish (p2): + // a) Prove the location is thread-local. In this case the memory model + // requirement does not apply, and stores are safe to insert. + // b) Prove a store dominates every exit block. In this case, if an exit + // blocks is reached, the original dynamic path would have taken us through + // the store, so inserting a store into the exit block is safe. Note that this + // is different from the store being guaranteed to execute. For instance, + // if an exception is thrown on the first iteration of the loop, the original + // store is never executed, but the exit blocks are not executed either. - // It is also safe to promote P if we can prove that speculating a load into - // the preheader is safe (i.e. proving dereferenceability on all - // paths through the loop), and that the memory can be proven thread local - // (so that the memory model requirement doesn't apply.) We first establish - // the former, and then run a capture analysis below to establish the later. - // We can use any access within the alias set to prove dereferenceability - // since they're all must alias. - bool CanSpeculateLoad = false; + bool DereferenceableInPH = false; + bool SafeToInsertStore = false; SmallVector<Instruction *, 64> LoopUses; - SmallPtrSet<Value *, 4> PointerMustAliases; // We start with an alignment of one and try to find instructions that allow // us to prove better alignment. 
unsigned Alignment = 1; + // Keep track of which types of access we see + bool SawUnorderedAtomic = false; + bool SawNotAtomic = false; AAMDNodes AATags; - bool HasDedicatedExits = CurLoop->hasDedicatedExits(); - - // Don't sink stores from loops without dedicated block exits. Exits - // containing indirect branches are not transformed by loop simplify, - // make sure we catch that. An additional load may be generated in the - // preheader for SSA updater, so also avoid sinking when no preheader - // is available. - if (!HasDedicatedExits || !Preheader) - return false; const DataLayout &MDL = Preheader->getModule()->getDataLayout(); + bool IsKnownThreadLocalObject = false; if (SafetyInfo->MayThrow) { // If a loop can throw, we have to insert a store along each unwind edge. // That said, we can't actually make the unwind edge explicit. Therefore, - // we have to prove that the store is dead along the unwind edge. - // - // Currently, this code just special-cases alloca instructions. - if (!isa<AllocaInst>(GetUnderlyingObject(SomePtr, MDL))) + // we have to prove that the store is dead along the unwind edge. We do + // this by proving that the caller can't have a reference to the object + // after return and thus can't possibly load from the object. + Value *Object = GetUnderlyingObject(SomePtr, MDL); + if (!isKnownNonEscaping(Object, TLI)) return false; + // Subtlety: Alloca's aren't visible to callers, but *are* potentially + // visible to other threads if captured and used during their lifetimes. + IsKnownThreadLocalObject = !isa<AllocaInst>(Object); } // Check that all of the pointers in the alias set have the same type. We // cannot (yet) promote a memory location that is loaded and stored in // different sizes. While we are at it, collect alignment and AA info. - bool Changed = false; - for (const auto &ASI : AS) { - Value *ASIV = ASI.getValue(); - PointerMustAliases.insert(ASIV); - + for (Value *ASIV : PointerMustAliases) { // Check that all of the pointers in the alias set have the same type. We // cannot (yet) promote a memory location that is loaded and stored in // different sizes. if (SomePtr->getType() != ASIV->getType()) - return Changed; + return false; for (User *U : ASIV->users()) { // Ignore instructions that are outside the loop. @@ -943,50 +1164,68 @@ // If there is an non-load/store instruction in the loop, we can't promote // it. - if (const LoadInst *Load = dyn_cast<LoadInst>(UI)) { + if (LoadInst *Load = dyn_cast<LoadInst>(UI)) { assert(!Load->isVolatile() && "AST broken"); - if (!Load->isSimple()) - return Changed; + if (!Load->isUnordered()) + return false; - if (!GuaranteedToExecute && !CanSpeculateLoad) - CanSpeculateLoad = isSafeToExecuteUnconditionally( - *Load, DT, CurLoop, SafetyInfo, Preheader->getTerminator()); + SawUnorderedAtomic |= Load->isAtomic(); + SawNotAtomic |= !Load->isAtomic(); + + if (!DereferenceableInPH) + DereferenceableInPH = isSafeToExecuteUnconditionally( + *Load, DT, CurLoop, SafetyInfo, ORE, Preheader->getTerminator()); } else if (const StoreInst *Store = dyn_cast<StoreInst>(UI)) { // Stores *of* the pointer are not interesting, only stores *to* the // pointer. if (UI->getOperand(1) != ASIV) continue; assert(!Store->isVolatile() && "AST broken"); - if (!Store->isSimple()) - return Changed; + if (!Store->isUnordered()) + return false; - // Note that we only check GuaranteedToExecute inside the store case - // so that we do not introduce stores where they did not exist before - // (which would break the LLVM concurrency model). 
+ SawUnorderedAtomic |= Store->isAtomic(); + SawNotAtomic |= !Store->isAtomic(); - // If the alignment of this instruction allows us to specify a more - // restrictive (and performant) alignment and if we are sure this - // instruction will be executed, update the alignment. - // Larger is better, with the exception of 0 being the best alignment. + // If the store is guaranteed to execute, both properties are satisfied. + // We may want to check if a store is guaranteed to execute even if we + // already know that promotion is safe, since it may have higher + // alignment than any other guaranteed stores, in which case we can + // raise the alignment on the promoted store. unsigned InstAlignment = Store->getAlignment(); - if ((InstAlignment > Alignment || InstAlignment == 0) && - Alignment != 0) { + if (!InstAlignment) + InstAlignment = + MDL.getABITypeAlignment(Store->getValueOperand()->getType()); + + if (!DereferenceableInPH || !SafeToInsertStore || + (InstAlignment > Alignment)) { if (isGuaranteedToExecute(*UI, DT, CurLoop, SafetyInfo)) { - GuaranteedToExecute = true; - Alignment = InstAlignment; + DereferenceableInPH = true; + SafeToInsertStore = true; + Alignment = std::max(Alignment, InstAlignment); } - } else if (!GuaranteedToExecute) { - GuaranteedToExecute = - isGuaranteedToExecute(*UI, DT, CurLoop, SafetyInfo); } - if (!GuaranteedToExecute && !CanSpeculateLoad) { - CanSpeculateLoad = isDereferenceableAndAlignedPointer( + // If a store dominates all exit blocks, it is safe to sink. + // As explained above, if an exit block was executed, a dominating + // store must have been been executed at least once, so we are not + // introducing stores on paths that did not have them. + // Note that this only looks at explicit exit blocks. If we ever + // start sinking stores into unwind edges (see above), this will break. + if (!SafeToInsertStore) + SafeToInsertStore = llvm::all_of(ExitBlocks, [&](BasicBlock *Exit) { + return DT->dominates(Store->getParent(), Exit); + }); + + // If the store is not guaranteed to execute, we may still get + // deref info through it. + if (!DereferenceableInPH) { + DereferenceableInPH = isDereferenceableAndAlignedPointer( Store->getPointerOperand(), Store->getAlignment(), MDL, Preheader->getTerminator(), DT); } } else - return Changed; // Not a load or store. + return false; // Not a load or store. // Merge the AA tags. if (LoopUses.empty()) { @@ -1000,38 +1239,44 @@ } } - // Check legality per comment above. Otherwise, we can't promote. - bool PromotionIsLegal = GuaranteedToExecute; - if (!PromotionIsLegal && CanSpeculateLoad) { - // If this is a thread local location, then we can insert stores along - // paths which originally didn't have them without violating the memory - // model. - Value *Object = GetUnderlyingObject(SomePtr, MDL); - PromotionIsLegal = - isAllocLikeFn(Object, TLI) && !PointerMayBeCaptured(Object, true, true); - } - if (!PromotionIsLegal) - return Changed; + // If we found both an unordered atomic instruction and a non-atomic memory + // access, bail. We can't blindly promote non-atomic to atomic since we + // might not be able to lower the result. We can't downgrade since that + // would violate memory model. Also, align 0 is an error for atomics. + if (SawUnorderedAtomic && SawNotAtomic) + return false; + + // If we couldn't prove we can hoist the load, bail. + if (!DereferenceableInPH) + return false; - // Figure out the loop exits and their insertion points, if this is the - // first promotion. 
- if (ExitBlocks.empty()) { - CurLoop->getUniqueExitBlocks(ExitBlocks); - InsertPts.clear(); - InsertPts.reserve(ExitBlocks.size()); - for (BasicBlock *ExitBlock : ExitBlocks) - InsertPts.push_back(&*ExitBlock->getFirstInsertionPt()); + // We know we can hoist the load, but don't have a guaranteed store. + // Check whether the location is thread-local. If it is, then we can insert + // stores along paths which originally didn't have them without violating the + // memory model. + if (!SafeToInsertStore) { + if (IsKnownThreadLocalObject) + SafeToInsertStore = true; + else { + Value *Object = GetUnderlyingObject(SomePtr, MDL); + SafeToInsertStore = + (isAllocLikeFn(Object, TLI) || isa<AllocaInst>(Object)) && + !PointerMayBeCaptured(Object, true, true); + } } - // Can't insert into a catchswitch. - for (BasicBlock *ExitBlock : ExitBlocks) - if (isa<CatchSwitchInst>(ExitBlock->getTerminator())) - return Changed; + // If we've still failed to prove we can sink the store, give up. + if (!SafeToInsertStore) + return false; // Otherwise, this is safe to promote, lets do it! DEBUG(dbgs() << "LICM: Promoting value stored to in loop: " << *SomePtr << '\n'); - Changed = true; + ORE->emit([&]() { + return OptimizationRemark(DEBUG_TYPE, "PromoteLoopAccessesToScalar", + LoopUses[0]) + << "Moving accesses to memory location out of the loop"; + }); ++NumPromoted; // Grab a debug location for the inserted loads/stores; given that the @@ -1044,12 +1289,15 @@ SmallVector<PHINode *, 16> NewPHIs; SSAUpdater SSA(&NewPHIs); LoopPromoter Promoter(SomePtr, LoopUses, SSA, PointerMustAliases, ExitBlocks, - InsertPts, PIC, *CurAST, *LI, DL, Alignment, AATags); + InsertPts, PIC, *CurAST, *LI, DL, Alignment, + SawUnorderedAtomic, AATags); // Set up the preheader to have a definition of the value. It is the live-out // value from the preheader that uses in the loop will use. LoadInst *PreheaderLoad = new LoadInst( SomePtr, SomePtr->getName() + ".promoted", Preheader->getTerminator()); + if (SawUnorderedAtomic) + PreheaderLoad->setOrdering(AtomicOrdering::Unordered); PreheaderLoad->setAlignment(Alignment); PreheaderLoad->setDebugLoc(DL); if (AATags) @@ -1064,7 +1312,7 @@ if (PreheaderLoad->use_empty()) PreheaderLoad->eraseFromParent(); - return Changed; + return true; } /// Returns an owning pointer to an alias set which incorporates aliasing info @@ -1106,11 +1354,8 @@ auto mergeLoop = [&](Loop *L) { // Loop over the body of this loop, looking for calls, invokes, and stores. - // Because subloops have already been incorporated into AST, we skip blocks - // in subloops. for (BasicBlock *BB : L->blocks()) - if (LI->getLoopFor(BB) == L) // Ignore blocks in subloops. - CurAST->add(*BB); // Incorporate the specified basic block + CurAST->add(*BB); // Incorporate the specified basic block }; // Add everything from the sub loops that are no longer directly available.
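The sketches below illustrate, outside the pass, the main mechanisms this change introduces; helper names and the surrounding scaffolding are illustrative rather than copies of the diff.

Under the legacy pass manager an OptimizationRemarkEmitter cannot be requested as a preserved function analysis from a loop pass, so the change constructs one locally per loop. A minimal sketch of that pattern, using the deferred-builder emit style the diff adopts (the pass-name string and helper are illustrative):

```cpp
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/IR/Instruction.h"

using namespace llvm;

// Construct ORE directly from the loop's enclosing function; this is cheap
// when no remarks were requested, so doing it per loop is acceptable.
static void remarkHoisted(Loop *L, Instruction *I) {
  OptimizationRemarkEmitter ORE(L->getHeader()->getParent());
  // The remark object is only built when a consumer asked for remarks,
  // mirroring the ORE->emit([&] { ... }) calls added throughout the diff.
  ORE.emit([&]() {
    return OptimizationRemark("licm", "Hoisted", I)
           << "hoisting " << ore::NV("Inst", I);
  });
}
```

With remarks enabled (for example `-Rpass=licm -Rpass-missed=licm` in clang, or `-pass-remarks=licm` in opt), these emit calls report which instructions LICM moved and which loads it failed to move and why.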
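Promotion now computes the loop's unique exit blocks up front and refuses to proceed when an exit terminates in a catchswitch, since there is no valid insertion point for the sunk store there. A standalone sketch of that guard (the helper name is made up for illustration):

```cpp
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

static bool computePromotionInsertPts(Loop *L,
                                      SmallVectorImpl<BasicBlock *> &ExitBlocks,
                                      SmallVectorImpl<Instruction *> &InsertPts) {
  L->getUniqueExitBlocks(ExitBlocks);

  // A catchswitch terminator has no first insertion point to place a store at.
  for (BasicBlock *Exit : ExitBlocks)
    if (isa<CatchSwitchInst>(Exit->getTerminator()))
      return false;

  // One insertion point per exit, used later when sinking the promoted store.
  InsertPts.reserve(ExitBlocks.size());
  for (BasicBlock *Exit : ExitBlocks)
    InsertPts.push_back(&*Exit->getFirstInsertionPt());
  return true;
}
```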
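sinkRegion and hoistRegion no longer recurse over the dominator tree; they collect the loop's dom-tree nodes once with collectChildrenInLoop and then walk that list forward (for hoisting, parents before children) or in reverse (for sinking, children before parents). A sketch of the sinking-order traversal, with the per-instruction work elided:

```cpp
#include "llvm/ADT/STLExtras.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/Transforms/Utils/LoopUtils.h"

using namespace llvm;

static void walkBlocksForSinking(DomTreeNode *N, Loop *CurLoop, LoopInfo *LI) {
  // Parents are enqueued before their children, so reverse iteration visits
  // children first -- the order sinking wants.
  SmallVector<DomTreeNode *, 16> Worklist = collectChildrenInLoop(N, CurLoop);
  for (DomTreeNode *DTN : reverse(Worklist)) {
    BasicBlock *BB = DTN->getBlock();
    // Blocks of inner loops were already handled when those loops were run.
    if (LI->getLoopFor(BB) != CurLoop)
      continue;
    // ... scan BB bottom-up for instructions that can be sunk or deleted ...
  }
}
```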
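hoistRegion gains a rewrite that turns a floating-point division with a loop-invariant divisor into a multiply by a reciprocal, provided the instruction carries the allow-reciprocal fast-math flag; the freshly created reciprocal is then the loop-invariant value that gets hoisted. A sketch of that rewrite in isolation (it assumes the caller has already checked the flag and the invariance of operand 1):

```cpp
#include "llvm/IR/Constants.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

// Rewrites "x / d" into "x * (1.0 / d)" and returns the new reciprocal, which
// the caller can hoist into the preheader since d is loop invariant.
static Instruction *rewriteFDivAsReciprocal(Instruction &I) {
  Value *Divisor = I.getOperand(1);
  Value *One = ConstantFP::get(Divisor->getType(), 1.0);

  BinaryOperator *Reciprocal = BinaryOperator::CreateFDiv(One, Divisor);
  Reciprocal->setFastMathFlags(I.getFastMathFlags());
  Reciprocal->insertBefore(&I);

  BinaryOperator *Product =
      BinaryOperator::CreateFMul(I.getOperand(0), Reciprocal);
  Product->setFastMathFlags(I.getFastMathFlags());
  Product->insertAfter(&I);

  I.replaceAllUsesWith(Product);
  I.eraseFromParent();
  return Reciprocal;
}
```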
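The new isLoadInvariantInLoop treats a load as invariant when an unused llvm.invariant.start covers at least as many bits as the load and properly dominates the loop header; traversal of bitcasts and users is capped by the new licm-max-num-uses-traversed option. The core of that test, condensed (it assumes Addr has already been stripped to the intrinsic's i8* operand and the use-count cap has been applied):

```cpp
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IntrinsicInst.h"

using namespace llvm;

static bool coveredByInvariantStart(Value *Addr, uint64_t LoadSizeInBits,
                                    DominatorTree *DT, Loop *CurLoop) {
  for (User *U : Addr->users()) {
    auto *II = dyn_cast<IntrinsicInst>(U);
    // An invariant.start with escaping uses may be ended early, so only an
    // unused one is trusted here.
    if (!II || II->getIntrinsicID() != Intrinsic::invariant_start ||
        !II->use_empty())
      continue;
    uint64_t InvariantSizeInBits =
        cast<ConstantInt>(II->getArgOperand(0))->getSExtValue() * 8;
    if (LoadSizeInBits <= InvariantSizeInBits &&
        DT->properlyDominates(II->getParent(), CurLoop->getHeader()))
      return true;
  }
  return false;
}
```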
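hoist() now also drops the source location of every moved instruction except calls, so that hoisted code does not produce jumpy line tables while inlinable calls keep the location the inliner needs. Reduced to its essentials:

```cpp
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

static void moveToPreheader(Instruction &I, Loop *CurLoop) {
  BasicBlock *Preheader = CurLoop->getLoopPreheader();
  I.moveBefore(Preheader->getTerminator());
  // Calls keep their debug location because they may later be inlined;
  // everything else is scrubbed to avoid misleading line-table entries.
  if (!isa<CallInst>(I))
    I.setDebugLoc(DebugLoc());
}
```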
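The unwind-edge handling for promotion is generalized from "the object is an alloca" to "no caller can observe the object after return", factored into isKnownNonEscaping. Roughly, the reasoning it encodes is:

```cpp
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

static bool objectInvisibleToCaller(Value *Object, const TargetLibraryInfo *TLI) {
  // An alloca dies when the function returns, so the caller cannot hold a
  // well-defined reference to it; no capture analysis is needed.
  if (isa<AllocaInst>(Object))
    return true;
  // Otherwise the allocation must be noalias (alloc-like) and must never
  // have been captured, or the caller could still reach it.
  return isAllocLikeFn(Object, TLI) &&
         !PointerMayBeCaptured(Object, /*ReturnCaptures=*/true,
                               /*StoreCaptures=*/true);
}
```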
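The long comment about the two safety properties (dereferenceability in the preheader, and not introducing stores on paths that had none) is easiest to see on a small, hypothetical source example:

```cpp
// Hypothetical input, not taken from the diff. The store to *p only happens
// when 'enable' is true, so promoting *p to a register and writing it back
// unconditionally after the loop would create a store the original program
// never performed -- observable by another thread unless p is provably
// thread-local or some store to *p dominates every loop exit.
void accumulate(int *p, const int *in, int n, bool enable) {
  for (int i = 0; i < n; ++i)
    if (enable)
      *p += in[i];
}
```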
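One of the new ways to establish the second property is to find a store whose block dominates every exit block: any dynamic path that leaves the loop must then have executed that store, so sinking a store into the exits adds nothing that was not already there. The check itself is small:

```cpp
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

static bool storeDominatesAllExits(StoreInst *Store,
                                   ArrayRef<BasicBlock *> ExitBlocks,
                                   DominatorTree *DT) {
  // This only covers explicit exit blocks; unwind edges are handled by the
  // separate escape reasoning above.
  return llvm::all_of(ExitBlocks, [&](BasicBlock *Exit) {
    return DT->dominates(Store->getParent(), Exit);
  });
}
```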
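Finally, when promotion succeeds, the load placed in the preheader inherits the strongest properties seen in the loop: if the promoted accesses were unordered atomics (mixing atomic and non-atomic accesses is rejected earlier), the preheader load and the sunk stores become unordered atomics as well. A sketch of that setup, with the surrounding SSAUpdater plumbing omitted:

```cpp
#include "llvm/IR/Instructions.h"

using namespace llvm;

static LoadInst *createPreheaderLoad(Value *SomePtr, BasicBlock *Preheader,
                                     unsigned Alignment,
                                     bool SawUnorderedAtomic) {
  LoadInst *PreheaderLoad = new LoadInst(
      SomePtr, SomePtr->getName() + ".promoted", Preheader->getTerminator());
  if (SawUnorderedAtomic)
    PreheaderLoad->setOrdering(AtomicOrdering::Unordered);
  PreheaderLoad->setAlignment(Alignment);
  return PreheaderLoad;
}
```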