lib/CodeGen/MachineLICM.cpp @ 0:95c75e76d11b (branch LLVM3.4)

LLVM 3.4
author Kaito Tokumori <e105711@ie.u-ryukyu.ac.jp>
date Thu, 12 Dec 2013 13:56:28 +0900
children 54457678186b
1 //===-- MachineLICM.cpp - Machine Loop Invariant Code Motion Pass ---------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This pass performs loop invariant code motion on machine instructions. We
11 // attempt to remove as much code from the body of a loop as possible.
12 //
13 // This pass does not attempt to throttle itself to limit register pressure.
14 // The register allocation phases are expected to perform rematerialization
15 // to recover when register pressure is high.
16 //
17 // This pass is not intended to be a replacement or a complete alternative
18 // for the LLVM-IR-level LICM pass. It is only designed to hoist simple
19 // constructs that are not exposed before lowering and instruction selection.
20 //
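// For illustration only, in target-neutral pseudo machine IR: given
//
//   loop:
//     %inv = MOVimm 40        ; no operand is defined inside the loop
//     ...   uses of %inv ...
//     BRcond loop
//
// the pass moves the invariant definition into the preheader:
//
//   preheader:
//     %inv = MOVimm 40
//   loop:
//     ...   uses of %inv ...
//     BRcond loop
//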
21 //===----------------------------------------------------------------------===//
22
23 #define DEBUG_TYPE "machine-licm"
24 #include "llvm/CodeGen/Passes.h"
25 #include "llvm/ADT/DenseMap.h"
26 #include "llvm/ADT/SmallSet.h"
27 #include "llvm/ADT/Statistic.h"
28 #include "llvm/Analysis/AliasAnalysis.h"
29 #include "llvm/CodeGen/MachineDominators.h"
30 #include "llvm/CodeGen/MachineFrameInfo.h"
31 #include "llvm/CodeGen/MachineLoopInfo.h"
32 #include "llvm/CodeGen/MachineMemOperand.h"
33 #include "llvm/CodeGen/MachineRegisterInfo.h"
34 #include "llvm/CodeGen/PseudoSourceValue.h"
35 #include "llvm/MC/MCInstrItineraries.h"
36 #include "llvm/Support/CommandLine.h"
37 #include "llvm/Support/Debug.h"
38 #include "llvm/Support/raw_ostream.h"
39 #include "llvm/Target/TargetInstrInfo.h"
40 #include "llvm/Target/TargetLowering.h"
41 #include "llvm/Target/TargetMachine.h"
42 #include "llvm/Target/TargetRegisterInfo.h"
43 using namespace llvm;
44
45 static cl::opt<bool>
46 AvoidSpeculation("avoid-speculation",
47 cl::desc("MachineLICM should avoid speculation"),
48 cl::init(true), cl::Hidden);
49
50 STATISTIC(NumHoisted,
51 "Number of machine instructions hoisted out of loops");
52 STATISTIC(NumLowRP,
53 "Number of instructions hoisted in low reg pressure situation");
54 STATISTIC(NumHighLatency,
55 "Number of high latency instructions hoisted");
56 STATISTIC(NumCSEed,
57 "Number of hoisted machine instructions CSEed");
58 STATISTIC(NumPostRAHoisted,
59 "Number of machine instructions hoisted out of loops post regalloc");
60
61 namespace {
62 class MachineLICM : public MachineFunctionPass {
63 const TargetMachine *TM;
64 const TargetInstrInfo *TII;
65 const TargetLoweringBase *TLI;
66 const TargetRegisterInfo *TRI;
67 const MachineFrameInfo *MFI;
68 MachineRegisterInfo *MRI;
69 const InstrItineraryData *InstrItins;
70 bool PreRegAlloc;
71
72 // Various analyses that we use...
73 AliasAnalysis *AA; // Alias analysis info.
74 MachineLoopInfo *MLI; // Current MachineLoopInfo
75 MachineDominatorTree *DT; // Machine dominator tree for the cur loop
76
77 // State that is updated as we process loops
78 bool Changed; // True if a loop is changed.
79 bool FirstInLoop; // True if it's the first LICM in the loop.
80 MachineLoop *CurLoop; // The current loop we are working on.
81 MachineBasicBlock *CurPreheader; // The preheader for CurLoop.
82
83 // Exit blocks for CurLoop.
84 SmallVector<MachineBasicBlock*, 8> ExitBlocks;
85
86 bool isExitBlock(const MachineBasicBlock *MBB) const {
87 return std::find(ExitBlocks.begin(), ExitBlocks.end(), MBB) !=
88 ExitBlocks.end();
89 }
90
91 // Track 'estimated' register pressure.
92 SmallSet<unsigned, 32> RegSeen;
93 SmallVector<unsigned, 8> RegPressure;
94
95 // Register pressure "limit" per register class. If the pressure
96 // is higher than the limit, then it's considered high.
97 SmallVector<unsigned, 8> RegLimit;
98
99 // Register pressure on path leading from loop preheader to current BB.
100 SmallVector<SmallVector<unsigned, 8>, 16> BackTrace;
101
102 // For each opcode, keep a list of potential CSE instructions.
103 DenseMap<unsigned, std::vector<const MachineInstr*> > CSEMap;
104
105 enum {
106 SpeculateFalse = 0,
107 SpeculateTrue = 1,
108 SpeculateUnknown = 2
109 };
110
111 // If an MBB does not dominate all loop exiting blocks, then it may not be
112 // safe to hoist loads from this block.
113 // Tri-state: 0 - false, 1 - true, 2 - unknown
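// The state is reset to SpeculateUnknown before each basic block is scanned,
// and IsGuaranteedToExecute() caches its answer here for the rest of that
// block.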
114 unsigned SpeculationState;
115
116 public:
117 static char ID; // Pass identification, replacement for typeid
118 MachineLICM() :
119 MachineFunctionPass(ID), PreRegAlloc(true) {
120 initializeMachineLICMPass(*PassRegistry::getPassRegistry());
121 }
122
123 explicit MachineLICM(bool PreRA) :
124 MachineFunctionPass(ID), PreRegAlloc(PreRA) {
125 initializeMachineLICMPass(*PassRegistry::getPassRegistry());
126 }
127
128 virtual bool runOnMachineFunction(MachineFunction &MF);
129
130 virtual void getAnalysisUsage(AnalysisUsage &AU) const {
131 AU.addRequired<MachineLoopInfo>();
132 AU.addRequired<MachineDominatorTree>();
133 AU.addRequired<AliasAnalysis>();
134 AU.addPreserved<MachineLoopInfo>();
135 AU.addPreserved<MachineDominatorTree>();
136 MachineFunctionPass::getAnalysisUsage(AU);
137 }
138
139 virtual void releaseMemory() {
140 RegSeen.clear();
141 RegPressure.clear();
142 RegLimit.clear();
143 BackTrace.clear();
144 for (DenseMap<unsigned,std::vector<const MachineInstr*> >::iterator
145 CI = CSEMap.begin(), CE = CSEMap.end(); CI != CE; ++CI)
146 CI->second.clear();
147 CSEMap.clear();
148 }
149
150 private:
151 /// CandidateInfo - Keep track of information about hoisting candidates.
152 struct CandidateInfo {
153 MachineInstr *MI;
154 unsigned Def;
155 int FI;
156 CandidateInfo(MachineInstr *mi, unsigned def, int fi)
157 : MI(mi), Def(def), FI(fi) {}
158 };
159
160 /// HoistRegionPostRA - Walk the specified region of the CFG and hoist loop
161 /// invariants out to the preheader.
162 void HoistRegionPostRA();
163
164 /// HoistPostRA - When an instruction is found to use only loop-invariant
165 /// operands and is safe to hoist, this function is called to do the
166 /// dirty work.
167 void HoistPostRA(MachineInstr *MI, unsigned Def);
168
169 /// ProcessMI - Examine the instruction as a potential LICM candidate. Also
170 /// gather register def and frame object update information.
171 void ProcessMI(MachineInstr *MI,
172 BitVector &PhysRegDefs,
173 BitVector &PhysRegClobbers,
174 SmallSet<int, 32> &StoredFIs,
175 SmallVectorImpl<CandidateInfo> &Candidates);
176
177 /// AddToLiveIns - Add register 'Reg' to the livein sets of BBs in the
178 /// current loop.
179 void AddToLiveIns(unsigned Reg);
180
181 /// IsLICMCandidate - Returns true if the instruction may be a suitable
182 /// candidate for LICM. For example, if the instruction is a call, then it's
183 /// obviously not safe to hoist it.
184 bool IsLICMCandidate(MachineInstr &I);
185
186 /// IsLoopInvariantInst - Returns true if the instruction is loop
187 /// invariant. I.e., all virtual register operands are defined outside of
188 /// the loop, physical registers aren't accessed (explicitly or implicitly),
189 /// and the instruction is hoistable.
190 ///
191 bool IsLoopInvariantInst(MachineInstr &I);
192
193 /// HasLoopPHIUse - Return true if the specified instruction is used by any
194 /// phi node in the current loop.
195 bool HasLoopPHIUse(const MachineInstr *MI) const;
196
197 /// HasHighOperandLatency - Compute the operand latency between a def of 'Reg'
198 /// and a use in the current loop; return true if the target considers
199 /// it 'high'.
200 bool HasHighOperandLatency(MachineInstr &MI, unsigned DefIdx,
201 unsigned Reg) const;
202
203 bool IsCheapInstruction(MachineInstr &MI) const;
204
205 /// CanCauseHighRegPressure - Visit BBs from header to current BB,
206 /// check whether hoisting an instruction with the given cost map can cause high
207 /// register pressure.
208 bool CanCauseHighRegPressure(DenseMap<unsigned, int> &Cost, bool Cheap);
209
210 /// UpdateBackTraceRegPressure - Traverse the back trace from header to
211 /// the current block and update their register pressures to reflect the
212 /// effect of hoisting MI from the current block to the preheader.
213 void UpdateBackTraceRegPressure(const MachineInstr *MI);
214
215 /// IsProfitableToHoist - Return true if it is potentially profitable to
216 /// hoist the given loop invariant.
217 bool IsProfitableToHoist(MachineInstr &MI);
218
219 /// IsGuaranteedToExecute - Check if this mbb is guaranteed to execute.
220 /// If not then a load from this mbb may not be safe to hoist.
221 bool IsGuaranteedToExecute(MachineBasicBlock *BB);
222
223 void EnterScope(MachineBasicBlock *MBB);
224
225 void ExitScope(MachineBasicBlock *MBB);
226
227 /// ExitScopeIfDone - Destroy the scope for the MBB that corresponds to the
228 /// given dominator tree node if it's a leaf or all of its children are done.
229 /// Walk up the dominator tree to destroy ancestors which are now done.
230 void ExitScopeIfDone(MachineDomTreeNode *Node,
231 DenseMap<MachineDomTreeNode*, unsigned> &OpenChildren,
232 DenseMap<MachineDomTreeNode*, MachineDomTreeNode*> &ParentMap);
233
234 /// HoistOutOfLoop - Walk the specified loop in the CFG (defined by all
235 /// blocks dominated by the specified header block, and that are in the
236 /// current loop) in depth first order w.r.t the DominatorTree. This allows
237 /// us to visit definitions before uses, allowing us to hoist a loop body in
238 /// one pass without iteration.
239 ///
240 void HoistOutOfLoop(MachineDomTreeNode *LoopHeaderNode);
241 void HoistRegion(MachineDomTreeNode *N, bool IsHeader);
242
243 /// getRegisterClassIDAndCost - For a given MI, register, and the operand
244 /// index, return the ID and cost of its representative register class by
245 /// reference.
246 void getRegisterClassIDAndCost(const MachineInstr *MI,
247 unsigned Reg, unsigned OpIdx,
248 unsigned &RCId, unsigned &RCCost) const;
249
250 /// InitRegPressure - Find all virtual register references that are liveout
251 /// of the preheader to initialize the starting "register pressure". Note
252 /// this does not count live through (livein but not used) registers.
253 void InitRegPressure(MachineBasicBlock *BB);
254
255 /// UpdateRegPressure - Update estimate of register pressure after the
256 /// specified instruction.
257 void UpdateRegPressure(const MachineInstr *MI);
258
259 /// ExtractHoistableLoad - Unfold a load from the given machineinstr if
260 /// the load itself could be hoisted. Return the unfolded and hoistable
261 /// load, or null if the load couldn't be unfolded or if it wouldn't
262 /// be hoistable.
263 MachineInstr *ExtractHoistableLoad(MachineInstr *MI);
264
265 /// LookForDuplicate - Find an instruction among PrevMIs that is a
266 /// duplicate of MI. Return this instruction if it's found.
267 const MachineInstr *LookForDuplicate(const MachineInstr *MI,
268 std::vector<const MachineInstr*> &PrevMIs);
269
270 /// EliminateCSE - Given a LICM'ed instruction, look for an instruction in
271 /// the preheader that computes the same value. If one is found, replace all
272 /// uses of its definition with the definition of the existing instruction
273 /// rather than hoisting the instruction to the preheader.
274 bool EliminateCSE(MachineInstr *MI,
275 DenseMap<unsigned, std::vector<const MachineInstr*> >::iterator &CI);
276
277 /// MayCSE - Return true if the given instruction will be CSE'd if it's
278 /// hoisted out of the loop.
279 bool MayCSE(MachineInstr *MI);
280
281 /// Hoist - When an instruction is found to use only loop-invariant operands
282 /// and is safe to hoist, this function is called to do the dirty work.
283 /// It returns true if the instruction is hoisted.
284 bool Hoist(MachineInstr *MI, MachineBasicBlock *Preheader);
285
286 /// InitCSEMap - Initialize the CSE map with instructions that are in the
287 /// current loop preheader that may become duplicates of instructions that
288 /// are hoisted out of the loop.
289 void InitCSEMap(MachineBasicBlock *BB);
290
291 /// getCurPreheader - Get the preheader for the current loop, splitting
292 /// a critical edge if needed.
293 MachineBasicBlock *getCurPreheader();
294 };
295 } // end anonymous namespace
296
297 char MachineLICM::ID = 0;
298 char &llvm::MachineLICMID = MachineLICM::ID;
299 INITIALIZE_PASS_BEGIN(MachineLICM, "machinelicm",
300 "Machine Loop Invariant Code Motion", false, false)
301 INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo)
302 INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
303 INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
304 INITIALIZE_PASS_END(MachineLICM, "machinelicm",
305 "Machine Loop Invariant Code Motion", false, false)
306
307 /// LoopIsOuterMostWithPredecessor - Test if the given loop is the outer-most
308 /// loop that has a unique predecessor.
309 static bool LoopIsOuterMostWithPredecessor(MachineLoop *CurLoop) {
310 // Check whether this loop even has a unique predecessor.
311 if (!CurLoop->getLoopPredecessor())
312 return false;
313 // Ok, now check to see if any of its outer loops do.
314 for (MachineLoop *L = CurLoop->getParentLoop(); L; L = L->getParentLoop())
315 if (L->getLoopPredecessor())
316 return false;
317 // None of them did, so this is the outermost with a unique predecessor.
318 return true;
319 }
320
321 bool MachineLICM::runOnMachineFunction(MachineFunction &MF) {
322 Changed = FirstInLoop = false;
323 TM = &MF.getTarget();
324 TII = TM->getInstrInfo();
325 TLI = TM->getTargetLowering();
326 TRI = TM->getRegisterInfo();
327 MFI = MF.getFrameInfo();
328 MRI = &MF.getRegInfo();
329 InstrItins = TM->getInstrItineraryData();
330
331 PreRegAlloc = MRI->isSSA();
332
333 if (PreRegAlloc)
334 DEBUG(dbgs() << "******** Pre-regalloc Machine LICM: ");
335 else
336 DEBUG(dbgs() << "******** Post-regalloc Machine LICM: ");
337 DEBUG(dbgs() << MF.getName() << " ********\n");
338
339 if (PreRegAlloc) {
340 // Estimate register pressure during pre-regalloc pass.
341 unsigned NumRC = TRI->getNumRegClasses();
342 RegPressure.resize(NumRC);
343 std::fill(RegPressure.begin(), RegPressure.end(), 0);
344 RegLimit.resize(NumRC);
345 for (TargetRegisterInfo::regclass_iterator I = TRI->regclass_begin(),
346 E = TRI->regclass_end(); I != E; ++I)
347 RegLimit[(*I)->getID()] = TRI->getRegPressureLimit(*I, MF);
348 }
349
350 // Get our Loop information...
351 MLI = &getAnalysis<MachineLoopInfo>();
352 DT = &getAnalysis<MachineDominatorTree>();
353 AA = &getAnalysis<AliasAnalysis>();
354
355 SmallVector<MachineLoop *, 8> Worklist(MLI->begin(), MLI->end());
356 while (!Worklist.empty()) {
357 CurLoop = Worklist.pop_back_val();
358 CurPreheader = 0;
359 ExitBlocks.clear();
360
361 // If this is done before regalloc, only visit outer-most preheader-sporting
362 // loops.
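// Hoisting from the outer-most such loop also covers the blocks of its inner
// loops, since HoistOutOfLoop walks the whole nest; inner loops are only
// pushed back onto the worklist when this loop itself has to be skipped.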
363 if (PreRegAlloc && !LoopIsOuterMostWithPredecessor(CurLoop)) {
364 Worklist.append(CurLoop->begin(), CurLoop->end());
365 continue;
366 }
367
368 CurLoop->getExitBlocks(ExitBlocks);
369
370 if (!PreRegAlloc)
371 HoistRegionPostRA();
372 else {
373 // CSEMap is initialized for loop header when the first instruction is
374 // being hoisted.
375 MachineDomTreeNode *N = DT->getNode(CurLoop->getHeader());
376 FirstInLoop = true;
377 HoistOutOfLoop(N);
378 CSEMap.clear();
379 }
380 }
381
382 return Changed;
383 }
384
385 /// InstructionStoresToFI - Return true if the instruction stores to the
386 /// specified frame index.
387 static bool InstructionStoresToFI(const MachineInstr *MI, int FI) {
388 for (MachineInstr::mmo_iterator o = MI->memoperands_begin(),
389 oe = MI->memoperands_end(); o != oe; ++o) {
390 if (!(*o)->isStore() || !(*o)->getValue())
391 continue;
392 if (const FixedStackPseudoSourceValue *Value =
393 dyn_cast<const FixedStackPseudoSourceValue>((*o)->getValue())) {
394 if (Value->getFrameIndex() == FI)
395 return true;
396 }
397 }
398 return false;
399 }
400
401 /// ProcessMI - Examine the instruction as a potential LICM candidate. Also
402 /// gather register def and frame object update information.
403 void MachineLICM::ProcessMI(MachineInstr *MI,
404 BitVector &PhysRegDefs,
405 BitVector &PhysRegClobbers,
406 SmallSet<int, 32> &StoredFIs,
407 SmallVectorImpl<CandidateInfo> &Candidates) {
408 bool RuledOut = false;
409 bool HasNonInvariantUse = false;
410 unsigned Def = 0;
411 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
412 const MachineOperand &MO = MI->getOperand(i);
413 if (MO.isFI()) {
414 // Remember if the instruction stores to the frame index.
415 int FI = MO.getIndex();
416 if (!StoredFIs.count(FI) &&
417 MFI->isSpillSlotObjectIndex(FI) &&
418 InstructionStoresToFI(MI, FI))
419 StoredFIs.insert(FI);
420 HasNonInvariantUse = true;
421 continue;
422 }
423
424 // We can't hoist an instruction defining a physreg that is clobbered in
425 // the loop.
426 if (MO.isRegMask()) {
427 PhysRegClobbers.setBitsNotInMask(MO.getRegMask());
428 continue;
429 }
430
431 if (!MO.isReg())
432 continue;
433 unsigned Reg = MO.getReg();
434 if (!Reg)
435 continue;
436 assert(TargetRegisterInfo::isPhysicalRegister(Reg) &&
437 "Not expecting virtual register!");
438
439 if (!MO.isDef()) {
440 if (Reg && (PhysRegDefs.test(Reg) || PhysRegClobbers.test(Reg)))
441 // If it's using a non-loop-invariant register, then it's obviously not
442 // safe to hoist.
443 HasNonInvariantUse = true;
444 continue;
445 }
446
447 if (MO.isImplicit()) {
448 for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
449 PhysRegClobbers.set(*AI);
450 if (!MO.isDead())
451 // Non-dead implicit def? This cannot be hoisted.
452 RuledOut = true;
453 // No need to check if a dead implicit def is also defined by
454 // another instruction.
455 continue;
456 }
457
458 // FIXME: For now, avoid instructions with multiple defs, unless
459 // it's a dead implicit def.
460 if (Def)
461 RuledOut = true;
462 else
463 Def = Reg;
464
465 // If we have already seen another instruction that defines the same
466 // register, then this is not safe. Two defs is indicated by setting a
467 // PhysRegClobbers bit.
468 for (MCRegAliasIterator AS(Reg, TRI, true); AS.isValid(); ++AS) {
469 if (PhysRegDefs.test(*AS))
470 PhysRegClobbers.set(*AS);
471 PhysRegDefs.set(*AS);
472 }
473 if (PhysRegClobbers.test(Reg))
474 // The register defined by MI is also defined by another instruction in
475 // the loop, so MI cannot be a LICM candidate.
476 RuledOut = true;
477 }
478
479 // Only consider reloads for now and remats which do not have register
480 // operands. FIXME: Consider unfolding load-folding instructions.
481 if (Def && !RuledOut) {
482 int FI = INT_MIN;
483 if ((!HasNonInvariantUse && IsLICMCandidate(*MI)) ||
484 (TII->isLoadFromStackSlot(MI, FI) && MFI->isSpillSlotObjectIndex(FI)))
485 Candidates.push_back(CandidateInfo(MI, Def, FI));
486 }
487 }
488
489 /// HoistRegionPostRA - Walk the specified region of the CFG and hoist loop
490 /// invariants out to the preheader.
491 void MachineLICM::HoistRegionPostRA() {
492 MachineBasicBlock *Preheader = getCurPreheader();
493 if (!Preheader)
494 return;
495
496 unsigned NumRegs = TRI->getNumRegs();
497 BitVector PhysRegDefs(NumRegs); // Regs defined once in the loop.
498 BitVector PhysRegClobbers(NumRegs); // Regs defined more than once.
499
500 SmallVector<CandidateInfo, 32> Candidates;
501 SmallSet<int, 32> StoredFIs;
502
503 // Walk the entire region, count number of defs for each register, and
504 // collect potential LICM candidates.
505 const std::vector<MachineBasicBlock *> &Blocks = CurLoop->getBlocks();
506 for (unsigned i = 0, e = Blocks.size(); i != e; ++i) {
507 MachineBasicBlock *BB = Blocks[i];
508
509 // If the header of the loop containing this basic block is a landing pad,
510 // then don't try to hoist instructions out of this loop.
511 const MachineLoop *ML = MLI->getLoopFor(BB);
512 if (ML && ML->getHeader()->isLandingPad()) continue;
513
514 // Conservatively treat live-in's as an external def.
515 // FIXME: That means a reload that is reused in successor block(s) will not
516 // be LICM'ed.
517 for (MachineBasicBlock::livein_iterator I = BB->livein_begin(),
518 E = BB->livein_end(); I != E; ++I) {
519 unsigned Reg = *I;
520 for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
521 PhysRegDefs.set(*AI);
522 }
523
524 SpeculationState = SpeculateUnknown;
525 for (MachineBasicBlock::iterator
526 MII = BB->begin(), E = BB->end(); MII != E; ++MII) {
527 MachineInstr *MI = &*MII;
528 ProcessMI(MI, PhysRegDefs, PhysRegClobbers, StoredFIs, Candidates);
529 }
530 }
531
532 // Gather the registers read / clobbered by the terminator.
533 BitVector TermRegs(NumRegs);
534 MachineBasicBlock::iterator TI = Preheader->getFirstTerminator();
535 if (TI != Preheader->end()) {
536 for (unsigned i = 0, e = TI->getNumOperands(); i != e; ++i) {
537 const MachineOperand &MO = TI->getOperand(i);
538 if (!MO.isReg())
539 continue;
540 unsigned Reg = MO.getReg();
541 if (!Reg)
542 continue;
543 for (MCRegAliasIterator AI(Reg, TRI, true); AI.isValid(); ++AI)
544 TermRegs.set(*AI);
545 }
546 }
547
548 // Now evaluate whether the potential candidates qualify.
549 // 1. Check if the candidate defined register is defined by another
550 // instruction in the loop.
551 // 2. If the candidate is a load from stack slot (always true for now),
552 // check if the slot is stored anywhere in the loop.
553 // 3. Make sure the candidate's def does not clobber
554 // registers read by the terminator. Similarly, its def should not be
555 // clobbered by the terminator.
556 for (unsigned i = 0, e = Candidates.size(); i != e; ++i) {
557 if (Candidates[i].FI != INT_MIN &&
558 StoredFIs.count(Candidates[i].FI))
559 continue;
560
561 unsigned Def = Candidates[i].Def;
562 if (!PhysRegClobbers.test(Def) && !TermRegs.test(Def)) {
563 bool Safe = true;
564 MachineInstr *MI = Candidates[i].MI;
565 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
566 const MachineOperand &MO = MI->getOperand(j);
567 if (!MO.isReg() || MO.isDef() || !MO.getReg())
568 continue;
569 unsigned Reg = MO.getReg();
570 if (PhysRegDefs.test(Reg) ||
571 PhysRegClobbers.test(Reg)) {
572 // If it's using a non-loop-invariant register, then it's obviously
573 // not safe to hoist.
574 Safe = false;
575 break;
576 }
577 }
578 if (Safe)
579 HoistPostRA(MI, Candidates[i].Def);
580 }
581 }
582 }
583
584 /// AddToLiveIns - Add register 'Reg' to the livein sets of BBs in the current
585 /// loop, and make sure it is not killed by any instructions in the loop.
586 void MachineLICM::AddToLiveIns(unsigned Reg) {
587 const std::vector<MachineBasicBlock *> &Blocks = CurLoop->getBlocks();
588 for (unsigned i = 0, e = Blocks.size(); i != e; ++i) {
589 MachineBasicBlock *BB = Blocks[i];
590 if (!BB->isLiveIn(Reg))
591 BB->addLiveIn(Reg);
592 for (MachineBasicBlock::iterator
593 MII = BB->begin(), E = BB->end(); MII != E; ++MII) {
594 MachineInstr *MI = &*MII;
595 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
596 MachineOperand &MO = MI->getOperand(i);
597 if (!MO.isReg() || !MO.getReg() || MO.isDef()) continue;
598 if (MO.getReg() == Reg || TRI->isSuperRegister(Reg, MO.getReg()))
599 MO.setIsKill(false);
600 }
601 }
602 }
603 }
604
605 /// HoistPostRA - When an instruction is found to use only loop-invariant
606 /// operands and is safe to hoist, this function is called to do the
607 /// dirty work.
608 void MachineLICM::HoistPostRA(MachineInstr *MI, unsigned Def) {
609 MachineBasicBlock *Preheader = getCurPreheader();
610
611 // Now move the instruction to the preheader, inserting it before any
612 // terminator instructions.
613 DEBUG(dbgs() << "Hoisting to BB#" << Preheader->getNumber() << " from BB#"
614 << MI->getParent()->getNumber() << ": " << *MI);
615
616 // Splice the instruction to the preheader.
617 MachineBasicBlock *MBB = MI->getParent();
618 Preheader->splice(Preheader->getFirstTerminator(), MBB, MI);
619
620 // Add the register to the livein lists of all the BBs in the current loop,
621 // since a loop invariant must be kept live throughout the whole loop. This is
622 // important to ensure later passes do not scavenge the def register.
623 AddToLiveIns(Def);
624
625 ++NumPostRAHoisted;
626 Changed = true;
627 }
628
629 // IsGuaranteedToExecute - Check if this mbb is guaranteed to execute.
630 // If not then a load from this mbb may not be safe to hoist.
631 bool MachineLICM::IsGuaranteedToExecute(MachineBasicBlock *BB) {
632 if (SpeculationState != SpeculateUnknown)
633 return SpeculationState == SpeculateFalse;
634
635 if (BB != CurLoop->getHeader()) {
636 // Check loop exiting blocks.
637 SmallVector<MachineBasicBlock*, 8> CurrentLoopExitingBlocks;
638 CurLoop->getExitingBlocks(CurrentLoopExitingBlocks);
639 for (unsigned i = 0, e = CurrentLoopExitingBlocks.size(); i != e; ++i)
640 if (!DT->dominates(BB, CurrentLoopExitingBlocks[i])) {
641 SpeculationState = SpeculateTrue;
642 return false;
643 }
644 }
645
646 SpeculationState = SpeculateFalse;
647 return true;
648 }
649
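// EnterScope/ExitScope maintain BackTrace as the register pressure snapshots
// taken on entry to each block along the dominator-tree path from the loop
// header down to the block currently being processed.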
650 void MachineLICM::EnterScope(MachineBasicBlock *MBB) {
651 DEBUG(dbgs() << "Entering: " << MBB->getName() << '\n');
652
653 // Remember livein register pressure.
654 BackTrace.push_back(RegPressure);
655 }
656
657 void MachineLICM::ExitScope(MachineBasicBlock *MBB) {
658 DEBUG(dbgs() << "Exiting: " << MBB->getName() << '\n');
659 BackTrace.pop_back();
660 }
661
662 /// ExitScopeIfDone - Destroy the scope for the MBB that corresponds to the
663 /// given dominator tree node if it's a leaf or all of its children are done.
664 /// Walk up the dominator tree to destroy ancestors which are now done.
665 void MachineLICM::ExitScopeIfDone(MachineDomTreeNode *Node,
666 DenseMap<MachineDomTreeNode*, unsigned> &OpenChildren,
667 DenseMap<MachineDomTreeNode*, MachineDomTreeNode*> &ParentMap) {
668 if (OpenChildren[Node])
669 return;
670
671 // Pop scope.
672 ExitScope(Node->getBlock());
673
674 // Now traverse upwards to pop ancestors whose offspring are all done.
675 while (MachineDomTreeNode *Parent = ParentMap[Node]) {
676 unsigned Left = --OpenChildren[Parent];
677 if (Left != 0)
678 break;
679 ExitScope(Parent->getBlock());
680 Node = Parent;
681 }
682 }
683
684 /// HoistOutOfLoop - Walk the specified loop in the CFG (defined by all
685 /// blocks dominated by the specified header block, and that are in the
686 /// current loop) in depth first order w.r.t the DominatorTree. This allows
687 /// us to visit definitions before uses, allowing us to hoist a loop body in
688 /// one pass without iteration.
689 ///
690 void MachineLICM::HoistOutOfLoop(MachineDomTreeNode *HeaderN) {
691 SmallVector<MachineDomTreeNode*, 32> Scopes;
692 SmallVector<MachineDomTreeNode*, 8> WorkList;
693 DenseMap<MachineDomTreeNode*, MachineDomTreeNode*> ParentMap;
694 DenseMap<MachineDomTreeNode*, unsigned> OpenChildren;
695
696 // Perform a DFS walk to determine the order of visit.
697 WorkList.push_back(HeaderN);
698 do {
699 MachineDomTreeNode *Node = WorkList.pop_back_val();
700 assert(Node != 0 && "Null dominator tree node?");
701 MachineBasicBlock *BB = Node->getBlock();
702
703 // If the header of the loop containing this basic block is a landing pad,
704 // then don't try to hoist instructions out of this loop.
705 const MachineLoop *ML = MLI->getLoopFor(BB);
706 if (ML && ML->getHeader()->isLandingPad())
707 continue;
708
709 // If this subregion is not in the top level loop at all, exit.
710 if (!CurLoop->contains(BB))
711 continue;
712
713 Scopes.push_back(Node);
714 const std::vector<MachineDomTreeNode*> &Children = Node->getChildren();
715 unsigned NumChildren = Children.size();
716
717 // Don't hoist things out of a large switch statement. This often causes
718 // code to be hoisted that wasn't going to be executed, and increases
719 // register pressure in a situation where it's likely to matter.
720 if (BB->succ_size() >= 25)
721 NumChildren = 0;
722
723 OpenChildren[Node] = NumChildren;
724 // Add children in reverse order as then the next popped worklist node is
725 // the first child of this node. This means we ultimately traverse the
726 // DOM tree in exactly the same order as if we'd recursed.
727 for (int i = (int)NumChildren-1; i >= 0; --i) {
728 MachineDomTreeNode *Child = Children[i];
729 ParentMap[Child] = Node;
730 WorkList.push_back(Child);
731 }
732 } while (!WorkList.empty());
733
734 if (Scopes.size() != 0) {
735 MachineBasicBlock *Preheader = getCurPreheader();
736 if (!Preheader)
737 return;
738
739 // Compute registers which are livein into the loop header.
740 RegSeen.clear();
741 BackTrace.clear();
742 InitRegPressure(Preheader);
743 }
744
745 // Now perform LICM.
746 for (unsigned i = 0, e = Scopes.size(); i != e; ++i) {
747 MachineDomTreeNode *Node = Scopes[i];
748 MachineBasicBlock *MBB = Node->getBlock();
749
750 MachineBasicBlock *Preheader = getCurPreheader();
751 if (!Preheader)
752 continue;
753
754 EnterScope(MBB);
755
756 // Process the block
757 SpeculationState = SpeculateUnknown;
758 for (MachineBasicBlock::iterator
759 MII = MBB->begin(), E = MBB->end(); MII != E; ) {
760 MachineBasicBlock::iterator NextMII = MII; ++NextMII;
761 MachineInstr *MI = &*MII;
762 if (!Hoist(MI, Preheader))
763 UpdateRegPressure(MI);
764 MII = NextMII;
765 }
766
767 // If it's a leaf node, it's done. Traverse upwards to pop ancestors.
768 ExitScopeIfDone(Node, OpenChildren, ParentMap);
769 }
770 }
771
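/// isOperandKill - Treat a use as a kill if it carries a kill flag, or if the
/// virtual register has only this one non-debug use; such a value dies at
/// this use even when kill flags have not been set yet.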
772 static bool isOperandKill(const MachineOperand &MO, MachineRegisterInfo *MRI) {
773 return MO.isKill() || MRI->hasOneNonDBGUse(MO.getReg());
774 }
775
776 /// getRegisterClassIDAndCost - For a given MI, register, and the operand
777 /// index, return the ID and cost of its representative register class.
778 void
779 MachineLICM::getRegisterClassIDAndCost(const MachineInstr *MI,
780 unsigned Reg, unsigned OpIdx,
781 unsigned &RCId, unsigned &RCCost) const {
782 const TargetRegisterClass *RC = MRI->getRegClass(Reg);
783 MVT VT = *RC->vt_begin();
784 if (VT == MVT::Untyped) {
785 RCId = RC->getID();
786 RCCost = 1;
787 } else {
788 RCId = TLI->getRepRegClassFor(VT)->getID();
789 RCCost = TLI->getRepRegClassCostFor(VT);
790 }
791 }
792
793 /// InitRegPressure - Find all virtual register references that are liveout of
794 /// the preheader to initialize the starting "register pressure". Note this
795 /// does not count live through (livein but not used) registers.
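///
/// For illustration (virtual registers, register class costs of 1 assumed):
///   %a = ...          ; def                     -> pressure += 1
///   ... = %a<kill>    ; already seen and killed -> pressure -= 1
///   ... = %b          ; first sighting, no kill -> treated as livein, += 1
/// Only %b, which stays live out of the preheader, ends up being counted.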
796 void MachineLICM::InitRegPressure(MachineBasicBlock *BB) {
797 std::fill(RegPressure.begin(), RegPressure.end(), 0);
798
799 // If the preheader has only a single predecessor and it ends with a
800 // fallthrough or an unconditional branch, then scan its predecessor for live
801 // defs as well. This happens whenever the preheader is created by splitting
802 // the critical edge from the loop predecessor to the loop header.
803 if (BB->pred_size() == 1) {
804 MachineBasicBlock *TBB = 0, *FBB = 0;
805 SmallVector<MachineOperand, 4> Cond;
806 if (!TII->AnalyzeBranch(*BB, TBB, FBB, Cond, false) && Cond.empty())
807 InitRegPressure(*BB->pred_begin());
808 }
809
810 for (MachineBasicBlock::iterator MII = BB->begin(), E = BB->end();
811 MII != E; ++MII) {
812 MachineInstr *MI = &*MII;
813 for (unsigned i = 0, e = MI->getDesc().getNumOperands(); i != e; ++i) {
814 const MachineOperand &MO = MI->getOperand(i);
815 if (!MO.isReg() || MO.isImplicit())
816 continue;
817 unsigned Reg = MO.getReg();
818 if (!TargetRegisterInfo::isVirtualRegister(Reg))
819 continue;
820
821 bool isNew = RegSeen.insert(Reg);
822 unsigned RCId, RCCost;
823 getRegisterClassIDAndCost(MI, Reg, i, RCId, RCCost);
824 if (MO.isDef())
825 RegPressure[RCId] += RCCost;
826 else {
827 bool isKill = isOperandKill(MO, MRI);
828 if (isNew && !isKill)
829 // Haven't seen this, it must be a livein.
830 RegPressure[RCId] += RCCost;
831 else if (!isNew && isKill)
832 RegPressure[RCId] -= RCCost;
833 }
834 }
835 }
836 }
837
838 /// UpdateRegPressure - Update estimate of register pressure after the
839 /// specified instruction.
840 void MachineLICM::UpdateRegPressure(const MachineInstr *MI) {
841 if (MI->isImplicitDef())
842 return;
843
844 SmallVector<unsigned, 4> Defs;
845 for (unsigned i = 0, e = MI->getDesc().getNumOperands(); i != e; ++i) {
846 const MachineOperand &MO = MI->getOperand(i);
847 if (!MO.isReg() || MO.isImplicit())
848 continue;
849 unsigned Reg = MO.getReg();
850 if (!TargetRegisterInfo::isVirtualRegister(Reg))
851 continue;
852
853 bool isNew = RegSeen.insert(Reg);
854 if (MO.isDef())
855 Defs.push_back(Reg);
856 else if (!isNew && isOperandKill(MO, MRI)) {
857 unsigned RCId, RCCost;
858 getRegisterClassIDAndCost(MI, Reg, i, RCId, RCCost);
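// RegPressure holds unsigned values; clamp the estimate at zero rather than
// letting the subtraction wrap around.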
859 if (RCCost > RegPressure[RCId])
860 RegPressure[RCId] = 0;
861 else
862 RegPressure[RCId] -= RCCost;
863 }
864 }
865
866 unsigned Idx = 0;
867 while (!Defs.empty()) {
868 unsigned Reg = Defs.pop_back_val();
869 unsigned RCId, RCCost;
870 getRegisterClassIDAndCost(MI, Reg, Idx, RCId, RCCost);
871 RegPressure[RCId] += RCCost;
872 ++Idx;
873 }
874 }
875
876 /// isLoadFromGOTOrConstantPool - Return true if this machine instruction
877 /// loads from the global offset table or the constant pool.
878 static bool isLoadFromGOTOrConstantPool(MachineInstr &MI) {
879 assert (MI.mayLoad() && "Expected MI that loads!");
880 for (MachineInstr::mmo_iterator I = MI.memoperands_begin(),
881 E = MI.memoperands_end(); I != E; ++I) {
882 if (const Value *V = (*I)->getValue()) {
883 if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V))
884 if (PSV == PSV->getGOT() || PSV == PSV->getConstantPool())
885 return true;
886 }
887 }
888 return false;
889 }
890
891 /// IsLICMCandidate - Returns true if the instruction may be a suitable
892 /// candidate for LICM. For example, if the instruction is a call, then it's
893 /// obviously not safe to hoist it.
894 bool MachineLICM::IsLICMCandidate(MachineInstr &I) {
895 // Check if it's safe to move the instruction.
896 bool DontMoveAcrossStore = true;
897 if (!I.isSafeToMove(TII, AA, DontMoveAcrossStore))
898 return false;
899
900 // If it is a load, then check that it is guaranteed to execute by making sure
901 // it dominates all exiting blocks. If it doesn't, then there is a path out of
902 // the loop which does not execute this load, so we can't hoist it. Loads
903 // from constant memory are not always safe to speculate, for example an
904 // indexed load from a jump table.
905 // Stores and side effects are already checked by isSafeToMove.
906 if (I.mayLoad() && !isLoadFromGOTOrConstantPool(I) &&
907 !IsGuaranteedToExecute(I.getParent()))
908 return false;
909
910 return true;
911 }
912
913 /// IsLoopInvariantInst - Returns true if the instruction is loop
914 /// invariant. I.e., all virtual register operands are defined outside of the
915 /// loop, physical registers aren't accessed explicitly, and there are no side
916 /// effects that aren't captured by the operands or other flags.
917 ///
918 bool MachineLICM::IsLoopInvariantInst(MachineInstr &I) {
919 if (!IsLICMCandidate(I))
920 return false;
921
922 // The instruction is loop invariant if all of its operands are.
923 for (unsigned i = 0, e = I.getNumOperands(); i != e; ++i) {
924 const MachineOperand &MO = I.getOperand(i);
925
926 if (!MO.isReg())
927 continue;
928
929 unsigned Reg = MO.getReg();
930 if (Reg == 0) continue;
931
932 // Don't hoist an instruction that uses or defines a physical register.
933 if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
934 if (MO.isUse()) {
935 // If the physreg has no defs anywhere, it's just an ambient register
936 // and we can freely move its uses. Alternatively, if it's allocatable,
937 // it could get allocated to something with a def during allocation.
938 if (!MRI->isConstantPhysReg(Reg, *I.getParent()->getParent()))
939 return false;
940 // Otherwise it's safe to move.
941 continue;
942 } else if (!MO.isDead()) {
943 // A def that isn't dead. We can't move it.
944 return false;
945 } else if (CurLoop->getHeader()->isLiveIn(Reg)) {
946 // If the reg is live into the loop, we can't hoist an instruction
947 // which would clobber it.
948 return false;
949 }
950 }
951
952 if (!MO.isUse())
953 continue;
954
955 assert(MRI->getVRegDef(Reg) &&
956 "Machine instr not mapped for this vreg?!");
957
958 // If the loop contains the definition of an operand, then the instruction
959 // isn't loop invariant.
960 if (CurLoop->contains(MRI->getVRegDef(Reg)))
961 return false;
962 }
963
964 // If we got this far, the instruction is loop invariant!
965 return true;
966 }
967
968
969 /// HasLoopPHIUse - Return true if the specified instruction is used by a
970 /// phi node and hoisting it could cause a copy to be inserted.
971 bool MachineLICM::HasLoopPHIUse(const MachineInstr *MI) const {
972 SmallVector<const MachineInstr*, 8> Work(1, MI);
973 do {
974 MI = Work.pop_back_val();
975 for (ConstMIOperands MO(MI); MO.isValid(); ++MO) {
976 if (!MO->isReg() || !MO->isDef())
977 continue;
978 unsigned Reg = MO->getReg();
979 if (!TargetRegisterInfo::isVirtualRegister(Reg))
980 continue;
981 for (MachineRegisterInfo::use_iterator UI = MRI->use_begin(Reg),
982 UE = MRI->use_end(); UI != UE; ++UI) {
983 MachineInstr *UseMI = &*UI;
984 // A PHI may cause a copy to be inserted.
985 if (UseMI->isPHI()) {
986 // A PHI inside the loop causes a copy because the live range of Reg is
987 // extended across the PHI.
988 if (CurLoop->contains(UseMI))
989 return true;
990 // A PHI in an exit block can cause a copy to be inserted if the PHI
991 // has multiple predecessors in the loop with different values.
992 // For now, approximate by rejecting all exit blocks.
993 if (isExitBlock(UseMI->getParent()))
994 return true;
995 continue;
996 }
997 // Look past copies as well.
998 if (UseMI->isCopy() && CurLoop->contains(UseMI))
999 Work.push_back(UseMI);
1000 }
1001 }
1002 } while (!Work.empty());
1003 return false;
1004 }
1005
1006 /// HasHighOperandLatency - Compute operand latency between a def of 'Reg'
1007 /// and an use in the current loop, return true if the target considered
1008 /// it 'high'.
1009 bool MachineLICM::HasHighOperandLatency(MachineInstr &MI,
1010 unsigned DefIdx, unsigned Reg) const {
1011 if (!InstrItins || InstrItins->isEmpty() || MRI->use_nodbg_empty(Reg))
1012 return false;
1013
1014 for (MachineRegisterInfo::use_nodbg_iterator I = MRI->use_nodbg_begin(Reg),
1015 E = MRI->use_nodbg_end(); I != E; ++I) {
1016 MachineInstr *UseMI = &*I;
1017 if (UseMI->isCopyLike())
1018 continue;
1019 if (!CurLoop->contains(UseMI->getParent()))
1020 continue;
1021 for (unsigned i = 0, e = UseMI->getNumOperands(); i != e; ++i) {
1022 const MachineOperand &MO = UseMI->getOperand(i);
1023 if (!MO.isReg() || !MO.isUse())
1024 continue;
1025 unsigned MOReg = MO.getReg();
1026 if (MOReg != Reg)
1027 continue;
1028
1029 if (TII->hasHighOperandLatency(InstrItins, MRI, &MI, DefIdx, UseMI, i))
1030 return true;
1031 }
1032
1033 // Only look at the first in-loop use.
1034 break;
1035 }
1036
1037 return false;
1038 }
1039
1040 /// IsCheapInstruction - Return true if the instruction is marked "cheap" or
1041 /// the operand latency between its def and a use is one or less.
1042 bool MachineLICM::IsCheapInstruction(MachineInstr &MI) const {
1043 if (MI.isAsCheapAsAMove() || MI.isCopyLike())
1044 return true;
1045 if (!InstrItins || InstrItins->isEmpty())
1046 return false;
1047
1048 bool isCheap = false;
1049 unsigned NumDefs = MI.getDesc().getNumDefs();
1050 for (unsigned i = 0, e = MI.getNumOperands(); NumDefs && i != e; ++i) {
1051 MachineOperand &DefMO = MI.getOperand(i);
1052 if (!DefMO.isReg() || !DefMO.isDef())
1053 continue;
1054 --NumDefs;
1055 unsigned Reg = DefMO.getReg();
1056 if (TargetRegisterInfo::isPhysicalRegister(Reg))
1057 continue;
1058
1059 if (!TII->hasLowDefLatency(InstrItins, &MI, i))
1060 return false;
1061 isCheap = true;
1062 }
1063
1064 return isCheap;
1065 }
1066
1067 /// CanCauseHighRegPressure - Visit BBs from header to current BB, check
1068 /// whether hoisting an instruction with the given cost map can cause high
1069 /// register pressure.
1070 bool MachineLICM::CanCauseHighRegPressure(DenseMap<unsigned, int> &Cost,
1071 bool CheapInstr) {
1072 for (DenseMap<unsigned, int>::iterator CI = Cost.begin(), CE = Cost.end();
1073 CI != CE; ++CI) {
1074 if (CI->second <= 0)
1075 continue;
1076
1077 unsigned RCId = CI->first;
1078 unsigned Limit = RegLimit[RCId];
1079 int Cost = CI->second;
1080
1081 // Don't hoist cheap instructions if they would increase register pressure,
1082 // even if we're under the limit.
1083 if (CheapInstr)
1084 return true;
1085
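// The hoisted value will be live from the preheader down to its uses, so the
// added cost must stay under the limit in every block recorded on the
// dominator path walked so far (BackTrace).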
1086 for (unsigned i = BackTrace.size(); i != 0; --i) {
1087 SmallVectorImpl<unsigned> &RP = BackTrace[i-1];
1088 if (RP[RCId] + Cost >= Limit)
1089 return true;
1090 }
1091 }
1092
1093 return false;
1094 }
1095
1096 /// UpdateBackTraceRegPressure - Traverse the back trace from header to the
1097 /// current block and update their register pressures to reflect the effect
1098 /// of hoisting MI from the current block to the preheader.
1099 void MachineLICM::UpdateBackTraceRegPressure(const MachineInstr *MI) {
1100 if (MI->isImplicitDef())
1101 return;
1102
1103 // First compute the 'cost' of the instruction, i.e. its contribution
1104 // to register pressure.
1105 DenseMap<unsigned, int> Cost;
1106 for (unsigned i = 0, e = MI->getDesc().getNumOperands(); i != e; ++i) {
1107 const MachineOperand &MO = MI->getOperand(i);
1108 if (!MO.isReg() || MO.isImplicit())
1109 continue;
1110 unsigned Reg = MO.getReg();
1111 if (!TargetRegisterInfo::isVirtualRegister(Reg))
1112 continue;
1113
1114 unsigned RCId, RCCost;
1115 getRegisterClassIDAndCost(MI, Reg, i, RCId, RCCost);
1116 if (MO.isDef()) {
1117 DenseMap<unsigned, int>::iterator CI = Cost.find(RCId);
1118 if (CI != Cost.end())
1119 CI->second += RCCost;
1120 else
1121 Cost.insert(std::make_pair(RCId, RCCost));
1122 } else if (isOperandKill(MO, MRI)) {
1123 DenseMap<unsigned, int>::iterator CI = Cost.find(RCId);
1124 if (CI != Cost.end())
1125 CI->second -= RCCost;
1126 else
1127 Cost.insert(std::make_pair(RCId, -RCCost));
1128 }
1129 }
1130
1131 // Update register pressure of blocks from loop header to current block.
1132 for (unsigned i = 0, e = BackTrace.size(); i != e; ++i) {
1133 SmallVectorImpl<unsigned> &RP = BackTrace[i];
1134 for (DenseMap<unsigned, int>::iterator CI = Cost.begin(), CE = Cost.end();
1135 CI != CE; ++CI) {
1136 unsigned RCId = CI->first;
1137 RP[RCId] += CI->second;
1138 }
1139 }
1140 }
1141
1142 /// IsProfitableToHoist - Return true if it is potentially profitable to hoist
1143 /// the given loop invariant.
1144 bool MachineLICM::IsProfitableToHoist(MachineInstr &MI) {
1145 if (MI.isImplicitDef())
1146 return true;
1147
1148 // Besides removing computation from the loop, hoisting an instruction has
1149 // these effects:
1150 //
1151 // - The value defined by the instruction becomes live across the entire
1152 // loop. This increases register pressure in the loop.
1153 //
1154 // - If the value is used by a PHI in the loop, a copy will be required for
1155 // lowering the PHI after extending the live range.
1156 //
1157 // - When hoisting the last use of a value in the loop, that value no longer
1158 // needs to be live in the loop. This lowers register pressure in the loop.
1159
1160 bool CheapInstr = IsCheapInstruction(MI);
1161 bool CreatesCopy = HasLoopPHIUse(&MI);
1162
1163 // Don't hoist a cheap instruction if it would create a copy in the loop.
1164 if (CheapInstr && CreatesCopy) {
1165 DEBUG(dbgs() << "Won't hoist cheap instr with loop PHI use: " << MI);
1166 return false;
1167 }
1168
1169 // Rematerializable instructions should always be hoisted since the register
1170 // allocator can just pull them down again when needed.
1171 if (TII->isTriviallyReMaterializable(&MI, AA))
1172 return true;
1173
1174 // Estimate register pressure to determine whether to LICM the instruction.
1175 // In a low register pressure situation, we can be more aggressive about
1176 // hoisting. Also, favor hoisting long-latency instructions even in a
1177 // moderately high pressure situation.
1178 // Cheap instructions will only be hoisted if they don't increase register
1179 // pressure at all.
1180 // FIXME: If there are long latency loop-invariant instructions inside the
1181 // loop at this point, why didn't the optimizer's LICM hoist them?
1182 DenseMap<unsigned, int> Cost;
1183 for (unsigned i = 0, e = MI.getDesc().getNumOperands(); i != e; ++i) {
1184 const MachineOperand &MO = MI.getOperand(i);
1185 if (!MO.isReg() || MO.isImplicit())
1186 continue;
1187 unsigned Reg = MO.getReg();
1188 if (!TargetRegisterInfo::isVirtualRegister(Reg))
1189 continue;
1190
1191 unsigned RCId, RCCost;
1192 getRegisterClassIDAndCost(&MI, Reg, i, RCId, RCCost);
1193 if (MO.isDef()) {
1194 if (HasHighOperandLatency(MI, i, Reg)) {
1195 DEBUG(dbgs() << "Hoist High Latency: " << MI);
1196 ++NumHighLatency;
1197 return true;
1198 }
1199 Cost[RCId] += RCCost;
1200 } else if (isOperandKill(MO, MRI)) {
1201 // If a virtual register use is a kill, hoisting it out of the loop
1202 // may actually reduce register pressure or be register pressure
1203 // neutral.
1204 Cost[RCId] -= RCCost;
1205 }
1206 }
1207
1208 // Visit BBs from header to current BB, if hoisting this doesn't cause
1209 // high register pressure, then it's safe to proceed.
1210 if (!CanCauseHighRegPressure(Cost, CheapInstr)) {
1211 DEBUG(dbgs() << "Hoist non-reg-pressure: " << MI);
1212 ++NumLowRP;
1213 return true;
1214 }
1215
1216 // Don't risk increasing register pressure if it would create copies.
1217 if (CreatesCopy) {
1218 DEBUG(dbgs() << "Won't hoist instr with loop PHI use: " << MI);
1219 return false;
1220 }
1221
1222 // Do not "speculate" in a high register pressure situation. If an
1223 // instruction is not guaranteed to be executed in the loop, it's best to be
1224 // conservative.
1225 if (AvoidSpeculation &&
1226 (!IsGuaranteedToExecute(MI.getParent()) && !MayCSE(&MI))) {
1227 DEBUG(dbgs() << "Won't speculate: " << MI);
1228 return false;
1229 }
1230
1231 // In a high register pressure situation, only hoist if the instruction is
1232 // going to be remat'ed.
1233 if (!TII->isTriviallyReMaterializable(&MI, AA) &&
1234 !MI.isInvariantLoad(AA)) {
1235 DEBUG(dbgs() << "Can't remat / high reg-pressure: " << MI);
1236 return false;
1237 }
1238
1239 return true;
1240 }
1241
1242 MachineInstr *MachineLICM::ExtractHoistableLoad(MachineInstr *MI) {
1243 // Don't unfold simple loads.
1244 if (MI->canFoldAsLoad())
1245 return 0;
1246
1247 // If not, we may be able to unfold a load and hoist that.
1248 // First test whether the instruction is loading from an amenable
1249 // memory location.
1250 if (!MI->isInvariantLoad(AA))
1251 return 0;
1252
1253 // Next determine the register class for a temporary register.
1254 unsigned LoadRegIndex;
1255 unsigned NewOpc =
1256 TII->getOpcodeAfterMemoryUnfold(MI->getOpcode(),
1257 /*UnfoldLoad=*/true,
1258 /*UnfoldStore=*/false,
1259 &LoadRegIndex);
1260 if (NewOpc == 0) return 0;
1261 const MCInstrDesc &MID = TII->get(NewOpc);
1262 if (MID.getNumDefs() != 1) return 0;
1263 MachineFunction &MF = *MI->getParent()->getParent();
1264 const TargetRegisterClass *RC = TII->getRegClass(MID, LoadRegIndex, TRI, MF);
1265 // Ok, we're unfolding. Create a temporary register and do the unfold.
1266 unsigned Reg = MRI->createVirtualRegister(RC);
1267
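// unfoldMemoryOperand splits MI in two: NewMIs[0] is the plain load into Reg
// (the hoisting candidate) and NewMIs[1] is the original operation rewritten
// to read Reg instead of the memory operand.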
1268 SmallVector<MachineInstr *, 2> NewMIs;
1269 bool Success =
1270 TII->unfoldMemoryOperand(MF, MI, Reg,
1271 /*UnfoldLoad=*/true, /*UnfoldStore=*/false,
1272 NewMIs);
1273 (void)Success;
1274 assert(Success &&
1275 "unfoldMemoryOperand failed when getOpcodeAfterMemoryUnfold "
1276 "succeeded!");
1277 assert(NewMIs.size() == 2 &&
1278 "Unfolded a load into multiple instructions!");
1279 MachineBasicBlock *MBB = MI->getParent();
1280 MachineBasicBlock::iterator Pos = MI;
1281 MBB->insert(Pos, NewMIs[0]);
1282 MBB->insert(Pos, NewMIs[1]);
1283 // If unfolding produced a load that wasn't loop-invariant or profitable to
1284 // hoist, discard the new instructions and bail.
1285 if (!IsLoopInvariantInst(*NewMIs[0]) || !IsProfitableToHoist(*NewMIs[0])) {
1286 NewMIs[0]->eraseFromParent();
1287 NewMIs[1]->eraseFromParent();
1288 return 0;
1289 }
1290
1291 // Update register pressure for the unfolded instruction.
1292 UpdateRegPressure(NewMIs[1]);
1293
1294 // Otherwise we successfully unfolded a load that we can hoist.
1295 MI->eraseFromParent();
1296 return NewMIs[0];
1297 }
1298
1299 void MachineLICM::InitCSEMap(MachineBasicBlock *BB) {
1300 for (MachineBasicBlock::iterator I = BB->begin(),E = BB->end(); I != E; ++I) {
1301 const MachineInstr *MI = &*I;
1302 unsigned Opcode = MI->getOpcode();
1303 DenseMap<unsigned, std::vector<const MachineInstr*> >::iterator
1304 CI = CSEMap.find(Opcode);
1305 if (CI != CSEMap.end())
1306 CI->second.push_back(MI);
1307 else {
1308 std::vector<const MachineInstr*> CSEMIs;
1309 CSEMIs.push_back(MI);
1310 CSEMap.insert(std::make_pair(Opcode, CSEMIs));
1311 }
1312 }
1313 }
1314
1315 const MachineInstr*
1316 MachineLICM::LookForDuplicate(const MachineInstr *MI,
1317 std::vector<const MachineInstr*> &PrevMIs) {
1318 for (unsigned i = 0, e = PrevMIs.size(); i != e; ++i) {
1319 const MachineInstr *PrevMI = PrevMIs[i];
1320 if (TII->produceSameValue(MI, PrevMI, (PreRegAlloc ? MRI : 0)))
1321 return PrevMI;
1322 }
1323 return 0;
1324 }
1325
1326 bool MachineLICM::EliminateCSE(MachineInstr *MI,
1327 DenseMap<unsigned, std::vector<const MachineInstr*> >::iterator &CI) {
1328 // Do not CSE implicit_def so ProcessImplicitDefs can properly propagate
1329 // the undef property onto uses.
1330 if (CI == CSEMap.end() || MI->isImplicitDef())
1331 return false;
1332
1333 if (const MachineInstr *Dup = LookForDuplicate(MI, CI->second)) {
1334 DEBUG(dbgs() << "CSEing " << *MI << " with " << *Dup);
1335
1336 // Replace virtual registers defined by MI by their counterparts defined
1337 // by Dup.
1338 SmallVector<unsigned, 2> Defs;
1339 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1340 const MachineOperand &MO = MI->getOperand(i);
1341
1342 // Physical registers may not differ here.
1343 assert((!MO.isReg() || MO.getReg() == 0 ||
1344 !TargetRegisterInfo::isPhysicalRegister(MO.getReg()) ||
1345 MO.getReg() == Dup->getOperand(i).getReg()) &&
1346 "Instructions with different phys regs are not identical!");
1347
1348 if (MO.isReg() && MO.isDef() &&
1349 !TargetRegisterInfo::isPhysicalRegister(MO.getReg()))
1350 Defs.push_back(i);
1351 }
1352
1353 SmallVector<const TargetRegisterClass*, 2> OrigRCs;
1354 for (unsigned i = 0, e = Defs.size(); i != e; ++i) {
1355 unsigned Idx = Defs[i];
1356 unsigned Reg = MI->getOperand(Idx).getReg();
1357 unsigned DupReg = Dup->getOperand(Idx).getReg();
1358 OrigRCs.push_back(MRI->getRegClass(DupReg));
1359
1360 if (!MRI->constrainRegClass(DupReg, MRI->getRegClass(Reg))) {
1361 // Restore the old RCs if there was more than one def.
1362 for (unsigned j = 0; j != i; ++j)
1363 MRI->setRegClass(Dup->getOperand(Defs[j]).getReg(), OrigRCs[j]);
1364 return false;
1365 }
1366 }
1367
1368 for (unsigned i = 0, e = Defs.size(); i != e; ++i) {
1369 unsigned Idx = Defs[i];
1370 unsigned Reg = MI->getOperand(Idx).getReg();
1371 unsigned DupReg = Dup->getOperand(Idx).getReg();
1372 MRI->replaceRegWith(Reg, DupReg);
1373 MRI->clearKillFlags(DupReg);
1374 }
1375
1376 MI->eraseFromParent();
1377 ++NumCSEed;
1378 return true;
1379 }
1380 return false;
1381 }
1382
1383 /// MayCSE - Return true if the given instruction will be CSE'd if it's
1384 /// hoisted out of the loop.
1385 bool MachineLICM::MayCSE(MachineInstr *MI) {
1386 unsigned Opcode = MI->getOpcode();
1387 DenseMap<unsigned, std::vector<const MachineInstr*> >::iterator
1388 CI = CSEMap.find(Opcode);
1389 // Do not CSE implicit_def so ProcessImplicitDefs can properly propagate
1390 // the undef property onto uses.
1391 if (CI == CSEMap.end() || MI->isImplicitDef())
1392 return false;
1393
1394 return LookForDuplicate(MI, CI->second) != 0;
1395 }
1396
1397 /// Hoist - When an instruction is found to use only loop invariant operands
1398 /// that are safe to hoist, this instruction is called to do the dirty work.
1399 ///
1400 bool MachineLICM::Hoist(MachineInstr *MI, MachineBasicBlock *Preheader) {
1401 // First check whether we should hoist this instruction.
1402 if (!IsLoopInvariantInst(*MI) || !IsProfitableToHoist(*MI)) {
1403 // If not, try unfolding a hoistable load.
1404 MI = ExtractHoistableLoad(MI);
1405 if (!MI) return false;
1406 }
1407
1408 // Now move the instruction to the preheader, inserting it before any
1409 // terminator instructions.
1410 DEBUG({
1411 dbgs() << "Hoisting " << *MI;
1412 if (Preheader->getBasicBlock())
1413 dbgs() << " to MachineBasicBlock "
1414 << Preheader->getName();
1415 if (MI->getParent()->getBasicBlock())
1416 dbgs() << " from MachineBasicBlock "
1417 << MI->getParent()->getName();
1418 dbgs() << "\n";
1419 });
1420
1421 // If this is the first instruction being hoisted to the preheader,
1422 // initialize the CSE map with potential common expressions.
1423 if (FirstInLoop) {
1424 InitCSEMap(Preheader);
1425 FirstInLoop = false;
1426 }
1427
1428 // Look for opportunity to CSE the hoisted instruction.
1429 unsigned Opcode = MI->getOpcode();
1430 DenseMap<unsigned, std::vector<const MachineInstr*> >::iterator
1431 CI = CSEMap.find(Opcode);
1432 if (!EliminateCSE(MI, CI)) {
1433 // Otherwise, splice the instruction to the preheader.
1434 Preheader->splice(Preheader->getFirstTerminator(), MI->getParent(), MI);
1435
1436 // Update register pressure for BBs from header to this block.
1437 UpdateBackTraceRegPressure(MI);
1438
1439 // Clear the kill flags of any register this instruction defines,
1440 // since they may need to be live throughout the entire loop
1441 // rather than just live for part of it.
1442 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
1443 MachineOperand &MO = MI->getOperand(i);
1444 if (MO.isReg() && MO.isDef() && !MO.isDead())
1445 MRI->clearKillFlags(MO.getReg());
1446 }
1447
1448 // Add to the CSE map.
1449 if (CI != CSEMap.end())
1450 CI->second.push_back(MI);
1451 else {
1452 std::vector<const MachineInstr*> CSEMIs;
1453 CSEMIs.push_back(MI);
1454 CSEMap.insert(std::make_pair(Opcode, CSEMIs));
1455 }
1456 }
1457
1458 ++NumHoisted;
1459 Changed = true;
1460
1461 return true;
1462 }
1463
1464 MachineBasicBlock *MachineLICM::getCurPreheader() {
1465 // Determine the block to which to hoist instructions. If we can't find a
1466 // suitable loop predecessor, we can't do any hoisting.
1467
1468 // If we've tried to get a preheader and failed, don't try again.
1469 if (CurPreheader == reinterpret_cast<MachineBasicBlock *>(-1))
1470 return 0;
1471
1472 if (!CurPreheader) {
1473 CurPreheader = CurLoop->getLoopPreheader();
1474 if (!CurPreheader) {
1475 MachineBasicBlock *Pred = CurLoop->getLoopPredecessor();
1476 if (!Pred) {
1477 CurPreheader = reinterpret_cast<MachineBasicBlock *>(-1);
1478 return 0;
1479 }
1480
1481 CurPreheader = Pred->SplitCriticalEdge(CurLoop->getHeader(), this);
1482 if (!CurPreheader) {
1483 CurPreheader = reinterpret_cast<MachineBasicBlock *>(-1);
1484 return 0;
1485 }
1486 }
1487 }
1488 return CurPreheader;
1489 }