Mercurial > hg > CbC > CbC_llvm
comparison lib/CodeGen/LiveIntervals.cpp @ 134:3a76565eade5 LLVM5.0.1
update 5.0.1
author | mir3636 |
---|---|
date | Sat, 17 Feb 2018 09:57:20 +0900 |
parents | |
children | c2174574ed3a |
comparison
equal
deleted
inserted
replaced
133:c60214abe0e8 | 134:3a76565eade5 |
---|---|
1 //===- LiveIntervals.cpp - Live Interval Analysis -------------------------===// | |
2 // | |
3 // The LLVM Compiler Infrastructure | |
4 // | |
5 // This file is distributed under the University of Illinois Open Source | |
6 // License. See LICENSE.TXT for details. | |
7 // | |
8 //===----------------------------------------------------------------------===// | |
9 // | |
10 /// \file This file implements the LiveInterval analysis pass which is used | |
11 /// by the Linear Scan Register allocator. This pass linearizes the | |
12 /// basic blocks of the function in DFS order and computes live intervals for | |
13 /// each virtual and physical register. | |
14 // | |
15 //===----------------------------------------------------------------------===// | |
16 | |
17 #include "llvm/CodeGen/LiveIntervals.h" | |
18 #include "LiveRangeCalc.h" | |
19 #include "llvm/ADT/ArrayRef.h" | |
20 #include "llvm/ADT/DepthFirstIterator.h" | |
21 #include "llvm/ADT/SmallPtrSet.h" | |
22 #include "llvm/ADT/SmallVector.h" | |
23 #include "llvm/ADT/iterator_range.h" | |
24 #include "llvm/Analysis/AliasAnalysis.h" | |
25 #include "llvm/CodeGen/LiveInterval.h" | |
26 #include "llvm/CodeGen/LiveVariables.h" | |
27 #include "llvm/CodeGen/MachineBasicBlock.h" | |
28 #include "llvm/CodeGen/MachineBlockFrequencyInfo.h" | |
29 #include "llvm/CodeGen/MachineDominators.h" | |
30 #include "llvm/CodeGen/MachineFunction.h" | |
31 #include "llvm/CodeGen/MachineInstr.h" | |
32 #include "llvm/CodeGen/MachineInstrBundle.h" | |
33 #include "llvm/CodeGen/MachineOperand.h" | |
34 #include "llvm/CodeGen/MachineRegisterInfo.h" | |
35 #include "llvm/CodeGen/Passes.h" | |
36 #include "llvm/CodeGen/SlotIndexes.h" | |
37 #include "llvm/CodeGen/TargetRegisterInfo.h" | |
38 #include "llvm/CodeGen/TargetSubtargetInfo.h" | |
39 #include "llvm/CodeGen/VirtRegMap.h" | |
40 #include "llvm/MC/LaneBitmask.h" | |
41 #include "llvm/MC/MCRegisterInfo.h" | |
42 #include "llvm/Pass.h" | |
43 #include "llvm/Support/BlockFrequency.h" | |
44 #include "llvm/Support/CommandLine.h" | |
45 #include "llvm/Support/Compiler.h" | |
46 #include "llvm/Support/Debug.h" | |
47 #include "llvm/Support/MathExtras.h" | |
48 #include "llvm/Support/raw_ostream.h" | |
49 #include <algorithm> | |
50 #include <cassert> | |
51 #include <cstdint> | |
52 #include <iterator> | |
53 #include <tuple> | |
54 #include <utility> | |
55 | |
using namespace llvm;

#define DEBUG_TYPE "regalloc"

// Pass identification: the address of ID uniquely identifies the pass.
char LiveIntervals::ID = 0;
char &llvm::LiveIntervalsID = LiveIntervals::ID;
INITIALIZE_PASS_BEGIN(LiveIntervals, "liveintervals",
                "Live Interval Analysis", false, false)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
INITIALIZE_PASS_END(LiveIntervals, "liveintervals",
                "Live Interval Analysis", false, false)

#ifndef NDEBUG
// Debug-only stress-testing knob: forces computation of live ranges for every
// physical register unit (see runOnMachineFunction). In release builds it
// degrades to a plain constant so the option never exists.
static cl::opt<bool> EnablePrecomputePhysRegs(
    "precompute-phys-liveness", cl::Hidden,
    cl::desc("Eagerly compute live intervals for all physreg units."));
#else
static bool EnablePrecomputePhysRegs = false;
#endif // NDEBUG

namespace llvm {

// Externally visible (referenced elsewhere via extern): selects the faster
// segment-set representation while physreg live ranges are being built.
cl::opt<bool> UseSegmentSetForPhysRegs(
    "use-segment-set-for-physregs", cl::Hidden, cl::init(true),
    cl::desc(
        "Use segment set for the computation of the live ranges of physregs."));

} // end namespace llvm
86 | |
/// Declare the analyses this pass needs and the ones it keeps valid.
/// SlotIndexes and the dominator tree are required transitively because
/// clients of LiveIntervals also use them through this pass.
void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<AAResultsWrapperPass>();
  AU.addPreserved<AAResultsWrapperPass>();
  AU.addPreserved<LiveVariables>();
  AU.addPreservedID(MachineLoopInfoID);
  AU.addRequiredTransitiveID(MachineDominatorsID);
  AU.addPreservedID(MachineDominatorsID);
  AU.addPreserved<SlotIndexes>();
  AU.addRequiredTransitive<SlotIndexes>();
  MachineFunctionPass::getAnalysisUsage(AU);
}
99 | |
LiveIntervals::LiveIntervals() : MachineFunctionPass(ID) {
  // Ensure this pass (and its dependencies) are registered with the registry.
  initializeLiveIntervalsPass(*PassRegistry::getPassRegistry());
}
103 | |
LiveIntervals::~LiveIntervals() {
  // LRCalc is allocated lazily in runOnMachineFunction; deleting a null
  // pointer here is fine if the pass never ran.
  delete LRCalc;
}
107 | |
/// Drop all per-function state so the pass can be rerun on another function.
void LiveIntervals::releaseMemory() {
  // Free the live intervals themselves.
  for (unsigned i = 0, e = VirtRegIntervals.size(); i != e; ++i)
    delete VirtRegIntervals[TargetRegisterInfo::index2VirtReg(i)];
  VirtRegIntervals.clear();
  RegMaskSlots.clear();
  RegMaskBits.clear();
  RegMaskBlocks.clear();

  // Register-unit ranges are heap allocated on demand (see getRegUnit).
  for (LiveRange *LR : RegUnitRanges)
    delete LR;
  RegUnitRanges.clear();

  // Release VNInfo memory regions, VNInfo objects don't need to be dtor'd.
  VNInfoAllocator.Reset();
}
124 | |
/// Main entry point: compute live intervals for all virtual registers and
/// the register-mask / live-in register-unit bookkeeping for \p fn.
/// Always returns true (analysis results are stored in this pass).
bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
  // Cache the per-function context used by all the compute* helpers.
  MF = &fn;
  MRI = &MF->getRegInfo();
  TRI = MF->getSubtarget().getRegisterInfo();
  TII = MF->getSubtarget().getInstrInfo();
  AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
  Indexes = &getAnalysis<SlotIndexes>();
  DomTree = &getAnalysis<MachineDominatorTree>();

  // Lazily create the shared live-range calculator; it is reused across
  // functions and freed in the destructor.
  if (!LRCalc)
    LRCalc = new LiveRangeCalc();

  // Allocate space for all virtual registers.
  VirtRegIntervals.resize(MRI->getNumVirtRegs());

  computeVirtRegs();
  computeRegMasks();
  computeLiveInRegUnits();

  if (EnablePrecomputePhysRegs) {
    // For stress testing, precompute live ranges of all physical register
    // units, including reserved registers.
    for (unsigned i = 0, e = TRI->getNumRegUnits(); i != e; ++i)
      getRegUnit(i);
  }
  DEBUG(dump());
  return true;
}
153 | |
/// Print all computed intervals (regunits, virtregs, regmask slots) followed
/// by the machine instructions, for -print-* style debugging output.
void LiveIntervals::print(raw_ostream &OS, const Module* ) const {
  OS << "********** INTERVALS **********\n";

  // Dump the regunits.
  for (unsigned Unit = 0, UnitE = RegUnitRanges.size(); Unit != UnitE; ++Unit)
    if (LiveRange *LR = RegUnitRanges[Unit])
      OS << printRegUnit(Unit, TRI) << ' ' << *LR << '\n';

  // Dump the virtregs.
  for (unsigned i = 0, e = MRI->getNumVirtRegs(); i != e; ++i) {
    unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
    if (hasInterval(Reg))
      OS << getInterval(Reg) << '\n';
  }

  OS << "RegMasks:";
  for (SlotIndex Idx : RegMaskSlots)
    OS << ' ' << Idx;
  OS << '\n';

  printInstrs(OS);
}
176 | |
/// Print the machine function with slot indexes interleaved.
void LiveIntervals::printInstrs(raw_ostream &OS) const {
  OS << "********** MACHINEINSTRS **********\n";
  MF->print(OS, Indexes);
}
181 | |
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
// Debugger-callable convenience wrapper around printInstrs.
LLVM_DUMP_METHOD void LiveIntervals::dumpInstrs() const {
  printInstrs(dbgs());
}
#endif
187 | |
188 LiveInterval* LiveIntervals::createInterval(unsigned reg) { | |
189 float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ? huge_valf : 0.0F; | |
190 return new LiveInterval(reg, Weight); | |
191 } | |
192 | |
/// Compute the live interval of a virtual register, based on defs and uses.
/// \p LI must be the freshly created, empty interval for its register; dead
/// values are detected and trimmed afterwards via computeDeadValues.
void LiveIntervals::computeVirtRegInterval(LiveInterval &LI) {
  assert(LRCalc && "LRCalc not initialized.");
  assert(LI.empty() && "Should only compute empty intervals.");
  LRCalc->reset(MF, getSlotIndexes(), DomTree, &getVNInfoAllocator());
  // Track subregister lanes only when the target/MRI enables it for this reg.
  LRCalc->calculate(LI, MRI->shouldTrackSubRegLiveness(LI.reg));
  computeDeadValues(LI, nullptr);
}
201 | |
/// Build a live interval for every virtual register that has at least one
/// non-debug use or def.
void LiveIntervals::computeVirtRegs() {
  for (unsigned i = 0, e = MRI->getNumVirtRegs(); i != e; ++i) {
    unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
    // Registers only referenced by debug instructions get no interval.
    if (MRI->reg_nodbg_empty(Reg))
      continue;
    createAndComputeVirtRegInterval(Reg);
  }
}
210 | |
/// Collect the slot index and clobber mask of every regmask in the function,
/// recording per-block (first index, count) pairs in RegMaskBlocks so that
/// queries can binary-search within a single block's masks.
void LiveIntervals::computeRegMasks() {
  RegMaskBlocks.resize(MF->getNumBlockIDs());

  // Find all instructions with regmask operands.
  for (const MachineBasicBlock &MBB : *MF) {
    std::pair<unsigned, unsigned> &RMB = RegMaskBlocks[MBB.getNumber()];
    RMB.first = RegMaskSlots.size();

    // Some block starts, such as EH funclets, create masks.
    if (const uint32_t *Mask = MBB.getBeginClobberMask(TRI)) {
      RegMaskSlots.push_back(Indexes->getMBBStartIdx(&MBB));
      RegMaskBits.push_back(Mask);
    }

    for (const MachineInstr &MI : MBB) {
      for (const MachineOperand &MO : MI.operands()) {
        if (!MO.isRegMask())
          continue;
        // RegMaskSlots and RegMaskBits stay index-parallel by construction.
        RegMaskSlots.push_back(Indexes->getInstructionIndex(MI).getRegSlot());
        RegMaskBits.push_back(MO.getRegMask());
      }
    }

    // Some block ends, such as funclet returns, create masks. Put the mask on
    // the last instruction of the block, because MBB slot index intervals are
    // half-open.
    if (const uint32_t *Mask = MBB.getEndClobberMask(TRI)) {
      assert(!MBB.empty() && "empty return block?");
      RegMaskSlots.push_back(
          Indexes->getInstructionIndex(MBB.back()).getRegSlot());
      RegMaskBits.push_back(Mask);
    }

    // Compute the number of register mask instructions in this block.
    RMB.second = RegMaskSlots.size() - RMB.first;
  }
}
248 | |
249 //===----------------------------------------------------------------------===// | |
250 // Register Unit Liveness | |
251 //===----------------------------------------------------------------------===// | |
252 // | |
253 // Fixed interference typically comes from ABI boundaries: Function arguments | |
254 // and return values are passed in fixed registers, and so are exception | |
255 // pointers entering landing pads. Certain instructions require values to be | |
256 // present in specific registers. That is also represented through fixed | |
257 // interference. | |
258 // | |
259 | |
/// Compute the live range of a register unit, based on the uses and defs of
/// aliasing registers. The range should be empty, or contain only dead
/// phi-defs from ABI blocks.
void LiveIntervals::computeRegUnitRange(LiveRange &LR, unsigned Unit) {
  assert(LRCalc && "LRCalc not initialized.");
  LRCalc->reset(MF, getSlotIndexes(), DomTree, &getVNInfoAllocator());

  // The physregs aliasing Unit are the roots and their super-registers.
  // Create all values as dead defs before extending to uses. Note that roots
  // may share super-registers. That's OK because createDeadDefs() is
  // idempotent. It is very rare for a register unit to have multiple roots, so
  // uniquing super-registers is probably not worthwhile.
  bool IsReserved = false;
  for (MCRegUnitRootIterator Root(Unit, TRI); Root.isValid(); ++Root) {
    bool IsRootReserved = true;
    for (MCSuperRegIterator Super(*Root, TRI, /*IncludeSelf=*/true);
         Super.isValid(); ++Super) {
      unsigned Reg = *Super;
      if (!MRI->reg_empty(Reg))
        LRCalc->createDeadDefs(LR, Reg);
      // A register unit is considered reserved if all its roots and all their
      // super registers are reserved.
      if (!MRI->isReserved(Reg))
        IsRootReserved = false;
    }
    IsReserved |= IsRootReserved;
  }
  // Cross-check against MRI's independent reserved-unit computation.
  assert(IsReserved == MRI->isReservedRegUnit(Unit) &&
         "reserved computation mismatch");

  // Now extend LR to reach all uses.
  // Ignore uses of reserved registers. We only track defs of those.
  if (!IsReserved) {
    for (MCRegUnitRootIterator Root(Unit, TRI); Root.isValid(); ++Root) {
      for (MCSuperRegIterator Super(*Root, TRI, /*IncludeSelf=*/true);
           Super.isValid(); ++Super) {
        unsigned Reg = *Super;
        if (!MRI->reg_empty(Reg))
          LRCalc->extendToUses(LR, Reg);
      }
    }
  }

  // Flush the segment set to the segment vector.
  if (UseSegmentSetForPhysRegs)
    LR.flushSegmentSet();
}
307 | |
/// Precompute the live ranges of any register units that are live-in to an ABI
/// block somewhere. Register values can appear without a corresponding def when
/// entering the entry block or a landing pad.
void LiveIntervals::computeLiveInRegUnits() {
  RegUnitRanges.resize(TRI->getNumRegUnits());
  DEBUG(dbgs() << "Computing live-in reg-units in ABI blocks.\n");

  // Keep track of the live range sets allocated.
  SmallVector<unsigned, 8> NewRanges;

  // Check all basic blocks for live-ins.
  for (const MachineBasicBlock &MBB : *MF) {
    // We only care about ABI blocks: Entry + landing pads.
    if ((&MBB != &MF->front() && !MBB.isEHPad()) || MBB.livein_empty())
      continue;

    // Create phi-defs at Begin for all live-in registers.
    SlotIndex Begin = Indexes->getMBBStartIdx(&MBB);
    DEBUG(dbgs() << Begin << "\t" << printMBBReference(MBB));
    for (const auto &LI : MBB.liveins()) {
      for (MCRegUnitIterator Units(LI.PhysReg, TRI); Units.isValid(); ++Units) {
        unsigned Unit = *Units;
        LiveRange *LR = RegUnitRanges[Unit];
        if (!LR) {
          // Use segment set to speed-up initial computation of the live range.
          LR = RegUnitRanges[Unit] = new LiveRange(UseSegmentSetForPhysRegs);
          NewRanges.push_back(Unit);
        }
        // createDeadDef is idempotent at a given index, so overlapping
        // live-in registers sharing a unit are handled correctly.
        VNInfo *VNI = LR->createDeadDef(Begin, getVNInfoAllocator());
        (void)VNI;
        DEBUG(dbgs() << ' ' << printRegUnit(Unit, TRI) << '#' << VNI->id);
      }
    }
    DEBUG(dbgs() << '\n');
  }
  DEBUG(dbgs() << "Created " << NewRanges.size() << " new intervals.\n");

  // Compute the 'normal' part of the ranges.
  for (unsigned Unit : NewRanges)
    computeRegUnitRange(*RegUnitRanges[Unit], Unit);
}
349 | |
350 static void createSegmentsForValues(LiveRange &LR, | |
351 iterator_range<LiveInterval::vni_iterator> VNIs) { | |
352 for (VNInfo *VNI : VNIs) { | |
353 if (VNI->isUnused()) | |
354 continue; | |
355 SlotIndex Def = VNI->def; | |
356 LR.addSegment(LiveRange::Segment(Def, Def.getDeadSlot(), VNI)); | |
357 } | |
358 } | |
359 | |
// Worklist of (use index, value) pairs still to be made live.
using ShrinkToUsesWorkList = SmallVector<std::pair<SlotIndex, VNInfo*>, 16>;

/// Grow the minimal segments in \p LR until every (Idx, VNI) pair in
/// \p WorkList is covered, pulling live-out values from predecessors as
/// needed. \p OldRange supplies the pre-shrink values at block boundaries.
static void extendSegmentsToUses(LiveRange &LR, const SlotIndexes &Indexes,
                                 ShrinkToUsesWorkList &WorkList,
                                 const LiveRange &OldRange) {
  // Keep track of the PHIs that are in use.
  SmallPtrSet<VNInfo*, 8> UsedPHIs;
  // Blocks that have already been added to WorkList as live-out.
  SmallPtrSet<const MachineBasicBlock*, 16> LiveOut;

  // Extend intervals to reach all uses in WorkList.
  while (!WorkList.empty()) {
    SlotIndex Idx = WorkList.back().first;
    VNInfo *VNI = WorkList.back().second;
    WorkList.pop_back();
    // Idx may be a block-end index; getPrevSlot keeps us inside the block.
    const MachineBasicBlock *MBB = Indexes.getMBBFromIndex(Idx.getPrevSlot());
    SlotIndex BlockStart = Indexes.getMBBStartIdx(MBB);

    // Extend the live range for VNI to be live at Idx.
    if (VNInfo *ExtVNI = LR.extendInBlock(BlockStart, Idx)) {
      assert(ExtVNI == VNI && "Unexpected existing value number");
      (void)ExtVNI;
      // Is this a PHIDef we haven't seen before?
      if (!VNI->isPHIDef() || VNI->def != BlockStart ||
          !UsedPHIs.insert(VNI).second)
        continue;
      // The PHI is live, make sure the predecessors are live-out.
      for (const MachineBasicBlock *Pred : MBB->predecessors()) {
        if (!LiveOut.insert(Pred).second)
          continue;
        SlotIndex Stop = Indexes.getMBBEndIdx(Pred);
        // A predecessor is not required to have a live-out value for a PHI.
        if (VNInfo *PVNI = OldRange.getVNInfoBefore(Stop))
          WorkList.push_back(std::make_pair(Stop, PVNI));
      }
      continue;
    }

    // VNI is live-in to MBB.
    DEBUG(dbgs() << " live-in at " << BlockStart << '\n');
    LR.addSegment(LiveRange::Segment(BlockStart, Idx, VNI));

    // Make sure VNI is live-out from the predecessors.
    for (const MachineBasicBlock *Pred : MBB->predecessors()) {
      if (!LiveOut.insert(Pred).second)
        continue;
      SlotIndex Stop = Indexes.getMBBEndIdx(Pred);
      assert(OldRange.getVNInfoBefore(Stop) == VNI &&
             "Wrong value out of predecessor");
      WorkList.push_back(std::make_pair(Stop, VNI));
    }
  }
}
413 | |
/// Trim \p li down to the minimal segments that cover its remaining uses,
/// collecting fully-dead defining instructions into \p dead (if non-null).
/// Returns true if dead PHIs were removed and the interval may now consist
/// of multiple connected components.
bool LiveIntervals::shrinkToUses(LiveInterval *li,
                                 SmallVectorImpl<MachineInstr*> *dead) {
  DEBUG(dbgs() << "Shrink: " << *li << '\n');
  assert(TargetRegisterInfo::isVirtualRegister(li->reg)
         && "Can only shrink virtual registers");

  // Shrink subregister live ranges.
  bool NeedsCleanup = false;
  for (LiveInterval::SubRange &S : li->subranges()) {
    shrinkToUses(S, li->reg);
    if (S.empty())
      NeedsCleanup = true;
  }
  if (NeedsCleanup)
    li->removeEmptySubRanges();

  // Find all the values used, including PHI kills.
  ShrinkToUsesWorkList WorkList;

  // Visit all instructions reading li->reg.
  unsigned Reg = li->reg;
  for (MachineInstr &UseMI : MRI->reg_instructions(Reg)) {
    if (UseMI.isDebugValue() || !UseMI.readsVirtualRegister(Reg))
      continue;
    SlotIndex Idx = getInstructionIndex(UseMI).getRegSlot();
    LiveQueryResult LRQ = li->Query(Idx);
    VNInfo *VNI = LRQ.valueIn();
    if (!VNI) {
      // This shouldn't happen: readsVirtualRegister returns true, but there is
      // no live value. It is likely caused by a target getting <undef> flags
      // wrong.
      DEBUG(dbgs() << Idx << '\t' << UseMI
                   << "Warning: Instr claims to read non-existent value in "
                   << *li << '\n');
      continue;
    }
    // Special case: An early-clobber tied operand reads and writes the
    // register one slot early.
    if (VNInfo *DefVNI = LRQ.valueDefined())
      Idx = DefVNI->def;

    WorkList.push_back(std::make_pair(Idx, VNI));
  }

  // Create new live ranges with only minimal live segments per def.
  LiveRange NewLR;
  createSegmentsForValues(NewLR, make_range(li->vni_begin(), li->vni_end()));
  extendSegmentsToUses(NewLR, *Indexes, WorkList, *li);

  // Move the trimmed segments back.
  li->segments.swap(NewLR.segments);

  // Handle dead values.
  bool CanSeparate = computeDeadValues(*li, dead);
  DEBUG(dbgs() << "Shrunk: " << *li << '\n');
  return CanSeparate;
}
471 | |
/// Post-process \p LI after (re)computation or shrinking: mark dead PHI
/// values unused, flag dead defs on their instructions, and collect
/// instructions whose defs are all dead into \p dead (if non-null).
/// Returns true if removing a dead PHI may have split LI into multiple
/// connected components.
bool LiveIntervals::computeDeadValues(LiveInterval &LI,
                                      SmallVectorImpl<MachineInstr*> *dead) {
  bool MayHaveSplitComponents = false;
  for (VNInfo *VNI : LI.valnos) {
    if (VNI->isUnused())
      continue;
    SlotIndex Def = VNI->def;
    LiveRange::iterator I = LI.FindSegmentContaining(Def);
    assert(I != LI.end() && "Missing segment for VNI");

    // Is the register live before? Otherwise we may have to add a read-undef
    // flag for subregister defs.
    unsigned VReg = LI.reg;
    if (MRI->shouldTrackSubRegLiveness(VReg)) {
      if ((I == LI.begin() || std::prev(I)->end < Def) && !VNI->isPHIDef()) {
        MachineInstr *MI = getInstructionFromIndex(Def);
        MI->setRegisterDefReadUndef(VReg);
      }
    }

    // A value whose segment ends at its own dead slot has no users.
    if (I->end != Def.getDeadSlot())
      continue;
    if (VNI->isPHIDef()) {
      // This is a dead PHI. Remove it.
      VNI->markUnused();
      LI.removeSegment(I);
      DEBUG(dbgs() << "Dead PHI at " << Def << " may separate interval\n");
      MayHaveSplitComponents = true;
    } else {
      // This is a dead def. Make sure the instruction knows.
      MachineInstr *MI = getInstructionFromIndex(Def);
      assert(MI && "No instruction defining live value");
      MI->addRegisterDead(LI.reg, TRI);
      if (dead && MI->allDefsAreDead()) {
        DEBUG(dbgs() << "All defs dead: " << Def << '\t' << *MI);
        dead->push_back(MI);
      }
    }
  }
  return MayHaveSplitComponents;
}
513 | |
/// Shrink the subregister range \p SR of virtual register \p Reg to the
/// minimal segments covering its remaining uses, then drop dead PHI values.
void LiveIntervals::shrinkToUses(LiveInterval::SubRange &SR, unsigned Reg) {
  DEBUG(dbgs() << "Shrink: " << SR << '\n');
  assert(TargetRegisterInfo::isVirtualRegister(Reg)
         && "Can only shrink virtual registers");
  // Find all the values used, including PHI kills.
  ShrinkToUsesWorkList WorkList;

  // Visit all instructions reading Reg.
  SlotIndex LastIdx;
  for (MachineOperand &MO : MRI->use_nodbg_operands(Reg)) {
    // Skip "undef" uses.
    if (!MO.readsReg())
      continue;
    // Maybe the operand is for a subregister we don't care about.
    unsigned SubReg = MO.getSubReg();
    if (SubReg != 0) {
      LaneBitmask LaneMask = TRI->getSubRegIndexLaneMask(SubReg);
      if ((LaneMask & SR.LaneMask).none())
        continue;
    }
    // We only need to visit each instruction once.
    // (Operands of the same instruction are visited consecutively, so
    // comparing against the previous index suffices.)
    MachineInstr *UseMI = MO.getParent();
    SlotIndex Idx = getInstructionIndex(*UseMI).getRegSlot();
    if (Idx == LastIdx)
      continue;
    LastIdx = Idx;

    LiveQueryResult LRQ = SR.Query(Idx);
    VNInfo *VNI = LRQ.valueIn();
    // For Subranges it is possible that only undef values are left in that
    // part of the subregister, so there is no real liverange at the use
    if (!VNI)
      continue;

    // Special case: An early-clobber tied operand reads and writes the
    // register one slot early.
    if (VNInfo *DefVNI = LRQ.valueDefined())
      Idx = DefVNI->def;

    WorkList.push_back(std::make_pair(Idx, VNI));
  }

  // Create a new live ranges with only minimal live segments per def.
  LiveRange NewLR;
  createSegmentsForValues(NewLR, make_range(SR.vni_begin(), SR.vni_end()));
  extendSegmentsToUses(NewLR, *Indexes, WorkList, SR);

  // Move the trimmed ranges back.
  SR.segments.swap(NewLR.segments);

  // Remove dead PHI value numbers
  for (VNInfo *VNI : SR.valnos) {
    if (VNI->isUnused())
      continue;
    const LiveRange::Segment *Segment = SR.getSegmentContaining(VNI->def);
    assert(Segment != nullptr && "Missing segment for VNI");
    if (Segment->end != VNI->def.getDeadSlot())
      continue;
    if (VNI->isPHIDef()) {
      // This is a dead PHI. Remove it.
      DEBUG(dbgs() << "Dead PHI at " << VNI->def << " may separate interval\n");
      VNI->markUnused();
      SR.removeSegment(*Segment);
    }
  }

  DEBUG(dbgs() << "Shrunk: " << SR << '\n');
}
582 | |
/// Extend \p LR so the value live at each index in \p Indices remains live up
/// to that index. \p Undefs lists positions where the register is explicitly
/// undefined, which block the backwards extension.
void LiveIntervals::extendToIndices(LiveRange &LR,
                                    ArrayRef<SlotIndex> Indices,
                                    ArrayRef<SlotIndex> Undefs) {
  assert(LRCalc && "LRCalc not initialized.");
  LRCalc->reset(MF, getSlotIndexes(), DomTree, &getVNInfoAllocator());
  for (SlotIndex Idx : Indices)
    LRCalc->extend(LR, Idx, /*PhysReg=*/0, Undefs);
}
591 | |
/// Remove the portion of the value live at \p Kill from \p Kill onwards,
/// across all blocks reachable while that value stays live. The former
/// end points of the removed segments are appended to \p EndPoints (if
/// non-null) so callers can re-extend the range later.
void LiveIntervals::pruneValue(LiveRange &LR, SlotIndex Kill,
                               SmallVectorImpl<SlotIndex> *EndPoints) {
  LiveQueryResult LRQ = LR.Query(Kill);
  VNInfo *VNI = LRQ.valueOutOrDead();
  // Nothing live at Kill: nothing to prune.
  if (!VNI)
    return;

  MachineBasicBlock *KillMBB = Indexes->getMBBFromIndex(Kill);
  SlotIndex MBBEnd = Indexes->getMBBEndIdx(KillMBB);

  // If VNI isn't live out from KillMBB, the value is trivially pruned.
  if (LRQ.endPoint() < MBBEnd) {
    LR.removeSegment(Kill, LRQ.endPoint());
    if (EndPoints) EndPoints->push_back(LRQ.endPoint());
    return;
  }

  // VNI is live out of KillMBB.
  LR.removeSegment(Kill, MBBEnd);
  if (EndPoints) EndPoints->push_back(MBBEnd);

  // Find all blocks that are reachable from KillMBB without leaving VNI's live
  // range. It is possible that KillMBB itself is reachable, so start a DFS
  // from each successor.
  using VisitedTy = df_iterator_default_set<MachineBasicBlock*,9>;
  VisitedTy Visited;
  for (MachineBasicBlock *Succ : KillMBB->successors()) {
    // Visited is shared across the successor DFSes so each block is
    // processed at most once.
    for (df_ext_iterator<MachineBasicBlock*, VisitedTy>
         I = df_ext_begin(Succ, Visited), E = df_ext_end(Succ, Visited);
         I != E;) {
      MachineBasicBlock *MBB = *I;

      // Check if VNI is live in to MBB.
      SlotIndex MBBStart, MBBEnd;
      std::tie(MBBStart, MBBEnd) = Indexes->getMBBRange(MBB);
      LiveQueryResult LRQ = LR.Query(MBBStart);
      if (LRQ.valueIn() != VNI) {
        // This block isn't part of the VNI segment. Prune the search.
        I.skipChildren();
        continue;
      }

      // Prune the search if VNI is killed in MBB.
      if (LRQ.endPoint() < MBBEnd) {
        LR.removeSegment(MBBStart, LRQ.endPoint());
        if (EndPoints) EndPoints->push_back(LRQ.endPoint());
        I.skipChildren();
        continue;
      }

      // VNI is live through MBB.
      LR.removeSegment(MBBStart, MBBEnd);
      if (EndPoints) EndPoints->push_back(MBBEnd);
      ++I;
    }
  }
}
649 | |
650 //===----------------------------------------------------------------------===// | |
651 // Register allocator hooks. | |
652 // | |
653 | |
/// Add kill flags to the last use of each virtual register's segments, after
/// register assignment. A kill is suppressed (CancelKill) when the assigned
/// physical register's regunits are still live past the use, or when
/// subregister liveness shows the use reads undefined lanes or the next
/// segment continues the same physreg value.
void LiveIntervals::addKillFlags(const VirtRegMap *VRM) {
  // Keep track of regunit ranges.
  SmallVector<std::pair<const LiveRange*, LiveRange::const_iterator>, 8> RU;
  // Keep track of subregister ranges.
  SmallVector<std::pair<const LiveInterval::SubRange*,
                        LiveRange::const_iterator>, 4> SRs;

  for (unsigned i = 0, e = MRI->getNumVirtRegs(); i != e; ++i) {
    unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
    if (MRI->reg_nodbg_empty(Reg))
      continue;
    const LiveInterval &LI = getInterval(Reg);
    if (LI.empty())
      continue;

    // Find the regunit intervals for the assigned register. They may overlap
    // the virtual register live range, cancelling any kills.
    RU.clear();
    for (MCRegUnitIterator Unit(VRM->getPhys(Reg), TRI); Unit.isValid();
         ++Unit) {
      const LiveRange &RURange = getRegUnit(*Unit);
      if (RURange.empty())
        continue;
      // Cache an iterator per range; advanceTo below moves it forward only,
      // so the whole pass over LI is linear.
      RU.push_back(std::make_pair(&RURange, RURange.find(LI.begin()->end)));
    }

    if (MRI->subRegLivenessEnabled()) {
      SRs.clear();
      for (const LiveInterval::SubRange &SR : LI.subranges()) {
        SRs.push_back(std::make_pair(&SR, SR.find(LI.begin()->end)));
      }
    }

    // Every instruction that kills Reg corresponds to a segment range end
    // point.
    for (LiveInterval::const_iterator RI = LI.begin(), RE = LI.end(); RI != RE;
         ++RI) {
      // A block index indicates an MBB edge.
      if (RI->end.isBlock())
        continue;
      MachineInstr *MI = getInstructionFromIndex(RI->end);
      if (!MI)
        continue;

      // Check if any of the regunits are live beyond the end of RI. That could
      // happen when a physreg is defined as a copy of a virtreg:
      //
      //   %eax = COPY %5
      //   FOO %5             <--- MI, cancel kill because %eax is live.
      //   BAR killed %eax
      //
      // There should be no kill flag on FOO when %5 is rewritten as %eax.
      for (auto &RUP : RU) {
        const LiveRange &RURange = *RUP.first;
        LiveRange::const_iterator &I = RUP.second;
        if (I == RURange.end())
          continue;
        I = RURange.advanceTo(I, RI->end);
        if (I == RURange.end() || I->start >= RI->end)
          continue;
        // I is overlapping RI.
        goto CancelKill;
      }

      if (MRI->subRegLivenessEnabled()) {
        // When reading a partial undefined value we must not add a kill flag.
        // The regalloc might have used the undef lane for something else.
        // Example:
        //     %1 = ...                  ; R32: %1
        //     %2:high16 = ...           ; R64: %2
        //        = read killed %2        ; R64: %2
        //        = read %1              ; R32: %1
        // The <kill> flag is correct for %2, but the register allocator may
        // assign R0L to %1, and R0 to %2 because the low 32bits of R0
        // are actually never written by %2. After assignment the <kill>
        // flag at the read instruction is invalid.
        LaneBitmask DefinedLanesMask;
        if (!SRs.empty()) {
          // Compute a mask of lanes that are defined.
          DefinedLanesMask = LaneBitmask::getNone();
          for (auto &SRP : SRs) {
            const LiveInterval::SubRange &SR = *SRP.first;
            LiveRange::const_iterator &I = SRP.second;
            if (I == SR.end())
              continue;
            I = SR.advanceTo(I, RI->end);
            if (I == SR.end() || I->start >= RI->end)
              continue;
            // I is overlapping RI
            DefinedLanesMask |= SR.LaneMask;
          }
        } else
          DefinedLanesMask = LaneBitmask::getAll();

        bool IsFullWrite = false;
        for (const MachineOperand &MO : MI->operands()) {
          if (!MO.isReg() || MO.getReg() != Reg)
            continue;
          if (MO.isUse()) {
            // Reading any undefined lanes?
            LaneBitmask UseMask = TRI->getSubRegIndexLaneMask(MO.getSubReg());
            if ((UseMask & ~DefinedLanesMask).any())
              goto CancelKill;
          } else if (MO.getSubReg() == 0) {
            // Writing to the full register?
            assert(MO.isDef());
            IsFullWrite = true;
          }
        }

        // If an instruction writes to a subregister, a new segment starts in
        // the LiveInterval. But as this is only overriding part of the register
        // adding kill-flags is not correct here after registers have been
        // assigned.
        if (!IsFullWrite) {
          // Next segment has to be adjacent in the subregister write case.
          LiveRange::const_iterator N = std::next(RI);
          if (N != LI.end() && N->start == RI->end)
            goto CancelKill;
        }
      }

      MI->addRegisterKilled(Reg, nullptr);
      continue;
CancelKill:
      MI->clearRegisterKills(Reg, nullptr);
    }
  }
}
783 | |
/// Return the single basic block that fully contains \p LI, or nullptr if the
/// interval crosses a block boundary (or is PHI-defined at a block start).
MachineBasicBlock*
LiveIntervals::intervalIsInOneMBB(const LiveInterval &LI) const {
  // A local live range must be fully contained inside the block, meaning it is
  // defined and killed at instructions, not at block boundaries. It is not
  // live into or out of any block.
  //
  // It is technically possible to have a PHI-defined live range identical to a
  // single block, but we are going to return false in that case.

  SlotIndex Start = LI.beginIndex();
  if (Start.isBlock())
    return nullptr;

  SlotIndex Stop = LI.endIndex();
  if (Stop.isBlock())
    return nullptr;

  // getMBBFromIndex doesn't need to search the MBB table when both indexes
  // belong to proper instructions.
  MachineBasicBlock *MBB1 = Indexes->getMBBFromIndex(Start);
  MachineBasicBlock *MBB2 = Indexes->getMBBFromIndex(Stop);
  return MBB1 == MBB2 ? MBB1 : nullptr;
}
807 | |
808 bool | |
809 LiveIntervals::hasPHIKill(const LiveInterval &LI, const VNInfo *VNI) const { | |
810 for (const VNInfo *PHI : LI.valnos) { | |
811 if (PHI->isUnused() || !PHI->isPHIDef()) | |
812 continue; | |
813 const MachineBasicBlock *PHIMBB = getMBBFromIndex(PHI->def); | |
814 // Conservatively return true instead of scanning huge predecessor lists. | |
815 if (PHIMBB->pred_size() > 100) | |
816 return true; | |
817 for (const MachineBasicBlock *Pred : PHIMBB->predecessors()) | |
818 if (VNI == LI.getVNInfoBefore(Indexes->getMBBEndIdx(Pred))) | |
819 return true; | |
820 } | |
821 return false; | |
822 } | |
823 | |
824 float LiveIntervals::getSpillWeight(bool isDef, bool isUse, | |
825 const MachineBlockFrequencyInfo *MBFI, | |
826 const MachineInstr &MI) { | |
827 return getSpillWeight(isDef, isUse, MBFI, MI.getParent()); | |
828 } | |
829 | |
830 float LiveIntervals::getSpillWeight(bool isDef, bool isUse, | |
831 const MachineBlockFrequencyInfo *MBFI, | |
832 const MachineBasicBlock *MBB) { | |
833 BlockFrequency Freq = MBFI->getBlockFreq(MBB); | |
834 const float Scale = 1.0f / MBFI->getEntryFreq(); | |
835 return (isDef + isUse) * (Freq.getFrequency() * Scale); | |
836 } | |
837 | |
838 LiveRange::Segment | |
839 LiveIntervals::addSegmentToEndOfBlock(unsigned reg, MachineInstr &startInst) { | |
840 LiveInterval& Interval = createEmptyInterval(reg); | |
841 VNInfo *VN = Interval.getNextValue( | |
842 SlotIndex(getInstructionIndex(startInst).getRegSlot()), | |
843 getVNInfoAllocator()); | |
844 LiveRange::Segment S(SlotIndex(getInstructionIndex(startInst).getRegSlot()), | |
845 getMBBEndIdx(startInst.getParent()), VN); | |
846 Interval.addSegment(S); | |
847 | |
848 return S; | |
849 } | |
850 | |
851 //===----------------------------------------------------------------------===// | |
852 // Register mask functions | |
853 //===----------------------------------------------------------------------===// | |
854 | |
bool LiveIntervals::checkRegMaskInterference(LiveInterval &LI,
                                             BitVector &UsableRegs) {
  // Returns true iff LI overlaps at least one register-mask slot. On the
  // first overlap, UsableRegs is reset to all-ones and then intersected with
  // every overlapping mask, leaving the set of registers not clobbered by any
  // mask inside LI.
  if (LI.empty())
    return false;
  LiveInterval::iterator LiveI = LI.begin(), LiveE = LI.end();

  // Use smaller arrays for local live ranges.
  ArrayRef<SlotIndex> Slots;
  ArrayRef<const uint32_t*> Bits;
  if (MachineBasicBlock *MBB = intervalIsInOneMBB(LI)) {
    // The whole range lives inside one block, so only that block's
    // register-mask slots can interfere.
    Slots = getRegMaskSlotsInBlock(MBB->getNumber());
    Bits = getRegMaskBitsInBlock(MBB->getNumber());
  } else {
    Slots = getRegMaskSlots();
    Bits = getRegMaskBits();
  }

  // We are going to enumerate all the register mask slots contained in LI.
  // Start with a binary search of RegMaskSlots to find a starting point.
  ArrayRef<SlotIndex>::iterator SlotI =
      std::lower_bound(Slots.begin(), Slots.end(), LiveI->start);
  ArrayRef<SlotIndex>::iterator SlotE = Slots.end();

  // No slots in range, LI begins after the last call.
  if (SlotI == SlotE)
    return false;

  bool Found = false;
  // Leap-frog LiveI (segments of LI) and SlotI (mask slots); both are sorted.
  while (true) {
    assert(*SlotI >= LiveI->start);
    // Loop over all slots overlapping this segment.
    while (*SlotI < LiveI->end) {
      // *SlotI overlaps LI. Collect mask bits.
      if (!Found) {
        // This is the first overlap. Initialize UsableRegs to all ones.
        UsableRegs.clear();
        UsableRegs.resize(TRI->getNumRegs(), true);
        Found = true;
      }
      // Remove usable registers clobbered by this mask.
      UsableRegs.clearBitsNotInMask(Bits[SlotI-Slots.begin()]);
      if (++SlotI == SlotE)
        return Found;
    }
    // *SlotI is beyond the current LI segment.
    LiveI = LI.advanceTo(LiveI, *SlotI);
    if (LiveI == LiveE)
      return Found;
    // Advance SlotI until it overlaps.
    while (*SlotI < LiveI->start)
      if (++SlotI == SlotE)
        return Found;
  }
}
909 | |
910 //===----------------------------------------------------------------------===// | |
911 // IntervalUpdate class. | |
912 //===----------------------------------------------------------------------===// | |
913 | |
/// Toolkit used by handleMove to trim or extend live intervals.
///
/// One HMEditor models a single instruction move from OldIdx to NewIdx within
/// a basic block, and repairs every LiveRange the moved instruction touches.
class LiveIntervals::HMEditor {
private:
  LiveIntervals& LIS;
  const MachineRegisterInfo& MRI;
  const TargetRegisterInfo& TRI;
  // Slot the instruction is moving from.
  SlotIndex OldIdx;
  // Slot the instruction is moving to.
  SlotIndex NewIdx;
  // Ranges already repaired; prevents touching a range twice when several
  // operands reference the same register.
  SmallPtrSet<LiveRange*, 8> Updated;
  bool UpdateFlags;

public:
  HMEditor(LiveIntervals& LIS, const MachineRegisterInfo& MRI,
           const TargetRegisterInfo& TRI,
           SlotIndex OldIdx, SlotIndex NewIdx, bool UpdateFlags)
    : LIS(LIS), MRI(MRI), TRI(TRI), OldIdx(OldIdx), NewIdx(NewIdx),
      UpdateFlags(UpdateFlags) {}

  // FIXME: UpdateFlags is a workaround that creates live intervals for all
  // physregs, even those that aren't needed for regalloc, in order to update
  // kill flags. This is wasteful. Eventually, LiveVariables will strip all kill
  // flags, and postRA passes will use a live register utility instead.
  LiveRange *getRegUnitLI(unsigned Unit) {
    if (UpdateFlags && !MRI.isReservedRegUnit(Unit))
      return &LIS.getRegUnit(Unit);
    return LIS.getCachedRegUnit(Unit);
  }

  /// Update all live ranges touched by MI, assuming a move from OldIdx to
  /// NewIdx.
  void updateAllRanges(MachineInstr *MI) {
    DEBUG(dbgs() << "handleMove " << OldIdx << " -> " << NewIdx << ": " << *MI);
    bool hasRegMask = false;
    for (MachineOperand &MO : MI->operands()) {
      if (MO.isRegMask())
        hasRegMask = true;
      if (!MO.isReg())
        continue;
      if (MO.isUse()) {
        if (!MO.readsReg())
          continue;
        // Aggressively clear all kill flags.
        // They are reinserted by VirtRegRewriter.
        MO.setIsKill(false);
      }

      unsigned Reg = MO.getReg();
      if (!Reg)
        continue;
      if (TargetRegisterInfo::isVirtualRegister(Reg)) {
        LiveInterval &LI = LIS.getInterval(Reg);
        if (LI.hasSubRanges()) {
          // Repair every subrange whose lanes overlap the operand's lanes,
          // then the main range below.
          unsigned SubReg = MO.getSubReg();
          LaneBitmask LaneMask = SubReg ? TRI.getSubRegIndexLaneMask(SubReg)
                                        : MRI.getMaxLaneMaskForVReg(Reg);
          for (LiveInterval::SubRange &S : LI.subranges()) {
            if ((S.LaneMask & LaneMask).none())
              continue;
            updateRange(S, Reg, S.LaneMask);
          }
        }
        updateRange(LI, Reg, LaneBitmask::getNone());
        continue;
      }

      // For physregs, only update the regunits that actually have a
      // precomputed live range.
      for (MCRegUnitIterator Units(Reg, &TRI); Units.isValid(); ++Units)
        if (LiveRange *LR = getRegUnitLI(*Units))
          updateRange(*LR, *Units, LaneBitmask::getNone());
    }
    if (hasRegMask)
      updateRegMaskSlots();
  }

private:
  /// Update a single live range, assuming an instruction has been moved from
  /// OldIdx to NewIdx.
  void updateRange(LiveRange &LR, unsigned Reg, LaneBitmask LaneMask) {
    // Skip ranges that were already repaired via another operand.
    if (!Updated.insert(&LR).second)
      return;
    DEBUG({
      dbgs() << "     ";
      if (TargetRegisterInfo::isVirtualRegister(Reg)) {
        dbgs() << printReg(Reg);
        if (LaneMask.any())
          dbgs() << " L" << PrintLaneMask(LaneMask);
      } else {
        dbgs() << printRegUnit(Reg, &TRI);
      }
      dbgs() << ":\t" << LR << '\n';
    });
    if (SlotIndex::isEarlierInstr(OldIdx, NewIdx))
      handleMoveDown(LR);
    else
      handleMoveUp(LR, Reg, LaneMask);
    DEBUG(dbgs() << "        -->\t" << LR << '\n');
    LR.verify();
  }

  /// Update LR to reflect an instruction has been moved downwards from OldIdx
  /// to NewIdx (OldIdx < NewIdx).
  void handleMoveDown(LiveRange &LR) {
    LiveRange::iterator E = LR.end();
    // Segment going into OldIdx.
    LiveRange::iterator OldIdxIn = LR.find(OldIdx.getBaseIndex());

    // No value live before or after OldIdx? Nothing to do.
    if (OldIdxIn == E || SlotIndex::isEarlierInstr(OldIdx, OldIdxIn->start))
      return;

    LiveRange::iterator OldIdxOut;
    // Do we have a value live-in to OldIdx?
    if (SlotIndex::isEarlierInstr(OldIdxIn->start, OldIdx)) {
      // If the live-in value already extends to NewIdx, there is nothing to do.
      if (SlotIndex::isEarlierEqualInstr(NewIdx, OldIdxIn->end))
        return;
      // Aggressively remove all kill flags from the old kill point.
      // Kill flags shouldn't be used while live intervals exist, they will be
      // reinserted by VirtRegRewriter.
      if (MachineInstr *KillMI = LIS.getInstructionFromIndex(OldIdxIn->end))
        for (MIBundleOperands MO(*KillMI); MO.isValid(); ++MO)
          if (MO->isReg() && MO->isUse())
            MO->setIsKill(false);

      // Is there a def before NewIdx which is not OldIdx?
      LiveRange::iterator Next = std::next(OldIdxIn);
      if (Next != E && !SlotIndex::isSameInstr(OldIdx, Next->start) &&
          SlotIndex::isEarlierInstr(Next->start, NewIdx)) {
        // If we are here then OldIdx was just a use but not a def. We only have
        // to ensure liveness extends to NewIdx.
        LiveRange::iterator NewIdxIn =
          LR.advanceTo(Next, NewIdx.getBaseIndex());
        // Extend the segment before NewIdx if necessary.
        if (NewIdxIn == E ||
            !SlotIndex::isEarlierInstr(NewIdxIn->start, NewIdx)) {
          LiveRange::iterator Prev = std::prev(NewIdxIn);
          Prev->end = NewIdx.getRegSlot();
        }
        // Extend OldIdxIn.
        OldIdxIn->end = Next->start;
        return;
      }

      // Adjust OldIdxIn->end to reach NewIdx. This may temporarily make LR
      // invalid by overlapping ranges.
      bool isKill = SlotIndex::isSameInstr(OldIdx, OldIdxIn->end);
      OldIdxIn->end = NewIdx.getRegSlot(OldIdxIn->end.isEarlyClobber());
      // If this was not a kill, then there was no def and we're done.
      if (!isKill)
        return;

      // Did we have a Def at OldIdx?
      OldIdxOut = Next;
      if (OldIdxOut == E || !SlotIndex::isSameInstr(OldIdx, OldIdxOut->start))
        return;
    } else {
      OldIdxOut = OldIdxIn;
    }

    // If we are here then there is a Definition at OldIdx. OldIdxOut points
    // to the segment starting there.
    assert(OldIdxOut != E && SlotIndex::isSameInstr(OldIdx, OldIdxOut->start) &&
           "No def?");
    VNInfo *OldIdxVNI = OldIdxOut->valno;
    assert(OldIdxVNI->def == OldIdxOut->start && "Inconsistent def");

    // If the defined value extends beyond NewIdx, just move the beginning
    // of the segment to NewIdx.
    SlotIndex NewIdxDef = NewIdx.getRegSlot(OldIdxOut->start.isEarlyClobber());
    if (SlotIndex::isEarlierInstr(NewIdxDef, OldIdxOut->end)) {
      OldIdxVNI->def = NewIdxDef;
      OldIdxOut->start = OldIdxVNI->def;
      return;
    }

    // If we are here then we have a Definition at OldIdx which ends before
    // NewIdx.

    // Is there an existing Def at NewIdx?
    LiveRange::iterator AfterNewIdx
      = LR.advanceTo(OldIdxOut, NewIdx.getRegSlot());
    bool OldIdxDefIsDead = OldIdxOut->end.isDead();
    if (!OldIdxDefIsDead &&
        SlotIndex::isEarlierInstr(OldIdxOut->end, NewIdxDef)) {
      // OldIdx is not a dead def, and NewIdxDef is inside a new interval.
      VNInfo *DefVNI;
      if (OldIdxOut != LR.begin() &&
          !SlotIndex::isEarlierInstr(std::prev(OldIdxOut)->end,
                                     OldIdxOut->start)) {
        // There is no gap between OldIdxOut and its predecessor anymore,
        // merge them.
        LiveRange::iterator IPrev = std::prev(OldIdxOut);
        DefVNI = OldIdxVNI;
        IPrev->end = OldIdxOut->end;
      } else {
        // The value is live in to OldIdx
        LiveRange::iterator INext = std::next(OldIdxOut);
        assert(INext != E && "Must have following segment");
        // We merge OldIdxOut and its successor. As we're dealing with subreg
        // reordering, there is always a successor to OldIdxOut in the same BB
        // We don't need INext->valno anymore and will reuse for the new segment
        // we create later.
        DefVNI = OldIdxVNI;
        INext->start = OldIdxOut->end;
        INext->valno->def = INext->start;
      }
      // If NewIdx is behind the last segment, extend that and append a new one.
      if (AfterNewIdx == E) {
        // OldIdxOut is undef at this point, Slide (OldIdxOut;AfterNewIdx] up
        // one position.
        //    |-  ?/OldIdxOut  -| |- X0 -| ... |- Xn -| end
        // => |- X0/OldIdxOut  -| ... |- Xn -| |- undef/NewS -| end
        std::copy(std::next(OldIdxOut), E, OldIdxOut);
        // The last segment is undefined now, reuse it for a dead def.
        LiveRange::iterator NewSegment = std::prev(E);
        *NewSegment = LiveRange::Segment(NewIdxDef, NewIdxDef.getDeadSlot(),
                                         DefVNI);
        DefVNI->def = NewIdxDef;

        LiveRange::iterator Prev = std::prev(NewSegment);
        Prev->end = NewIdxDef;
      } else {
        // OldIdxOut is undef at this point, Slide (OldIdxOut;AfterNewIdx] up
        // one position.
        //    |-  ?/OldIdxOut -| |- X0 -| ... |- Xn/AfterNewIdx -| |- Next -|
        // => |- X0/OldIdxOut -| ... |- Xn -| |- Xn/AfterNewIdx -| |- Next -|
        std::copy(std::next(OldIdxOut), std::next(AfterNewIdx), OldIdxOut);
        LiveRange::iterator Prev = std::prev(AfterNewIdx);
        // We have two cases:
        if (SlotIndex::isEarlierInstr(Prev->start, NewIdxDef)) {
          // Case 1: NewIdx is inside a liverange. Split this liverange at
          // NewIdxDef into the segment "Prev" followed by "NewSegment".
          LiveRange::iterator NewSegment = AfterNewIdx;
          *NewSegment = LiveRange::Segment(NewIdxDef, Prev->end, Prev->valno);
          Prev->valno->def = NewIdxDef;

          *Prev = LiveRange::Segment(Prev->start, NewIdxDef, DefVNI);
          DefVNI->def = Prev->start;
        } else {
          // Case 2: NewIdx is in a lifetime hole. Keep AfterNewIdx as is and
          // turn Prev into a segment from NewIdx to AfterNewIdx->start.
          *Prev = LiveRange::Segment(NewIdxDef, AfterNewIdx->start, DefVNI);
          DefVNI->def = NewIdxDef;
          assert(DefVNI != AfterNewIdx->valno);
        }
      }
      return;
    }

    if (AfterNewIdx != E &&
        SlotIndex::isSameInstr(AfterNewIdx->start, NewIdxDef)) {
      // There is an existing def at NewIdx. The def at OldIdx is coalesced into
      // that value.
      assert(AfterNewIdx->valno != OldIdxVNI && "Multiple defs of value?");
      LR.removeValNo(OldIdxVNI);
    } else {
      // There was no existing def at NewIdx. We need to create a dead def
      // at NewIdx. Shift segments over the old OldIdxOut segment, this frees
      // a new segment at the place where we want to construct the dead def.
      //    |- OldIdxOut -| |- X0 -| ... |- Xn -| |- AfterNewIdx -|
      // => |- X0/OldIdxOut -| ... |- Xn -| |- undef/NewS. -| |- AfterNewIdx -|
      assert(AfterNewIdx != OldIdxOut && "Inconsistent iterators");
      std::copy(std::next(OldIdxOut), AfterNewIdx, OldIdxOut);
      // We can reuse OldIdxVNI now.
      LiveRange::iterator NewSegment = std::prev(AfterNewIdx);
      VNInfo *NewSegmentVNI = OldIdxVNI;
      NewSegmentVNI->def = NewIdxDef;
      *NewSegment = LiveRange::Segment(NewIdxDef, NewIdxDef.getDeadSlot(),
                                       NewSegmentVNI);
    }
  }

  /// Update LR to reflect an instruction has been moved upwards from OldIdx
  /// to NewIdx (NewIdx < OldIdx).
  void handleMoveUp(LiveRange &LR, unsigned Reg, LaneBitmask LaneMask) {
    LiveRange::iterator E = LR.end();
    // Segment going into OldIdx.
    LiveRange::iterator OldIdxIn = LR.find(OldIdx.getBaseIndex());

    // No value live before or after OldIdx? Nothing to do.
    if (OldIdxIn == E || SlotIndex::isEarlierInstr(OldIdx, OldIdxIn->start))
      return;

    LiveRange::iterator OldIdxOut;
    // Do we have a value live-in to OldIdx?
    if (SlotIndex::isEarlierInstr(OldIdxIn->start, OldIdx)) {
      // If the live-in value isn't killed here, then we have no Def at
      // OldIdx, moreover the value must be live at NewIdx so there is nothing
      // to do.
      bool isKill = SlotIndex::isSameInstr(OldIdx, OldIdxIn->end);
      if (!isKill)
        return;

      // At this point we have to move OldIdxIn->end back to the nearest
      // previous use or (dead-)def but no further than NewIdx.
      SlotIndex DefBeforeOldIdx
        = std::max(OldIdxIn->start.getDeadSlot(),
                   NewIdx.getRegSlot(OldIdxIn->end.isEarlyClobber()));
      OldIdxIn->end = findLastUseBefore(DefBeforeOldIdx, Reg, LaneMask);

      // Did we have a Def at OldIdx? If not we are done now.
      OldIdxOut = std::next(OldIdxIn);
      if (OldIdxOut == E || !SlotIndex::isSameInstr(OldIdx, OldIdxOut->start))
        return;
    } else {
      OldIdxOut = OldIdxIn;
      OldIdxIn = OldIdxOut != LR.begin() ? std::prev(OldIdxOut) : E;
    }

    // If we are here then there is a Definition at OldIdx. OldIdxOut points
    // to the segment starting there.
    assert(OldIdxOut != E && SlotIndex::isSameInstr(OldIdx, OldIdxOut->start) &&
           "No def?");
    VNInfo *OldIdxVNI = OldIdxOut->valno;
    assert(OldIdxVNI->def == OldIdxOut->start && "Inconsistent def");
    bool OldIdxDefIsDead = OldIdxOut->end.isDead();

    // Is there an existing def at NewIdx?
    SlotIndex NewIdxDef = NewIdx.getRegSlot(OldIdxOut->start.isEarlyClobber());
    LiveRange::iterator NewIdxOut = LR.find(NewIdx.getRegSlot());
    if (SlotIndex::isSameInstr(NewIdxOut->start, NewIdx)) {
      assert(NewIdxOut->valno != OldIdxVNI &&
             "Same value defined more than once?");
      // If OldIdx was a dead def remove it.
      if (!OldIdxDefIsDead) {
        // Remove segment starting at NewIdx and move begin of OldIdxOut to
        // NewIdx so it can take its place.
        OldIdxVNI->def = NewIdxDef;
        OldIdxOut->start = NewIdxDef;
        LR.removeValNo(NewIdxOut->valno);
      } else {
        // Simply remove the dead def at OldIdx.
        LR.removeValNo(OldIdxVNI);
      }
    } else {
      // Previously nothing was live after NewIdx, so all we have to do now is
      // move the begin of OldIdxOut to NewIdx.
      if (!OldIdxDefIsDead) {
        // Do we have any intermediate Defs between OldIdx and NewIdx?
        if (OldIdxIn != E &&
            SlotIndex::isEarlierInstr(NewIdxDef, OldIdxIn->start)) {
          // OldIdx is not a dead def and NewIdx is before predecessor start.
          LiveRange::iterator NewIdxIn = NewIdxOut;
          assert(NewIdxIn == LR.find(NewIdx.getBaseIndex()));
          const SlotIndex SplitPos = NewIdxDef;
          OldIdxVNI = OldIdxIn->valno;

          // Merge the OldIdxIn and OldIdxOut segments into OldIdxOut.
          OldIdxOut->valno->def = OldIdxIn->start;
          *OldIdxOut = LiveRange::Segment(OldIdxIn->start, OldIdxOut->end,
                                          OldIdxOut->valno);
          // OldIdxIn and OldIdxVNI are now undef and can be overridden.
          // We Slide [NewIdxIn, OldIdxIn) down one position.
          //    |- X0/NewIdxIn -| ... |- Xn-1 -||- Xn/OldIdxIn -||- OldIdxOut -|
          // => |- undef/NexIdxIn -| |- X0 -| ... |- Xn-1 -| |- Xn/OldIdxOut -|
          std::copy_backward(NewIdxIn, OldIdxIn, OldIdxOut);
          // NewIdxIn is now considered undef so we can reuse it for the moved
          // value.
          LiveRange::iterator NewSegment = NewIdxIn;
          LiveRange::iterator Next = std::next(NewSegment);
          if (SlotIndex::isEarlierInstr(Next->start, NewIdx)) {
            // There is no gap between NewSegment and its predecessor.
            *NewSegment = LiveRange::Segment(Next->start, SplitPos,
                                             Next->valno);
            *Next = LiveRange::Segment(SplitPos, Next->end, OldIdxVNI);
            Next->valno->def = SplitPos;
          } else {
            // There is a gap between NewSegment and its predecessor
            // Value becomes live in.
            *NewSegment = LiveRange::Segment(SplitPos, Next->start, OldIdxVNI);
            NewSegment->valno->def = SplitPos;
          }
        } else {
          // Leave the end point of a live def.
          OldIdxOut->start = NewIdxDef;
          OldIdxVNI->def = NewIdxDef;
          if (OldIdxIn != E && SlotIndex::isEarlierInstr(NewIdx, OldIdxIn->end))
            OldIdxIn->end = NewIdx.getRegSlot();
        }
      } else {
        // OldIdxVNI is a dead def. It may have been moved across other values
        // in LR, so move OldIdxOut up to NewIdxOut. Slide [NewIdxOut;OldIdxOut)
        // down one position.
        //    |- X0/NewIdxOut -| ... |- Xn-1 -| |- Xn/OldIdxOut -| |- next - |
        // => |- undef/NewIdxOut -| |- X0 -| ... |- Xn-1 -| |- next -|
        std::copy_backward(NewIdxOut, OldIdxOut, std::next(OldIdxOut));
        // OldIdxVNI can be reused now to build a new dead def segment.
        LiveRange::iterator NewSegment = NewIdxOut;
        VNInfo *NewSegmentVNI = OldIdxVNI;
        *NewSegment = LiveRange::Segment(NewIdxDef, NewIdxDef.getDeadSlot(),
                                         NewSegmentVNI);
        NewSegmentVNI->def = NewIdxDef;
      }
    }
  }

  /// Rewrite the RegMaskSlots entry for OldIdx to NewIdx after the move.
  void updateRegMaskSlots() {
    SmallVectorImpl<SlotIndex>::iterator RI =
      std::lower_bound(LIS.RegMaskSlots.begin(), LIS.RegMaskSlots.end(),
                       OldIdx);
    assert(RI != LIS.RegMaskSlots.end() && *RI == OldIdx.getRegSlot() &&
           "No RegMask at OldIdx.");
    *RI = NewIdx.getRegSlot();
    // The slots array must stay sorted; a regmask instruction may not be
    // moved past another one.
    assert((RI == LIS.RegMaskSlots.begin() ||
            SlotIndex::isEarlierInstr(*std::prev(RI), *RI)) &&
           "Cannot move regmask instruction above another call");
    assert((std::next(RI) == LIS.RegMaskSlots.end() ||
            SlotIndex::isEarlierInstr(*RI, *std::next(RI))) &&
           "Cannot move regmask instruction below another call");
  }

  // Return the last use of reg between NewIdx and OldIdx.
  SlotIndex findLastUseBefore(SlotIndex Before, unsigned Reg,
                              LaneBitmask LaneMask) {
    if (TargetRegisterInfo::isVirtualRegister(Reg)) {
      SlotIndex LastUse = Before;
      for (MachineOperand &MO : MRI.use_nodbg_operands(Reg)) {
        if (MO.isUndef())
          continue;
        unsigned SubReg = MO.getSubReg();
        // Ignore uses of lanes outside LaneMask.
        if (SubReg != 0 && LaneMask.any()
            && (TRI.getSubRegIndexLaneMask(SubReg) & LaneMask).none())
          continue;

        const MachineInstr &MI = *MO.getParent();
        SlotIndex InstSlot = LIS.getSlotIndexes()->getInstructionIndex(MI);
        if (InstSlot > LastUse && InstSlot < OldIdx)
          LastUse = InstSlot.getRegSlot();
      }
      return LastUse;
    }

    // This is a regunit interval, so scanning the use list could be very
    // expensive. Scan upwards from OldIdx instead.
    assert(Before < OldIdx && "Expected upwards move");
    SlotIndexes *Indexes = LIS.getSlotIndexes();
    MachineBasicBlock *MBB = Indexes->getMBBFromIndex(Before);

    // OldIdx may not correspond to an instruction any longer, so set MII to
    // point to the next instruction after OldIdx, or MBB->end().
    MachineBasicBlock::iterator MII = MBB->end();
    if (MachineInstr *MI = Indexes->getInstructionFromIndex(
                           Indexes->getNextNonNullIndex(OldIdx)))
      if (MI->getParent() == MBB)
        MII = MI;

    MachineBasicBlock::iterator Begin = MBB->begin();
    while (MII != Begin) {
      if ((--MII)->isDebugValue())
        continue;
      SlotIndex Idx = Indexes->getInstructionIndex(*MII);

      // Stop searching when Before is reached.
      if (!SlotIndex::isEarlierInstr(Before, Idx))
        return Before;

      // Check if MII uses Reg.
      for (MIBundleOperands MO(*MII); MO.isValid(); ++MO)
        if (MO->isReg() && !MO->isUndef() &&
            TargetRegisterInfo::isPhysicalRegister(MO->getReg()) &&
            TRI.hasRegUnit(MO->getReg(), Reg))
          return Idx.getRegSlot();
    }
    // Didn't reach Before. It must be the first instruction in the block.
    return Before;
  }
};
1382 | |
1383 void LiveIntervals::handleMove(MachineInstr &MI, bool UpdateFlags) { | |
1384 assert(!MI.isBundled() && "Can't handle bundled instructions yet."); | |
1385 SlotIndex OldIndex = Indexes->getInstructionIndex(MI); | |
1386 Indexes->removeMachineInstrFromMaps(MI); | |
1387 SlotIndex NewIndex = Indexes->insertMachineInstrInMaps(MI); | |
1388 assert(getMBBStartIdx(MI.getParent()) <= OldIndex && | |
1389 OldIndex < getMBBEndIdx(MI.getParent()) && | |
1390 "Cannot handle moves across basic block boundaries."); | |
1391 | |
1392 HMEditor HME(*this, *MRI, *TRI, OldIndex, NewIndex, UpdateFlags); | |
1393 HME.updateAllRanges(&MI); | |
1394 } | |
1395 | |
1396 void LiveIntervals::handleMoveIntoBundle(MachineInstr &MI, | |
1397 MachineInstr &BundleStart, | |
1398 bool UpdateFlags) { | |
1399 SlotIndex OldIndex = Indexes->getInstructionIndex(MI); | |
1400 SlotIndex NewIndex = Indexes->getInstructionIndex(BundleStart); | |
1401 HMEditor HME(*this, *MRI, *TRI, OldIndex, NewIndex, UpdateFlags); | |
1402 HME.updateAllRanges(&MI); | |
1403 } | |
1404 | |
void LiveIntervals::repairOldRegInRange(const MachineBasicBlock::iterator Begin,
                                        const MachineBasicBlock::iterator End,
                                        const SlotIndex endIdx,
                                        LiveRange &LR, const unsigned Reg,
                                        LaneBitmask LaneMask) {
  // Walk [Begin, End) backwards and rebuild LR's segments for Reg (restricted
  // to LaneMask) from the defs and uses actually present in the region.
  LiveInterval::iterator LII = LR.find(endIdx);
  SlotIndex lastUseIdx;
  if (LII == LR.begin()) {
    // This happens when the function is called for a subregister that only
    // occurs _after_ the range that is to be repaired.
    return;
  }
  if (LII != LR.end() && LII->start < endIdx)
    lastUseIdx = LII->end;
  else
    --LII;

  for (MachineBasicBlock::iterator I = End; I != Begin;) {
    --I;
    MachineInstr &MI = *I;
    if (MI.isDebugValue())
      continue;

    SlotIndex instrIdx = getInstructionIndex(MI);
    // A segment boundary is "valid" if it still maps to a real instruction;
    // boundaries left dangling by removed instructions need repair.
    bool isStartValid = getInstructionFromIndex(LII->start);
    bool isEndValid = getInstructionFromIndex(LII->end);

    // FIXME: This doesn't currently handle early-clobber or multiple removed
    // defs inside of the region to repair.
    for (MachineInstr::mop_iterator OI = MI.operands_begin(),
                                    OE = MI.operands_end();
         OI != OE; ++OI) {
      const MachineOperand &MO = *OI;
      if (!MO.isReg() || MO.getReg() != Reg)
        continue;

      // Skip operands whose lanes don't intersect the (sub)range of interest.
      unsigned SubReg = MO.getSubReg();
      LaneBitmask Mask = TRI->getSubRegIndexLaneMask(SubReg);
      if ((Mask & LaneMask).none())
        continue;

      if (MO.isDef()) {
        if (!isStartValid) {
          if (LII->end.isDead()) {
            // The old dead def no longer exists: drop the segment, then
            // reposition LII since removeSegment invalidates iterators.
            SlotIndex prevStart;
            if (LII != LR.begin())
              prevStart = std::prev(LII)->start;

            // FIXME: This could be more efficient if there was a
            // removeSegment method that returned an iterator.
            LR.removeSegment(*LII, true);
            if (prevStart.isValid())
              LII = LR.find(prevStart);
            else
              LII = LR.begin();
          } else {
            // Re-anchor the segment and its value at this def.
            LII->start = instrIdx.getRegSlot();
            LII->valno->def = instrIdx.getRegSlot();
            if (MO.getSubReg() && !MO.isUndef())
              lastUseIdx = instrIdx.getRegSlot();
            else
              lastUseIdx = SlotIndex();
            continue;
          }
        }

        if (!lastUseIdx.isValid()) {
          // No use seen after this def yet: create a dead def segment.
          VNInfo *VNI = LR.getNextValue(instrIdx.getRegSlot(), VNInfoAllocator);
          LiveRange::Segment S(instrIdx.getRegSlot(),
                               instrIdx.getDeadSlot(), VNI);
          LII = LR.addSegment(S);
        } else if (LII->start != instrIdx.getRegSlot()) {
          // A use was seen: live from this def up to that last use.
          VNInfo *VNI = LR.getNextValue(instrIdx.getRegSlot(), VNInfoAllocator);
          LiveRange::Segment S(instrIdx.getRegSlot(), lastUseIdx, VNI);
          LII = LR.addSegment(S);
        }

        // A subregister def (that isn't undef) also reads the register, so it
        // counts as a use for the segment being built below it.
        if (MO.getSubReg() && !MO.isUndef())
          lastUseIdx = instrIdx.getRegSlot();
        else
          lastUseIdx = SlotIndex();
      } else if (MO.isUse()) {
        // FIXME: This should probably be handled outside of this branch,
        // either as part of the def case (for defs inside of the region) or
        // after the loop over the region.
        if (!isEndValid && !LII->end.isBlock())
          LII->end = instrIdx.getRegSlot();
        if (!lastUseIdx.isValid())
          lastUseIdx = instrIdx.getRegSlot();
      }
    }
  }
}
1498 | |
1499 void | |
1500 LiveIntervals::repairIntervalsInRange(MachineBasicBlock *MBB, | |
1501 MachineBasicBlock::iterator Begin, | |
1502 MachineBasicBlock::iterator End, | |
1503 ArrayRef<unsigned> OrigRegs) { | |
1504 // Find anchor points, which are at the beginning/end of blocks or at | |
1505 // instructions that already have indexes. | |
1506 while (Begin != MBB->begin() && !Indexes->hasIndex(*Begin)) | |
1507 --Begin; | |
1508 while (End != MBB->end() && !Indexes->hasIndex(*End)) | |
1509 ++End; | |
1510 | |
1511 SlotIndex endIdx; | |
1512 if (End == MBB->end()) | |
1513 endIdx = getMBBEndIdx(MBB).getPrevSlot(); | |
1514 else | |
1515 endIdx = getInstructionIndex(*End); | |
1516 | |
1517 Indexes->repairIndexesInRange(MBB, Begin, End); | |
1518 | |
1519 for (MachineBasicBlock::iterator I = End; I != Begin;) { | |
1520 --I; | |
1521 MachineInstr &MI = *I; | |
1522 if (MI.isDebugValue()) | |
1523 continue; | |
1524 for (MachineInstr::const_mop_iterator MOI = MI.operands_begin(), | |
1525 MOE = MI.operands_end(); | |
1526 MOI != MOE; ++MOI) { | |
1527 if (MOI->isReg() && | |
1528 TargetRegisterInfo::isVirtualRegister(MOI->getReg()) && | |
1529 !hasInterval(MOI->getReg())) { | |
1530 createAndComputeVirtRegInterval(MOI->getReg()); | |
1531 } | |
1532 } | |
1533 } | |
1534 | |
1535 for (unsigned Reg : OrigRegs) { | |
1536 if (!TargetRegisterInfo::isVirtualRegister(Reg)) | |
1537 continue; | |
1538 | |
1539 LiveInterval &LI = getInterval(Reg); | |
1540 // FIXME: Should we support undefs that gain defs? | |
1541 if (!LI.hasAtLeastOneValue()) | |
1542 continue; | |
1543 | |
1544 for (LiveInterval::SubRange &S : LI.subranges()) | |
1545 repairOldRegInRange(Begin, End, endIdx, S, Reg, S.LaneMask); | |
1546 | |
1547 repairOldRegInRange(Begin, End, endIdx, LI, Reg); | |
1548 } | |
1549 } | |
1550 | |
1551 void LiveIntervals::removePhysRegDefAt(unsigned Reg, SlotIndex Pos) { | |
1552 for (MCRegUnitIterator Unit(Reg, TRI); Unit.isValid(); ++Unit) { | |
1553 if (LiveRange *LR = getCachedRegUnit(*Unit)) | |
1554 if (VNInfo *VNI = LR->getVNInfoAt(Pos)) | |
1555 LR->removeValNo(VNI); | |
1556 } | |
1557 } | |
1558 | |
1559 void LiveIntervals::removeVRegDefAt(LiveInterval &LI, SlotIndex Pos) { | |
1560 // LI may not have the main range computed yet, but its subranges may | |
1561 // be present. | |
1562 VNInfo *VNI = LI.getVNInfoAt(Pos); | |
1563 if (VNI != nullptr) { | |
1564 assert(VNI->def.getBaseIndex() == Pos.getBaseIndex()); | |
1565 LI.removeValNo(VNI); | |
1566 } | |
1567 | |
1568 // Also remove the value defined in subranges. | |
1569 for (LiveInterval::SubRange &S : LI.subranges()) { | |
1570 if (VNInfo *SVNI = S.getVNInfoAt(Pos)) | |
1571 if (SVNI->def.getBaseIndex() == Pos.getBaseIndex()) | |
1572 S.removeValNo(SVNI); | |
1573 } | |
1574 LI.removeEmptySubRanges(); | |
1575 } | |
1576 | |
1577 void LiveIntervals::splitSeparateComponents(LiveInterval &LI, | |
1578 SmallVectorImpl<LiveInterval*> &SplitLIs) { | |
1579 ConnectedVNInfoEqClasses ConEQ(*this); | |
1580 unsigned NumComp = ConEQ.Classify(LI); | |
1581 if (NumComp <= 1) | |
1582 return; | |
1583 DEBUG(dbgs() << " Split " << NumComp << " components: " << LI << '\n'); | |
1584 unsigned Reg = LI.reg; | |
1585 const TargetRegisterClass *RegClass = MRI->getRegClass(Reg); | |
1586 for (unsigned I = 1; I < NumComp; ++I) { | |
1587 unsigned NewVReg = MRI->createVirtualRegister(RegClass); | |
1588 LiveInterval &NewLI = createEmptyInterval(NewVReg); | |
1589 SplitLIs.push_back(&NewLI); | |
1590 } | |
1591 ConEQ.Distribute(LI, SplitLIs.data(), *MRI); | |
1592 } | |
1593 | |
1594 void LiveIntervals::constructMainRangeFromSubranges(LiveInterval &LI) { | |
1595 assert(LRCalc && "LRCalc not initialized."); | |
1596 LRCalc->reset(MF, getSlotIndexes(), DomTree, &getVNInfoAllocator()); | |
1597 LRCalc->constructMainRangeFromSubranges(LI); | |
1598 } |