lib/CodeGen/TargetInstrInfo.cpp @ 0:95c75e76d11b (LLVM 3.4)

author   Kaito Tokumori <e105711@ie.u-ryukyu.ac.jp>
date     Thu, 12 Dec 2013 13:56:28 +0900
children e4204d083e25
//===-- TargetInstrInfo.cpp - Target Instruction Information --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValue.h"
#include "llvm/CodeGen/ScoreboardHazardRecognizer.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCInstrItineraries.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include <cctype>
#include <cstring> // strncmp/strlen, used by getInlineAsmLength below.
using namespace llvm;
static cl::opt<bool> DisableHazardRecognizer(
    "disable-sched-hazard", cl::Hidden, cl::init(false),
    cl::desc("Disable hazard detection during preRA scheduling"));

TargetInstrInfo::~TargetInstrInfo() {
}

const TargetRegisterClass*
TargetInstrInfo::getRegClass(const MCInstrDesc &MCID, unsigned OpNum,
                             const TargetRegisterInfo *TRI,
                             const MachineFunction &MF) const {
  if (OpNum >= MCID.getNumOperands())
    return 0;

  short RegClass = MCID.OpInfo[OpNum].RegClass;
  if (MCID.OpInfo[OpNum].isLookupPtrRegClass())
    return TRI->getPointerRegClass(MF, RegClass);

  // Instructions like INSERT_SUBREG do not have fixed register classes.
  if (RegClass < 0)
    return 0;

  // Otherwise just look it up normally.
  return TRI->getRegClass(RegClass);
}
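
// Usage sketch (illustrative, not part of the original file): querying the
// register class that operand 1 of some MachineInstr must belong to. The
// names "TII", "MI", "TRI", and "MF" are assumed to be in scope.
//
//   const TargetRegisterClass *RC =
//       TII.getRegClass(MI->getDesc(), 1, TRI, MF);
//   // RC is null if operand 1 has no fixed register-class constraint.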

/// insertNoop - Insert a noop into the instruction stream at the specified
/// point.
void TargetInstrInfo::insertNoop(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator MI) const {
  llvm_unreachable("Target didn't implement insertNoop!");
}

/// Measure the specified inline asm to determine an approximation of its
/// length.
/// Comments (which run till the next SeparatorString or newline) do not
/// count as an instruction.
/// Any other non-whitespace text is considered an instruction, with
/// multiple instructions separated by SeparatorString or newlines.
/// Variable-length instructions are not handled here; this function
/// may be overridden in the target code to do that.
unsigned TargetInstrInfo::getInlineAsmLength(const char *Str,
                                             const MCAsmInfo &MAI) const {
  // Count the number of instructions in the asm.
  bool atInsnStart = true;
  unsigned Length = 0;
  for (; *Str; ++Str) {
    if (*Str == '\n' || strncmp(Str, MAI.getSeparatorString(),
                                strlen(MAI.getSeparatorString())) == 0)
      atInsnStart = true;
    // Check for a comment first; otherwise a comment starting an instruction
    // would be counted as an instruction by the check below.
    if (atInsnStart && strncmp(Str, MAI.getCommentString(),
                               strlen(MAI.getCommentString())) == 0)
      atInsnStart = false;
    if (atInsnStart && !std::isspace(static_cast<unsigned char>(*Str))) {
      Length += MAI.getMaxInstLength();
      atInsnStart = false;
    }
  }

  return Length;
}
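
// Worked example (illustrative, hypothetical MCAsmInfo values): assume
// MAI.getMaxInstLength() == 4 and MAI.getCommentString() == "#".
//
//   getInlineAsmLength("addl %eax, %ebx\n# update\nmovl %ecx, %edx", MAI)
//
// counts two instruction starts ("addl...", "movl...") and skips the
// comment line, returning 2 * 4 = 8.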

/// ReplaceTailWithBranchTo - Delete the instruction Tail and everything
/// after it, replacing it with an unconditional branch to NewDest.
void
TargetInstrInfo::ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
                                         MachineBasicBlock *NewDest) const {
  MachineBasicBlock *MBB = Tail->getParent();

  // Remove all the old successors of MBB from the CFG.
  while (!MBB->succ_empty())
    MBB->removeSuccessor(MBB->succ_begin());

  // Remove all the dead instructions from the end of MBB.
  MBB->erase(Tail, MBB->end());

  // If NewDest isn't immediately after MBB, insert a branch to it.
  if (++MachineFunction::iterator(MBB) != MachineFunction::iterator(NewDest))
    InsertBranch(*MBB, NewDest, 0, SmallVector<MachineOperand, 0>(),
                 Tail->getDebugLoc());
  MBB->addSuccessor(NewDest);
}

// commuteInstruction - The default implementation of this method just
// exchanges the two operands returned by findCommutedOpIndices.
MachineInstr *TargetInstrInfo::commuteInstruction(MachineInstr *MI,
                                                  bool NewMI) const {
  const MCInstrDesc &MCID = MI->getDesc();
  bool HasDef = MCID.getNumDefs();
  if (HasDef && !MI->getOperand(0).isReg())
    // No idea how to commute this instruction. Target should implement its own.
    return 0;
  unsigned Idx1, Idx2;
  if (!findCommutedOpIndices(MI, Idx1, Idx2)) {
    std::string msg;
    raw_string_ostream Msg(msg);
    Msg << "Don't know how to commute: " << *MI;
    report_fatal_error(Msg.str());
  }

  assert(MI->getOperand(Idx1).isReg() && MI->getOperand(Idx2).isReg() &&
         "This only knows how to commute register operands so far");
  unsigned Reg0 = HasDef ? MI->getOperand(0).getReg() : 0;
  unsigned Reg1 = MI->getOperand(Idx1).getReg();
  unsigned Reg2 = MI->getOperand(Idx2).getReg();
  unsigned SubReg0 = HasDef ? MI->getOperand(0).getSubReg() : 0;
  unsigned SubReg1 = MI->getOperand(Idx1).getSubReg();
  unsigned SubReg2 = MI->getOperand(Idx2).getSubReg();
  bool Reg1IsKill = MI->getOperand(Idx1).isKill();
  bool Reg2IsKill = MI->getOperand(Idx2).isKill();
  // If the destination is tied to either of the commuted source registers,
  // then it must be updated.
  if (HasDef && Reg0 == Reg1 &&
      MI->getDesc().getOperandConstraint(Idx1, MCOI::TIED_TO) == 0) {
    Reg2IsKill = false;
    Reg0 = Reg2;
    SubReg0 = SubReg2;
  } else if (HasDef && Reg0 == Reg2 &&
             MI->getDesc().getOperandConstraint(Idx2, MCOI::TIED_TO) == 0) {
    Reg1IsKill = false;
    Reg0 = Reg1;
    SubReg0 = SubReg1;
  }

  if (NewMI) {
    // Create a new instruction.
    MachineFunction &MF = *MI->getParent()->getParent();
    MI = MF.CloneMachineInstr(MI);
  }

  if (HasDef) {
    MI->getOperand(0).setReg(Reg0);
    MI->getOperand(0).setSubReg(SubReg0);
  }
  MI->getOperand(Idx2).setReg(Reg1);
  MI->getOperand(Idx1).setReg(Reg2);
  MI->getOperand(Idx2).setSubReg(SubReg1);
  MI->getOperand(Idx1).setSubReg(SubReg2);
  MI->getOperand(Idx2).setIsKill(Reg1IsKill);
  MI->getOperand(Idx1).setIsKill(Reg2IsKill);
  return MI;
}
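
// Worked example (illustrative; register names are hypothetical). For a
// three-address add, the two sources simply swap, kill flags included:
//
//   %v0 = ADD %v1<kill>, %v2      ==>   %v0 = ADD %v2, %v1<kill>
//
// If the instruction is two-address and the def is tied to the first
// source, the def is rewritten as well so the tie still holds:
//
//   %v1 = ADD %v1(tied), %v2      ==>   %v2 = ADD %v2(tied), %v1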

/// findCommutedOpIndices - If specified MI is commutable, return the two
/// operand indices that would swap value. Return false if the instruction
/// is not in a form which this routine understands.
bool TargetInstrInfo::findCommutedOpIndices(MachineInstr *MI,
                                            unsigned &SrcOpIdx1,
                                            unsigned &SrcOpIdx2) const {
  assert(!MI->isBundle() &&
         "TargetInstrInfo::findCommutedOpIndices() can't handle bundles");

  const MCInstrDesc &MCID = MI->getDesc();
  if (!MCID.isCommutable())
    return false;
  // This assumes v0 = op v1, v2 and commuting would swap v1 and v2. If this
  // is not true, then the target must implement this.
  SrcOpIdx1 = MCID.getNumDefs();
  SrcOpIdx2 = SrcOpIdx1 + 1;
  if (!MI->getOperand(SrcOpIdx1).isReg() ||
      !MI->getOperand(SrcOpIdx2).isReg())
    // No idea.
    return false;
  return true;
}


bool
TargetInstrInfo::isUnpredicatedTerminator(const MachineInstr *MI) const {
  if (!MI->isTerminator()) return false;

  // Conditional branch is a special case.
  if (MI->isBranch() && !MI->isBarrier())
    return true;
  if (!MI->isPredicable())
    return true;
  return !isPredicated(MI);
}


bool TargetInstrInfo::PredicateInstruction(MachineInstr *MI,
                            const SmallVectorImpl<MachineOperand> &Pred) const {
  bool MadeChange = false;

  assert(!MI->isBundle() &&
         "TargetInstrInfo::PredicateInstruction() can't handle bundles");

  const MCInstrDesc &MCID = MI->getDesc();
  if (!MI->isPredicable())
    return false;

  for (unsigned j = 0, i = 0, e = MI->getNumOperands(); i != e; ++i) {
    if (MCID.OpInfo[i].isPredicate()) {
      MachineOperand &MO = MI->getOperand(i);
      if (MO.isReg()) {
        MO.setReg(Pred[j].getReg());
        MadeChange = true;
      } else if (MO.isImm()) {
        MO.setImm(Pred[j].getImm());
        MadeChange = true;
      } else if (MO.isMBB()) {
        MO.setMBB(Pred[j].getMBB());
        MadeChange = true;
      }
      ++j;
    }
  }
  return MadeChange;
}

bool TargetInstrInfo::hasLoadFromStackSlot(const MachineInstr *MI,
                                           const MachineMemOperand *&MMO,
                                           int &FrameIndex) const {
  for (MachineInstr::mmo_iterator o = MI->memoperands_begin(),
         oe = MI->memoperands_end();
       o != oe;
       ++o) {
    if ((*o)->isLoad() && (*o)->getValue())
      if (const FixedStackPseudoSourceValue *Value =
          dyn_cast<const FixedStackPseudoSourceValue>((*o)->getValue())) {
        FrameIndex = Value->getFrameIndex();
        MMO = *o;
        return true;
      }
  }
  return false;
}

bool TargetInstrInfo::hasStoreToStackSlot(const MachineInstr *MI,
                                          const MachineMemOperand *&MMO,
                                          int &FrameIndex) const {
  for (MachineInstr::mmo_iterator o = MI->memoperands_begin(),
         oe = MI->memoperands_end();
       o != oe;
       ++o) {
    if ((*o)->isStore() && (*o)->getValue())
      if (const FixedStackPseudoSourceValue *Value =
          dyn_cast<const FixedStackPseudoSourceValue>((*o)->getValue())) {
        FrameIndex = Value->getFrameIndex();
        MMO = *o;
        return true;
      }
  }
  return false;
}

bool TargetInstrInfo::getStackSlotRange(const TargetRegisterClass *RC,
                                        unsigned SubIdx, unsigned &Size,
                                        unsigned &Offset,
                                        const TargetMachine *TM) const {
  if (!SubIdx) {
    Size = RC->getSize();
    Offset = 0;
    return true;
  }
  unsigned BitSize = TM->getRegisterInfo()->getSubRegIdxSize(SubIdx);
  // Convert bit size to byte size to be consistent with
  // MCRegisterClass::getSize().
  if (BitSize % 8)
    return false;

  int BitOffset = TM->getRegisterInfo()->getSubRegIdxOffset(SubIdx);
  if (BitOffset < 0 || BitOffset % 8)
    return false;

  Size = BitSize / 8;
  Offset = (unsigned)BitOffset / 8;

  assert(RC->getSize() >= (Offset + Size) && "bad subregister range");

  if (!TM->getDataLayout()->isLittleEndian()) {
    Offset = RC->getSize() - (Offset + Size);
  }
  return true;
}
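
// Worked example (illustrative, hypothetical sub-register layout): for an
// 8-byte register class with a 32-bit sub-register index at bit offset 32:
//
//   BitSize = 32, BitOffset = 32  =>  Size = 4, Offset = 4
//
// On a big-endian target the byte offset within the spill slot is mirrored:
//
//   Offset = RC->getSize() - (Offset + Size) = 8 - (4 + 4) = 0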

void TargetInstrInfo::reMaterialize(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator I,
                                    unsigned DestReg,
                                    unsigned SubIdx,
                                    const MachineInstr *Orig,
                                    const TargetRegisterInfo &TRI) const {
  MachineInstr *MI = MBB.getParent()->CloneMachineInstr(Orig);
  MI->substituteRegister(MI->getOperand(0).getReg(), DestReg, SubIdx, TRI);
  MBB.insert(I, MI);
}

bool
TargetInstrInfo::produceSameValue(const MachineInstr *MI0,
                                  const MachineInstr *MI1,
                                  const MachineRegisterInfo *MRI) const {
  return MI0->isIdenticalTo(MI1, MachineInstr::IgnoreVRegDefs);
}

MachineInstr *TargetInstrInfo::duplicate(MachineInstr *Orig,
                                         MachineFunction &MF) const {
  assert(!Orig->isNotDuplicable() &&
         "Instruction cannot be duplicated");
  return MF.CloneMachineInstr(Orig);
}

// If the COPY instruction in MI can be folded to a stack operation, return
// the register class to use.
static const TargetRegisterClass *canFoldCopy(const MachineInstr *MI,
                                              unsigned FoldIdx) {
  assert(MI->isCopy() && "MI must be a COPY instruction");
  if (MI->getNumOperands() != 2)
    return 0;
  assert(FoldIdx < 2 && "FoldIdx refers to a nonexistent operand");

  const MachineOperand &FoldOp = MI->getOperand(FoldIdx);
  const MachineOperand &LiveOp = MI->getOperand(1 - FoldIdx);

  if (FoldOp.getSubReg() || LiveOp.getSubReg())
    return 0;

  unsigned FoldReg = FoldOp.getReg();
  unsigned LiveReg = LiveOp.getReg();

  assert(TargetRegisterInfo::isVirtualRegister(FoldReg) &&
         "Cannot fold physregs");

  const MachineRegisterInfo &MRI = MI->getParent()->getParent()->getRegInfo();
  const TargetRegisterClass *RC = MRI.getRegClass(FoldReg);

  if (TargetRegisterInfo::isPhysicalRegister(LiveOp.getReg()))
    return RC->contains(LiveOp.getReg()) ? RC : 0;

  if (RC->hasSubClassEq(MRI.getRegClass(LiveReg)))
    return RC;

  // FIXME: Allow folding when register classes are memory compatible.
  return 0;
}

bool TargetInstrInfo::
canFoldMemoryOperand(const MachineInstr *MI,
                     const SmallVectorImpl<unsigned> &Ops) const {
  return MI->isCopy() && Ops.size() == 1 && canFoldCopy(MI, Ops[0]);
}

/// foldMemoryOperand - Attempt to fold a load or store of the specified stack
/// slot into the specified machine instruction for the specified operand(s).
/// If this is possible, a new instruction is returned with the specified
/// operand folded, otherwise NULL is returned. The client is responsible for
/// removing the old instruction and adding the new one in the instruction
/// stream.
MachineInstr*
TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
                                   const SmallVectorImpl<unsigned> &Ops,
                                   int FI) const {
  unsigned Flags = 0;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    if (MI->getOperand(Ops[i]).isDef())
      Flags |= MachineMemOperand::MOStore;
    else
      Flags |= MachineMemOperand::MOLoad;

  MachineBasicBlock *MBB = MI->getParent();
  assert(MBB && "foldMemoryOperand needs an inserted instruction");
  MachineFunction &MF = *MBB->getParent();

  // Ask the target to do the actual folding.
  if (MachineInstr *NewMI = foldMemoryOperandImpl(MF, MI, Ops, FI)) {
    NewMI->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
    // Add a memory operand, foldMemoryOperandImpl doesn't do that.
    assert((!(Flags & MachineMemOperand::MOStore) ||
            NewMI->mayStore()) &&
           "Folded a def to a non-store!");
    assert((!(Flags & MachineMemOperand::MOLoad) ||
            NewMI->mayLoad()) &&
           "Folded a use to a non-load!");
    const MachineFrameInfo &MFI = *MF.getFrameInfo();
    assert(MFI.getObjectOffset(FI) != -1);
    MachineMemOperand *MMO =
      MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(FI),
                              Flags, MFI.getObjectSize(FI),
                              MFI.getObjectAlignment(FI));
    NewMI->addMemOperand(MF, MMO);

    // FIXME: change foldMemoryOperandImpl semantics to also insert NewMI.
    return MBB->insert(MI, NewMI);
  }

  // Straight COPY may fold as load/store.
  if (!MI->isCopy() || Ops.size() != 1)
    return 0;

  const TargetRegisterClass *RC = canFoldCopy(MI, Ops[0]);
  if (!RC)
    return 0;

  const MachineOperand &MO = MI->getOperand(1 - Ops[0]);
  MachineBasicBlock::iterator Pos = MI;
  const TargetRegisterInfo *TRI = MF.getTarget().getRegisterInfo();

  if (Flags == MachineMemOperand::MOStore)
    storeRegToStackSlot(*MBB, Pos, MO.getReg(), MO.isKill(), FI, RC, TRI);
  else
    loadRegFromStackSlot(*MBB, Pos, MO.getReg(), FI, RC, TRI);
  return --Pos;
}
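
// Worked example (illustrative; register and frame-index names are
// hypothetical): folding a spill into a plain COPY.
//
//   %v1 = COPY %v0<kill>        with  Ops = {0},  FI = <fi#3>
//
// Operand 0 is a def, so Flags == MOStore and the COPY fallback path above
// becomes a store of the source register:
//
//   storeRegToStackSlot(MBB, Pos, %v0, /*isKill=*/true, <fi#3>, RC, TRI)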

/// foldMemoryOperand - Same as the previous version except it allows folding
/// of any load and store from / to any address, not just from a specific
/// stack slot.
MachineInstr*
TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
                                   const SmallVectorImpl<unsigned> &Ops,
                                   MachineInstr* LoadMI) const {
  assert(LoadMI->canFoldAsLoad() && "LoadMI isn't foldable!");
#ifndef NDEBUG
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    assert(MI->getOperand(Ops[i]).isUse() && "Folding load into def!");
#endif
  MachineBasicBlock &MBB = *MI->getParent();
  MachineFunction &MF = *MBB.getParent();

  // Ask the target to do the actual folding.
  MachineInstr *NewMI = foldMemoryOperandImpl(MF, MI, Ops, LoadMI);
  if (!NewMI) return 0;

  NewMI = MBB.insert(MI, NewMI);

  // Copy the memoperands from the load to the folded instruction.
  if (MI->memoperands_empty()) {
    NewMI->setMemRefs(LoadMI->memoperands_begin(),
                      LoadMI->memoperands_end());
  } else {
    // Handle the rare case of folding multiple loads.
    NewMI->setMemRefs(MI->memoperands_begin(),
                      MI->memoperands_end());
    for (MachineInstr::mmo_iterator I = LoadMI->memoperands_begin(),
           E = LoadMI->memoperands_end(); I != E; ++I) {
      NewMI->addMemOperand(MF, *I);
    }
  }
  return NewMI;
}

bool TargetInstrInfo::
isReallyTriviallyReMaterializableGeneric(const MachineInstr *MI,
                                         AliasAnalysis *AA) const {
  const MachineFunction &MF = *MI->getParent()->getParent();
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  const TargetMachine &TM = MF.getTarget();
  const TargetInstrInfo &TII = *TM.getInstrInfo();

  // Remat clients assume operand 0 is the defined register.
  if (!MI->getNumOperands() || !MI->getOperand(0).isReg())
    return false;
  unsigned DefReg = MI->getOperand(0).getReg();

  // A sub-register definition can only be rematerialized if the instruction
  // doesn't read the other parts of the register. Otherwise it is really a
  // read-modify-write operation on the full virtual register which cannot be
  // moved safely.
  if (TargetRegisterInfo::isVirtualRegister(DefReg) &&
      MI->getOperand(0).getSubReg() && MI->readsVirtualRegister(DefReg))
    return false;

  // A load from a fixed stack slot can be rematerialized. This may be
  // redundant with subsequent checks, but it's target-independent,
  // simple, and a common case.
  int FrameIdx = 0;
  if (TII.isLoadFromStackSlot(MI, FrameIdx) &&
      MF.getFrameInfo()->isImmutableObjectIndex(FrameIdx))
    return true;

  // Avoid instructions obviously unsafe for remat.
  if (MI->isNotDuplicable() || MI->mayStore() ||
      MI->hasUnmodeledSideEffects())
    return false;

  // Don't remat inline asm. We have no idea how expensive it is
  // even if it's side effect free.
  if (MI->isInlineAsm())
    return false;

  // Avoid instructions which load from potentially varying memory.
  if (MI->mayLoad() && !MI->isInvariantLoad(AA))
    return false;

  // If any of the registers accessed are non-constant, conservatively assume
  // the instruction is not rematerializable.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg()) continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0)
      continue;

    // Check for a well-behaved physical register.
    if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
      if (MO.isUse()) {
        // If the physreg has no defs anywhere, it's just an ambient register
        // and we can freely move its uses. Alternatively, if it's allocatable,
        // it could get allocated to something with a def during allocation.
        if (!MRI.isConstantPhysReg(Reg, MF))
          return false;
      } else {
        // A physreg def. We can't remat it.
        return false;
      }
      continue;
    }

    // Only allow one virtual-register def. There may be multiple defs of the
    // same virtual register, though.
    if (MO.isDef() && Reg != DefReg)
      return false;

    // Don't allow any virtual-register uses. Rematting an instruction with
    // virtual register uses would lengthen the live ranges of the uses, which
    // is not necessarily a good idea, certainly not "trivial".
    if (MO.isUse())
      return false;
  }

  // Everything checked out.
  return true;
}
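
// Examples of how these checks play out (illustrative, x86-flavored pseudo
// machine instructions; not taken from the original file):
//
//   %v0 = MOV32ri 42            ; remat OK: no uses, no side effects
//   %v0 = MOV32rm <ga:@cst>     ; remat OK only if the load is invariant
//   %v0 = ADD32rr %v1, %v2      ; rejected: virtual-register uses would
//                               ; lengthen the live ranges of %v1 and %v2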

/// isSchedulingBoundary - Test if the given instruction should be
/// considered a scheduling boundary. This primarily includes labels
/// and terminators.
bool TargetInstrInfo::isSchedulingBoundary(const MachineInstr *MI,
                                           const MachineBasicBlock *MBB,
                                           const MachineFunction &MF) const {
  // Terminators and labels can't be scheduled around.
  if (MI->isTerminator() || MI->isLabel())
    return true;

  // Don't attempt to schedule around any instruction that defines
  // a stack-oriented pointer, as it's unlikely to be profitable. This
  // saves compile time, because it doesn't require every single
  // stack slot reference to depend on the instruction that does the
  // modification.
  const TargetLowering &TLI = *MF.getTarget().getTargetLowering();
  const TargetRegisterInfo *TRI = MF.getTarget().getRegisterInfo();
  if (MI->modifiesRegister(TLI.getStackPointerRegisterToSaveRestore(), TRI))
    return true;

  return false;
}

// Provide a global flag for disabling the PreRA hazard recognizer that targets
// may choose to honor.
bool TargetInstrInfo::usePreRAHazardRecognizer() const {
  return !DisableHazardRecognizer;
}

// Default implementation of CreateTargetRAHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetHazardRecognizer(const TargetMachine *TM,
                             const ScheduleDAG *DAG) const {
  // Dummy hazard recognizer allows all instructions to issue.
  return new ScheduleHazardRecognizer();
}

// Default implementation of CreateTargetMIHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetMIHazardRecognizer(const InstrItineraryData *II,
                               const ScheduleDAG *DAG) const {
  return (ScheduleHazardRecognizer *)
    new ScoreboardHazardRecognizer(II, DAG, "misched");
}

// Default implementation of CreateTargetPostRAHazardRecognizer.
ScheduleHazardRecognizer *TargetInstrInfo::
CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II,
                                   const ScheduleDAG *DAG) const {
  return (ScheduleHazardRecognizer *)
    new ScoreboardHazardRecognizer(II, DAG, "post-RA-sched");
}

//===----------------------------------------------------------------------===//
// SelectionDAG latency interface.
//===----------------------------------------------------------------------===//

int
TargetInstrInfo::getOperandLatency(const InstrItineraryData *ItinData,
                                   SDNode *DefNode, unsigned DefIdx,
                                   SDNode *UseNode, unsigned UseIdx) const {
  if (!ItinData || ItinData->isEmpty())
    return -1;

  if (!DefNode->isMachineOpcode())
    return -1;

  unsigned DefClass = get(DefNode->getMachineOpcode()).getSchedClass();
  if (!UseNode->isMachineOpcode())
    return ItinData->getOperandCycle(DefClass, DefIdx);
  unsigned UseClass = get(UseNode->getMachineOpcode()).getSchedClass();
  return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
}

int TargetInstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                     SDNode *N) const {
  if (!ItinData || ItinData->isEmpty())
    return 1;

  if (!N->isMachineOpcode())
    return 1;

  return ItinData->getStageLatency(get(N->getMachineOpcode()).getSchedClass());
}

//===----------------------------------------------------------------------===//
// MachineInstr latency interface.
//===----------------------------------------------------------------------===//

unsigned
TargetInstrInfo::getNumMicroOps(const InstrItineraryData *ItinData,
                                const MachineInstr *MI) const {
  if (!ItinData || ItinData->isEmpty())
    return 1;

  unsigned Class = MI->getDesc().getSchedClass();
  int UOps = ItinData->Itineraries[Class].NumMicroOps;
  if (UOps >= 0)
    return UOps;

  // The # of u-ops is dynamically determined. The specific target should
  // override this function to return the right number.
  return 1;
}

/// Return the default expected latency for a def based on its opcode.
unsigned TargetInstrInfo::defaultDefLatency(const MCSchedModel *SchedModel,
                                            const MachineInstr *DefMI) const {
  if (DefMI->isTransient())
    return 0;
  if (DefMI->mayLoad())
    return SchedModel->LoadLatency;
  if (isHighLatencyDef(DefMI->getOpcode()))
    return SchedModel->HighLatency;
  return 1;
}
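
// Decision sketch for defaultDefLatency (illustrative, with hypothetical
// SchedModel values LoadLatency = 4 and HighLatency = 10):
//
//   %v0 = COPY %v1        ; transient                  -> 0 cycles
//   %v0 = LOAD <fi#1>     ; mayLoad()                  -> 4 cycles
//   %v0 = DIV %v1, %v2    ; isHighLatencyDef(opcode)   -> 10 cycles
//   %v0 = ADD %v1, %v2    ; default                    -> 1 cycle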

unsigned TargetInstrInfo::getPredicationCost(const MachineInstr *) const {
  return 0;
}

unsigned TargetInstrInfo::
getInstrLatency(const InstrItineraryData *ItinData,
                const MachineInstr *MI,
                unsigned *PredCost) const {
  // Default to one cycle for no itinerary. However, an "empty" itinerary may
  // still have a MinLatency property, which getStageLatency checks.
  if (!ItinData)
    return MI->mayLoad() ? 2 : 1;

  return ItinData->getStageLatency(MI->getDesc().getSchedClass());
}

bool TargetInstrInfo::hasLowDefLatency(const InstrItineraryData *ItinData,
                                       const MachineInstr *DefMI,
                                       unsigned DefIdx) const {
  if (!ItinData || ItinData->isEmpty())
    return false;

  unsigned DefClass = DefMI->getDesc().getSchedClass();
  int DefCycle = ItinData->getOperandCycle(DefClass, DefIdx);
  return (DefCycle != -1 && DefCycle <= 1);
}

/// Both DefMI and UseMI must be valid. By default, call directly to the
/// itinerary. This may be overridden by the target.
int TargetInstrInfo::
getOperandLatency(const InstrItineraryData *ItinData,
                  const MachineInstr *DefMI, unsigned DefIdx,
                  const MachineInstr *UseMI, unsigned UseIdx) const {
  unsigned DefClass = DefMI->getDesc().getSchedClass();
  unsigned UseClass = UseMI->getDesc().getSchedClass();
  return ItinData->getOperandLatency(DefClass, DefIdx, UseClass, UseIdx);
}

/// If we can determine the operand latency from the def only, without
/// itinerary lookup, do so. Otherwise return -1.
int TargetInstrInfo::computeDefOperandLatency(
    const InstrItineraryData *ItinData,
    const MachineInstr *DefMI) const {

  // Let the target hook getInstrLatency handle missing itineraries.
  if (!ItinData)
    return getInstrLatency(ItinData, DefMI);

  if (ItinData->isEmpty())
    return defaultDefLatency(ItinData->SchedModel, DefMI);

  // ...operand lookup required
  return -1;
}

/// computeOperandLatency - Compute and return the latency of the given data
/// dependent def and use when the operand indices are already known. UseMI
/// may be NULL for an unknown use.
///
/// Depending on the subtarget's itinerary properties, this may or may not
/// need to call getOperandLatency(). For most subtargets, we don't need
/// DefIdx or UseIdx to compute min latency.
unsigned TargetInstrInfo::
computeOperandLatency(const InstrItineraryData *ItinData,
                      const MachineInstr *DefMI, unsigned DefIdx,
                      const MachineInstr *UseMI, unsigned UseIdx) const {

  int DefLatency = computeDefOperandLatency(ItinData, DefMI);
  if (DefLatency >= 0)
    return DefLatency;

  assert(ItinData && !ItinData->isEmpty() && "computeDefOperandLatency fail");

  int OperLatency = 0;
  if (UseMI)
    OperLatency = getOperandLatency(ItinData, DefMI, DefIdx, UseMI, UseIdx);
  else {
    unsigned DefClass = DefMI->getDesc().getSchedClass();
    OperLatency = ItinData->getOperandCycle(DefClass, DefIdx);
  }
  if (OperLatency >= 0)
    return OperLatency;

  // No operand latency was found.
  unsigned InstrLatency = getInstrLatency(ItinData, DefMI);

  // Expected latency is the max of the stage latency and itinerary props.
  InstrLatency = std::max(InstrLatency,
                          defaultDefLatency(ItinData->SchedModel, DefMI));
  return InstrLatency;
}
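
// Usage sketch (illustrative; "TII", "Itin", "DefMI", and "UseMI" are
// hypothetical names assumed to be in scope): the latency from the value
// defined by operand 0 of DefMI to its use as operand 2 of UseMI.
//
//   unsigned Lat = TII.computeOperandLatency(Itin, DefMI, 0, UseMI, 2);
//
// The fallback chain above resolves, in order: def-only latency (missing or
// empty itinerary), then the itinerary's operand latency, then the stage
// latency clamped from below by defaultDefLatency.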