comparison lib/Target/ARM/ARMTargetTransformInfo.h @ 120:1172e4bd9c6f

update 4.0.0
author mir3636
date Fri, 25 Nov 2016 19:14:25 +0900
parents afa8332a0e37
children 803732b1fca8
--- lib/Target/ARM/ARMTargetTransformInfo.h (101:34baf5011add)
+++ lib/Target/ARM/ARMTargetTransformInfo.h (120:1172e4bd9c6f)
@@ -43,24 +43,29 @@
 public:
   explicit ARMTTIImpl(const ARMBaseTargetMachine *TM, const Function &F)
       : BaseT(TM, F.getParent()->getDataLayout()), ST(TM->getSubtargetImpl(F)),
         TLI(ST->getTargetLowering()) {}
 
-  // Provide value semantics. MSVC requires that we spell all of these out.
-  ARMTTIImpl(const ARMTTIImpl &Arg)
-      : BaseT(static_cast<const BaseT &>(Arg)), ST(Arg.ST), TLI(Arg.TLI) {}
-  ARMTTIImpl(ARMTTIImpl &&Arg)
-      : BaseT(std::move(static_cast<BaseT &>(Arg))), ST(std::move(Arg.ST)),
-        TLI(std::move(Arg.TLI)) {}
-
   bool enableInterleavedAccessVectorization() { return true; }
 
+  /// Floating-point computation using ARMv8 AArch32 Advanced
+  /// SIMD instructions remains unchanged from ARMv7. Only AArch64 SIMD
+  /// is IEEE-754 compliant, but it's not covered in this target.
+  bool isFPVectorizationPotentiallyUnsafe() {
+    return !ST->isTargetDarwin();
+  }
+
   /// \name Scalar TTI Implementations
   /// @{
 
+  int getIntImmCodeSizeCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
+                            Type *Ty);
+
   using BaseT::getIntImmCost;
   int getIntImmCost(const APInt &Imm, Type *Ty);
+
+  int getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm, Type *Ty);
 
   /// @}
 
   /// \name Vector TTI Implementations
   /// @{
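
The new isFPVectorizationPotentiallyUnsafe() hook tells vectorization passes that AArch32 Advanced SIMD arithmetic is not fully IEEE-754 compliant, so floating-point vectorization should require relaxed-math permission. A minimal caller-side sketch of consulting the hook through the TargetTransformInfo wrapper; the helper and its RelaxedFPMath flag are illustrative, not the actual LoopVectorize call site:

#include "llvm/Analysis/TargetTransformInfo.h"

using namespace llvm;

// Hypothetical guard: on targets whose SIMD unit may deviate from IEEE-754
// (the ARM hook above returns true for non-Darwin AArch32), vectorize FP
// code only when the user has opted into relaxed FP semantics.
static bool fpVectorizationAllowed(const TargetTransformInfo &TTI,
                                   bool RelaxedFPMath) {
  return !TTI.isFPVectorizationPotentiallyUnsafe() || RelaxedFPMath;
}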
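
The two added immediate-cost declarations extend the scalar cost model: the Opcode/Idx overload of getIntImmCost prices an immediate in the context of the instruction that uses it, while getIntImmCodeSizeCost prices it by encoding size for size-optimized code. A sketch of how a constant-hoisting-style client might choose between the two metrics; the helper is hypothetical:

#include "llvm/ADT/APInt.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/Type.h"

using namespace llvm;

// Hypothetical cost query: use the code-size metric under minsize, the
// default (speed-oriented) metric otherwise. Opcode/OpIdx identify the
// instruction and operand position where the immediate Imm appears.
static int immMaterializationCost(const TargetTransformInfo &TTI,
                                  unsigned Opcode, unsigned OpIdx,
                                  const APInt &Imm, Type *Ty,
                                  bool OptForMinSize) {
  return OptForMinSize ? TTI.getIntImmCodeSizeCost(Opcode, OpIdx, Imm, Ty)
                       : TTI.getIntImmCost(Opcode, OpIdx, Imm, Ty);
}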
@@ -86,14 +91,11 @@
 
     return 32;
   }
 
   unsigned getMaxInterleaveFactor(unsigned VF) {
-    // These are out of order CPUs:
-    if (ST->isCortexA15() || ST->isSwift())
-      return 2;
-    return 1;
+    return ST->getMaxInterleaveFactor();
   }
 
   int getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index, Type *SubTp);
 
   int getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src);
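
getMaxInterleaveFactor() now forwards to the subtarget instead of hard-coding the list of out-of-order cores in the TTI layer. A sketch of the subtarget side this delegation assumes; the ARMSubtarget change itself is outside this diff, and the field name and defaults below merely mirror the old logic:

// Sketch only: the subtarget owns the tuning knob, so new CPUs set it at
// subtarget-construction time rather than growing an if-chain in the TTI.
class ARMSubtargetSketch {
  // Out-of-order cores such as Cortex-A15 and Swift would set this to 2,
  // matching the old hard-coded behavior; everything else stays at 1.
  unsigned MaxInterleaveFactor = 1;

public:
  unsigned getMaxInterleaveFactor() const { return MaxInterleaveFactor; }
};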
@@ -117,10 +119,20 @@
                       unsigned AddressSpace);
 
   int getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy, unsigned Factor,
                                  ArrayRef<unsigned> Indices, unsigned Alignment,
                                  unsigned AddressSpace);
+
+  bool shouldBuildLookupTablesForConstant(Constant *C) const {
+    // In the ROPI and RWPI relocation models we can't have pointers to global
+    // variables or functions in constant data, so don't convert switches to
+    // lookup tables if any of the values would need relocation.
+    if (ST->isROPI() || ST->isRWPI())
+      return !C->needsRelocation();
+
+    return true;
+  }
   /// @}
 };
 
 } // end namespace llvm
 
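
shouldBuildLookupTablesForConstant() gives the switch-to-lookup-table transform a per-constant veto: under ROPI/RWPI, constant data cannot carry relocations, so a table entry whose value is, say, the address of a global must block the conversion. A caller-side sketch of that filtering; the helper name and shape are illustrative rather than SimplifyCFG's actual code:

#include "llvm/ADT/ArrayRef.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/Constants.h"

using namespace llvm;

// Hypothetical filter: a switch is turned into a constant table only if the
// target accepts every value that would be stored in it. With the ARM hook
// above, ROPI/RWPI builds reject any entry that needsRelocation().
static bool canUseLookupTable(ArrayRef<Constant *> TableEntries,
                              const TargetTransformInfo &TTI) {
  for (Constant *C : TableEntries)
    if (!TTI.shouldBuildLookupTablesForConstant(C))
      return false;
  return true;
}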