comparison include/llvm/Analysis/TargetTransformInfo.h @ 83:60c9769439b8 LLVM3.7

LLVM 3.7
author Tatsuki IHA <e125716@ie.u-ryukyu.ac.jp>
date Wed, 18 Feb 2015 14:55:36 +0900
parents 54457678186b
children afa8332a0e37
comparison of 78:af83660cff7b with 83:60c9769439b8
1 //===- llvm/Analysis/TargetTransformInfo.h ----------------------*- C++ -*-===// 1 //===- TargetTransformInfo.h ------------------------------------*- C++ -*-===//
2 // 2 //
3 // The LLVM Compiler Infrastructure 3 // The LLVM Compiler Infrastructure
4 // 4 //
5 // This file is distributed under the University of Illinois Open Source 5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details. 6 // License. See LICENSE.TXT for details.
7 // 7 //
8 //===----------------------------------------------------------------------===// 8 //===----------------------------------------------------------------------===//
9 // 9 /// \file
10 // This pass exposes codegen information to IR-level passes. Every 10 /// This pass exposes codegen information to IR-level passes. Every
11 // transformation that uses codegen information is broken into three parts: 11 /// transformation that uses codegen information is broken into three parts:
12 // 1. The IR-level analysis pass. 12 /// 1. The IR-level analysis pass.
13 // 2. The IR-level transformation interface which provides the needed 13 /// 2. The IR-level transformation interface which provides the needed
14 // information. 14 /// information.
15 // 3. Codegen-level implementation which uses target-specific hooks. 15 /// 3. Codegen-level implementation which uses target-specific hooks.
16 // 16 ///
17 // This file defines #2, which is the interface that IR-level transformations 17 /// This file defines #2, which is the interface that IR-level transformations
18 // use for querying the codegen. 18 /// use for querying the codegen.
19 // 19 ///
20 //===----------------------------------------------------------------------===// 20 //===----------------------------------------------------------------------===//
21 21
22 #ifndef LLVM_ANALYSIS_TARGETTRANSFORMINFO_H 22 #ifndef LLVM_ANALYSIS_TARGETTRANSFORMINFO_H
23 #define LLVM_ANALYSIS_TARGETTRANSFORMINFO_H 23 #define LLVM_ANALYSIS_TARGETTRANSFORMINFO_H
24 24
25 #include "llvm/ADT/Optional.h"
26 #include "llvm/IR/IntrinsicInst.h"
25 #include "llvm/IR/Intrinsics.h" 27 #include "llvm/IR/Intrinsics.h"
26 #include "llvm/Pass.h" 28 #include "llvm/Pass.h"
27 #include "llvm/Support/DataTypes.h" 29 #include "llvm/Support/DataTypes.h"
28 30
29 namespace llvm { 31 namespace llvm {
30 32
33 class Function;
31 class GlobalValue; 34 class GlobalValue;
32 class Loop; 35 class Loop;
36 class PreservedAnalyses;
33 class Type; 37 class Type;
34 class User; 38 class User;
35 class Value; 39 class Value;
36 40
37 /// TargetTransformInfo - This pass provides access to the codegen 41 /// \brief Information about a load/store intrinsic defined by the target.
38 /// interfaces that are needed for IR-level transformations. 42 struct MemIntrinsicInfo {
43 MemIntrinsicInfo()
44 : ReadMem(false), WriteMem(false), Vol(false), MatchingId(0),
45 NumMemRefs(0), PtrVal(nullptr) {}
46 bool ReadMem;
47 bool WriteMem;
48 bool Vol;
49 // Same Id is set by the target for corresponding load/store intrinsics.
50 unsigned short MatchingId;
51 int NumMemRefs;
52 Value *PtrVal;
53 };
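A minimal usage sketch (illustrative only, not from the diffed source): roughly how an IR-level pass could consume MemIntrinsicInfo through the getTgtMemIntrinsic query declared later in this header; the helper name is hypothetical.

    static bool isSimpleTargetLoadIntrinsic(const TargetTransformInfo &TTI,
                                            IntrinsicInst *II) {
      MemIntrinsicInfo Info;
      if (!TTI.getTgtMemIntrinsic(II, Info))
        return false;               // not a recognized target load/store intrinsic
      // Treat it as a plain load: reads memory, no write, not volatile, known pointer.
      return Info.ReadMem && !Info.WriteMem && !Info.Vol && Info.PtrVal;
    }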
54
55 /// \brief This pass provides access to the codegen interfaces that are needed
56 /// for IR-level transformations.
39 class TargetTransformInfo { 57 class TargetTransformInfo {
40 protected:
41 /// \brief The TTI instance one level down the stack.
42 ///
43 /// This is used to implement the default behavior all of the methods which
44 /// is to delegate up through the stack of TTIs until one can answer the
45 /// query.
46 TargetTransformInfo *PrevTTI;
47
48 /// \brief The top of the stack of TTI analyses available.
49 ///
50 /// This is a convenience routine maintained as TTI analyses become available
51 /// that complements the PrevTTI delegation chain. When one part of an
52 /// analysis pass wants to query another part of the analysis pass it can use
53 /// this to start back at the top of the stack.
54 TargetTransformInfo *TopTTI;
55
56 /// All pass subclasses must in their initializePass routine call
57 /// pushTTIStack with themselves to update the pointers tracking the previous
58 /// TTI instance in the analysis group's stack, and the top of the analysis
59 /// group's stack.
60 void pushTTIStack(Pass *P);
61
62 /// All pass subclasses must call TargetTransformInfo::getAnalysisUsage.
63 virtual void getAnalysisUsage(AnalysisUsage &AU) const;
64
65 public: 58 public:
66 /// This class is intended to be subclassed by real implementations. 59 /// \brief Construct a TTI object using a type implementing the \c Concept
67 virtual ~TargetTransformInfo() = 0; 60 /// API below.
61 ///
62 /// This is used by targets to construct a TTI wrapping their target-specific
63 /// implementation that encodes appropriate costs for their target.
64 template <typename T> TargetTransformInfo(T Impl);
65
66 /// \brief Construct a baseline TTI object using a minimal implementation of
67 /// the \c Concept API below.
68 ///
69 /// The TTI implementation will reflect the information in the DataLayout
70 /// provided if non-null.
71 explicit TargetTransformInfo(const DataLayout *DL);
72
73 // Provide move semantics.
74 TargetTransformInfo(TargetTransformInfo &&Arg);
75 TargetTransformInfo &operator=(TargetTransformInfo &&RHS);
76
77 // We need to define the destructor out-of-line to define our sub-classes
78 // out-of-line.
79 ~TargetTransformInfo();
80
81 /// \brief Handle the invalidation of this information.
82 ///
83 /// When used as a result of \c TargetIRAnalysis this method will be called
84 /// when the function this was computed for changes. When it returns false,
85 /// the information is preserved across those changes.
86 bool invalidate(Function &, const PreservedAnalyses &) {
87 // FIXME: We should probably in some way ensure that the subtarget
88 // information for a function hasn't changed.
89 return false;
90 }
68 91
69 /// \name Generic Target Information 92 /// \name Generic Target Information
70 /// @{ 93 /// @{
71 94
72 /// \brief Underlying constants for 'cost' values in this interface. 95 /// \brief Underlying constants for 'cost' values in this interface.
83 /// cost and execution cost. A free instruction is typically one that folds 106 /// cost and execution cost. A free instruction is typically one that folds
84 /// into another instruction. For example, reg-to-reg moves can often be 107 /// into another instruction. For example, reg-to-reg moves can often be
85 /// skipped by renaming the registers in the CPU, but they still are encoded 108 /// skipped by renaming the registers in the CPU, but they still are encoded
86 /// and thus wouldn't be considered 'free' here. 109 /// and thus wouldn't be considered 'free' here.
87 enum TargetCostConstants { 110 enum TargetCostConstants {
88 TCC_Free = 0, ///< Expected to fold away in lowering. 111 TCC_Free = 0, ///< Expected to fold away in lowering.
89 TCC_Basic = 1, ///< The cost of a typical 'add' instruction. 112 TCC_Basic = 1, ///< The cost of a typical 'add' instruction.
90 TCC_Expensive = 4 ///< The cost of a 'div' instruction on x86. 113 TCC_Expensive = 4 ///< The cost of a 'div' instruction on x86.
91 }; 114 };
92 115
93 /// \brief Estimate the cost of a specific operation when lowered. 116 /// \brief Estimate the cost of a specific operation when lowered.
94 /// 117 ///
95 /// Note that this is designed to work on an arbitrary synthetic opcode, and 118 /// Note that this is designed to work on an arbitrary synthetic opcode, and
102 /// omitted. However, if the opcode is one of the cast instructions, the 125 /// omitted. However, if the opcode is one of the cast instructions, the
103 /// operand type is required. 126 /// operand type is required.
104 /// 127 ///
105 /// The returned cost is defined in terms of \c TargetCostConstants, see its 128 /// The returned cost is defined in terms of \c TargetCostConstants, see its
106 /// comments for a detailed explanation of the cost values. 129 /// comments for a detailed explanation of the cost values.
107 virtual unsigned getOperationCost(unsigned Opcode, Type *Ty, 130 unsigned getOperationCost(unsigned Opcode, Type *Ty,
108 Type *OpTy = nullptr) const; 131 Type *OpTy = nullptr) const;
109 132
110 /// \brief Estimate the cost of a GEP operation when lowered. 133 /// \brief Estimate the cost of a GEP operation when lowered.
111 /// 134 ///
112 /// The contract for this function is the same as \c getOperationCost except 135 /// The contract for this function is the same as \c getOperationCost except
113 /// that it supports an interface that provides extra information specific to 136 /// that it supports an interface that provides extra information specific to
114 /// the GEP operation. 137 /// the GEP operation.
115 virtual unsigned getGEPCost(const Value *Ptr, 138 unsigned getGEPCost(const Value *Ptr, ArrayRef<const Value *> Operands) const;
116 ArrayRef<const Value *> Operands) const;
117 139
118 /// \brief Estimate the cost of a function call when lowered. 140 /// \brief Estimate the cost of a function call when lowered.
119 /// 141 ///
120 /// The contract for this is the same as \c getOperationCost except that it 142 /// The contract for this is the same as \c getOperationCost except that it
121 /// supports an interface that provides extra information specific to call 143 /// supports an interface that provides extra information specific to call
122 /// instructions. 144 /// instructions.
123 /// 145 ///
124 /// This is the most basic query for estimating call cost: it only knows the 146 /// This is the most basic query for estimating call cost: it only knows the
125 /// function type and (potentially) the number of arguments at the call site. 147 /// function type and (potentially) the number of arguments at the call site.
126 /// The latter is only interesting for varargs function types. 148 /// The latter is only interesting for varargs function types.
127 virtual unsigned getCallCost(FunctionType *FTy, int NumArgs = -1) const; 149 unsigned getCallCost(FunctionType *FTy, int NumArgs = -1) const;
128 150
129 /// \brief Estimate the cost of calling a specific function when lowered. 151 /// \brief Estimate the cost of calling a specific function when lowered.
130 /// 152 ///
131 /// This overload adds the ability to reason about the particular function 153 /// This overload adds the ability to reason about the particular function
132 /// being called in the event it is a library call with special lowering. 154 /// being called in the event it is a library call with special lowering.
133 virtual unsigned getCallCost(const Function *F, int NumArgs = -1) const; 155 unsigned getCallCost(const Function *F, int NumArgs = -1) const;
134 156
135 /// \brief Estimate the cost of calling a specific function when lowered. 157 /// \brief Estimate the cost of calling a specific function when lowered.
136 /// 158 ///
137 /// This overload allows specifying a set of candidate argument values. 159 /// This overload allows specifying a set of candidate argument values.
138 virtual unsigned getCallCost(const Function *F, 160 unsigned getCallCost(const Function *F,
139 ArrayRef<const Value *> Arguments) const; 161 ArrayRef<const Value *> Arguments) const;
140 162
141 /// \brief Estimate the cost of an intrinsic when lowered. 163 /// \brief Estimate the cost of an intrinsic when lowered.
142 /// 164 ///
143 /// Mirrors the \c getCallCost method but uses an intrinsic identifier. 165 /// Mirrors the \c getCallCost method but uses an intrinsic identifier.
144 virtual unsigned getIntrinsicCost(Intrinsic::ID IID, Type *RetTy, 166 unsigned getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
145 ArrayRef<Type *> ParamTys) const; 167 ArrayRef<Type *> ParamTys) const;
146 168
147 /// \brief Estimate the cost of an intrinsic when lowered. 169 /// \brief Estimate the cost of an intrinsic when lowered.
148 /// 170 ///
149 /// Mirrors the \c getCallCost method but uses an intrinsic identifier. 171 /// Mirrors the \c getCallCost method but uses an intrinsic identifier.
150 virtual unsigned getIntrinsicCost(Intrinsic::ID IID, Type *RetTy, 172 unsigned getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
151 ArrayRef<const Value *> Arguments) const; 173 ArrayRef<const Value *> Arguments) const;
152 174
153 /// \brief Estimate the cost of a given IR user when lowered. 175 /// \brief Estimate the cost of a given IR user when lowered.
154 /// 176 ///
155 /// This can estimate the cost of either a ConstantExpr or Instruction when 177 /// This can estimate the cost of either a ConstantExpr or Instruction when
156 /// lowered. It has two primary advantages over the \c getOperationCost and 178 /// lowered. It has two primary advantages over the \c getOperationCost and
163 /// other context they may not be folded. This routine can distinguish such 185 /// other context they may not be folded. This routine can distinguish such
164 /// cases. 186 /// cases.
165 /// 187 ///
166 /// The returned cost is defined in terms of \c TargetCostConstants, see its 188 /// The returned cost is defined in terms of \c TargetCostConstants, see its
167 /// comments for a detailed explanation of the cost values. 189 /// comments for a detailed explanation of the cost values.
168 virtual unsigned getUserCost(const User *U) const; 190 unsigned getUserCost(const User *U) const;
169 191
170 /// \brief hasBranchDivergence - Return true if branch divergence exists. 192 /// \brief hasBranchDivergence - Return true if branch divergence exists.
171 /// Branch divergence has a significantly negative impact on GPU performance 193 /// Branch divergence has a significantly negative impact on GPU performance
172 /// when threads in the same wavefront take different paths due to conditional 194 /// when threads in the same wavefront take different paths due to conditional
173 /// branches. 195 /// branches.
174 virtual bool hasBranchDivergence() const; 196 bool hasBranchDivergence() const;
175 197
176 /// \brief Test whether calls to a function lower to actual program function 198 /// \brief Test whether calls to a function lower to actual program function
177 /// calls. 199 /// calls.
178 /// 200 ///
179 /// The idea is to test whether the program is likely to require a 'call' 201 /// The idea is to test whether the program is likely to require a 'call'
183 /// should probably move to simpler cost metrics using the above. 205 /// should probably move to simpler cost metrics using the above.
184 /// Alternatively, we could split the cost interface into distinct code-size 206 /// Alternatively, we could split the cost interface into distinct code-size
185 /// and execution-speed costs. This would allow modelling the core of this 207 /// and execution-speed costs. This would allow modelling the core of this
186 /// query more accurately as a call is a single small instruction, but 208 /// query more accurately as a call is a single small instruction, but
187 /// incurs significant execution cost. 209 /// incurs significant execution cost.
188 virtual bool isLoweredToCall(const Function *F) const; 210 bool isLoweredToCall(const Function *F) const;
189 211
190 /// Parameters that control the generic loop unrolling transformation. 212 /// Parameters that control the generic loop unrolling transformation.
191 struct UnrollingPreferences { 213 struct UnrollingPreferences {
192 /// The cost threshold for the unrolled loop, compared to 214 /// The cost threshold for the unrolled loop, compared to
193 /// CodeMetrics.NumInsts aggregated over all basic blocks in the loop body. 215 /// CodeMetrics.NumInsts aggregated over all basic blocks in the loop body.
194 /// The unrolling factor is set such that the unrolled loop body does not 216 /// The unrolling factor is set such that the unrolled loop body does not
195 /// exceed this cost. Set this to UINT_MAX to disable the loop body cost 217 /// exceed this cost. Set this to UINT_MAX to disable the loop body cost
196 /// restriction. 218 /// restriction.
197 unsigned Threshold; 219 unsigned Threshold;
220 /// If complete unrolling could help other optimizations (e.g. InstSimplify)
221 /// to remove N% of instructions, then we can go beyond the unroll threshold.
222 /// This value sets the minimal percent for allowing that.
223 unsigned MinPercentOfOptimized;
224 /// The absolute cost threshold. We won't go beyond this even if complete
225 /// unrolling could result in optimizing out 90% of instructions.
226 unsigned AbsoluteThreshold;
198 /// The cost threshold for the unrolled loop when optimizing for size (set 227 /// The cost threshold for the unrolled loop when optimizing for size (set
199 /// to UINT_MAX to disable). 228 /// to UINT_MAX to disable).
200 unsigned OptSizeThreshold; 229 unsigned OptSizeThreshold;
201 /// The cost threshold for the unrolled loop, like Threshold, but used 230 /// The cost threshold for the unrolled loop, like Threshold, but used
202 /// for partial/runtime unrolling (set to UINT_MAX to disable). 231 /// for partial/runtime unrolling (set to UINT_MAX to disable).
203 unsigned PartialThreshold; 232 unsigned PartialThreshold;
204 /// The cost threshold for the unrolled loop when optimizing for size, like 233 /// The cost threshold for the unrolled loop when optimizing for size, like
205 /// OptSizeThreshold, but used for partial/runtime unrolling (set to UINT_MAX 234 /// OptSizeThreshold, but used for partial/runtime unrolling (set to
206 /// to disable). 235 /// UINT_MAX to disable).
207 unsigned PartialOptSizeThreshold; 236 unsigned PartialOptSizeThreshold;
208 /// A forced unrolling factor (the number of concatenated bodies of the 237 /// A forced unrolling factor (the number of concatenated bodies of the
209 /// original loop in the unrolled loop body). When set to 0, the unrolling 238 /// original loop in the unrolled loop body). When set to 0, the unrolling
210 /// transformation will select an unrolling factor based on the current cost 239 /// transformation will select an unrolling factor based on the current cost
211 /// threshold and other factors. 240 /// threshold and other factors.
215 // (set to UINT_MAX to disable). This does not apply in cases where the 244 // (set to UINT_MAX to disable). This does not apply in cases where the
216 // loop is being fully unrolled. 245 // loop is being fully unrolled.
217 unsigned MaxCount; 246 unsigned MaxCount;
218 /// Allow partial unrolling (unrolling of loops to expand the size of the 247 /// Allow partial unrolling (unrolling of loops to expand the size of the
219 /// loop body, not only to eliminate small constant-trip-count loops). 248 /// loop body, not only to eliminate small constant-trip-count loops).
220 bool Partial; 249 bool Partial;
221 /// Allow runtime unrolling (unrolling of loops to expand the size of the 250 /// Allow runtime unrolling (unrolling of loops to expand the size of the
222 /// loop body even when the number of loop iterations is not known at compile 251 /// loop body even when the number of loop iterations is not known at
223 /// time). 252 /// compile time).
224 bool Runtime; 253 bool Runtime;
225 }; 254 };
226 255
227 /// \brief Get target-customized preferences for the generic loop unrolling 256 /// \brief Get target-customized preferences for the generic loop unrolling
228 /// transformation. The caller will initialize UP with the current 257 /// transformation. The caller will initialize UP with the current
229 /// target-independent defaults. 258 /// target-independent defaults.
230 virtual void getUnrollingPreferences(Loop *L, UnrollingPreferences &UP) const; 259 void getUnrollingPreferences(Loop *L, UnrollingPreferences &UP) const;
231 260
232 /// @} 261 /// @}
233 262
234 /// \name Scalar Target Information 263 /// \name Scalar Target Information
235 /// @{ 264 /// @{
240 /// significantly boost the performance when the population is dense, and it 269 /// significantly boost the performance when the population is dense, and it
241 /// may or may not degrade performance if the population is sparse. A HW 270 /// may or may not degrade performance if the population is sparse. A HW
242 /// support is considered as "Fast" if it can outperform, or is on a par 271 /// support is considered as "Fast" if it can outperform, or is on a par
243 /// with, SW implementation when the population is sparse; otherwise, it is 272 /// with, SW implementation when the population is sparse; otherwise, it is
244 /// considered as "Slow". 273 /// considered as "Slow".
245 enum PopcntSupportKind { 274 enum PopcntSupportKind { PSK_Software, PSK_SlowHardware, PSK_FastHardware };
246 PSK_Software,
247 PSK_SlowHardware,
248 PSK_FastHardware
249 };
250 275
251 /// \brief Return true if the specified immediate is a legal add immediate, that 276 /// \brief Return true if the specified immediate is a legal add immediate, that
252 /// is, the target has add instructions which can add a register with the 277 /// is, the target has add instructions which can add a register with the
253 /// immediate without having to materialize the immediate into a register. 278 /// immediate without having to materialize the immediate into a register.
254 virtual bool isLegalAddImmediate(int64_t Imm) const; 279 bool isLegalAddImmediate(int64_t Imm) const;
255 280
256 /// \brief Return true if the specified immediate is a legal icmp immediate, 281 /// \brief Return true if the specified immediate is a legal icmp immediate,
257 /// that is, the target has icmp instructions which can compare a register 282 /// that is, the target has icmp instructions which can compare a register
258 /// against the immediate without having to materialize the immediate into a 283 /// against the immediate without having to materialize the immediate into a
259 /// register. 284 /// register.
260 virtual bool isLegalICmpImmediate(int64_t Imm) const; 285 bool isLegalICmpImmediate(int64_t Imm) const;
261 286
262 /// \brief Return true if the addressing mode represented by AM is legal for 287 /// \brief Return true if the addressing mode represented by AM is legal for
263 /// this target, for a load/store of the specified type. 288 /// this target, for a load/store of the specified type.
264 /// The type may be VoidTy, in which case only return true if the addressing 289 /// The type may be VoidTy, in which case only return true if the addressing
265 /// mode is legal for a load/store of any legal type. 290 /// mode is legal for a load/store of any legal type.
266 /// TODO: Handle pre/postinc as well. 291 /// TODO: Handle pre/postinc as well.
267 virtual bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, 292 bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
268 int64_t BaseOffset, bool HasBaseReg, 293 bool HasBaseReg, int64_t Scale) const;
269 int64_t Scale) const; 294
295 /// \brief Return true if the target supports masked load and store instructions.
296 /// AVX2 allows masks for consecutive load and store for i32 and i64 elements.
297 /// AVX-512 architecture will also allow masks for non-consecutive memory
298 /// accesses.
299 bool isLegalMaskedStore(Type *DataType, int Consecutive) const;
300 bool isLegalMaskedLoad(Type *DataType, int Consecutive) const;
270 301
271 /// \brief Return the cost of the scaling factor used in the addressing 302 /// \brief Return the cost of the scaling factor used in the addressing
272 /// mode represented by AM for this target, for a load/store 303 /// mode represented by AM for this target, for a load/store
273 /// of the specified type. 304 /// of the specified type.
274 /// If the AM is supported, the return value must be >= 0. 305 /// If the AM is supported, the return value must be >= 0.
275 /// If the AM is not supported, it returns a negative value. 306 /// If the AM is not supported, it returns a negative value.
276 /// TODO: Handle pre/postinc as well. 307 /// TODO: Handle pre/postinc as well.
277 virtual int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, 308 int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
278 int64_t BaseOffset, bool HasBaseReg, 309 bool HasBaseReg, int64_t Scale) const;
279 int64_t Scale) const;
280 310
281 /// \brief Return true if it's free to truncate a value of type Ty1 to type 311 /// \brief Return true if it's free to truncate a value of type Ty1 to type
282 /// Ty2. e.g. On x86 it's free to truncate a i32 value in register EAX to i16 312 /// Ty2. e.g. On x86 it's free to truncate a i32 value in register EAX to i16
283 /// by referencing its sub-register AX. 313 /// by referencing its sub-register AX.
284 virtual bool isTruncateFree(Type *Ty1, Type *Ty2) const; 314 bool isTruncateFree(Type *Ty1, Type *Ty2) const;
285 315
286 /// \brief Return true if this type is legal. 316 /// \brief Return true if this type is legal.
287 virtual bool isTypeLegal(Type *Ty) const; 317 bool isTypeLegal(Type *Ty) const;
288 318
289 /// \brief Returns the target's jmp_buf alignment in bytes. 319 /// \brief Returns the target's jmp_buf alignment in bytes.
290 virtual unsigned getJumpBufAlignment() const; 320 unsigned getJumpBufAlignment() const;
291 321
292 /// \brief Returns the target's jmp_buf size in bytes. 322 /// \brief Returns the target's jmp_buf size in bytes.
293 virtual unsigned getJumpBufSize() const; 323 unsigned getJumpBufSize() const;
294 324
295 /// \brief Return true if switches should be turned into lookup tables for the 325 /// \brief Return true if switches should be turned into lookup tables for the
296 /// target. 326 /// target.
297 virtual bool shouldBuildLookupTables() const; 327 bool shouldBuildLookupTables() const;
298 328
299 /// \brief Return hardware support for population count. 329 /// \brief Return hardware support for population count.
300 virtual PopcntSupportKind getPopcntSupport(unsigned IntTyWidthInBit) const; 330 PopcntSupportKind getPopcntSupport(unsigned IntTyWidthInBit) const;
301 331
302 /// \brief Return true if the hardware has a fast square-root instruction. 332 /// \brief Return true if the hardware has a fast square-root instruction.
303 virtual bool haveFastSqrt(Type *Ty) const; 333 bool haveFastSqrt(Type *Ty) const;
334
335 /// \brief Return the expected cost of supporting the floating point operation
336 /// of the specified type.
337 unsigned getFPOpCost(Type *Ty) const;
304 338
305 /// \brief Return the expected cost of materializing for the given integer 339 /// \brief Return the expected cost of materializing for the given integer
306 /// immediate of the specified type. 340 /// immediate of the specified type.
307 virtual unsigned getIntImmCost(const APInt &Imm, Type *Ty) const; 341 unsigned getIntImmCost(const APInt &Imm, Type *Ty) const;
308 342
309 /// \brief Return the expected cost of materialization for the given integer 343 /// \brief Return the expected cost of materialization for the given integer
310 /// immediate of the specified type for a given instruction. The cost can be 344 /// immediate of the specified type for a given instruction. The cost can be
311 /// zero if the immediate can be folded into the specified instruction. 345 /// zero if the immediate can be folded into the specified instruction.
312 virtual unsigned getIntImmCost(unsigned Opc, unsigned Idx, const APInt &Imm, 346 unsigned getIntImmCost(unsigned Opc, unsigned Idx, const APInt &Imm,
313 Type *Ty) const; 347 Type *Ty) const;
314 virtual unsigned getIntImmCost(Intrinsic::ID IID, unsigned Idx, 348 unsigned getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
315 const APInt &Imm, Type *Ty) const; 349 Type *Ty) const;
316 /// @} 350 /// @}
317 351
318 /// \name Vector Target Information 352 /// \name Vector Target Information
319 /// @{ 353 /// @{
320 354
327 SK_ExtractSubvector ///< ExtractSubvector Index indicates start offset. 361 SK_ExtractSubvector ///< ExtractSubvector Index indicates start offset.
328 }; 362 };
329 363
330 /// \brief Additional information about an operand's possible values. 364 /// \brief Additional information about an operand's possible values.
331 enum OperandValueKind { 365 enum OperandValueKind {
332 OK_AnyValue, // Operand can have any value. 366 OK_AnyValue, // Operand can have any value.
333 OK_UniformValue, // Operand is uniform (splat of a value). 367 OK_UniformValue, // Operand is uniform (splat of a value).
334 OK_UniformConstantValue, // Operand is uniform constant. 368 OK_UniformConstantValue, // Operand is uniform constant.
335 OK_NonUniformConstantValue // Operand is a non uniform constant value. 369 OK_NonUniformConstantValue // Operand is a non uniform constant value.
336 }; 370 };
337 371
338 /// \brief Additional properties of an operand's values. 372 /// \brief Additional properties of an operand's values.
339 enum OperandValueProperties { OP_None = 0, OP_PowerOf2 = 1 }; 373 enum OperandValueProperties { OP_None = 0, OP_PowerOf2 = 1 };
340 374
341 /// \return The number of scalar or vector registers that the target has. 375 /// \return The number of scalar or vector registers that the target has.
342 /// If 'Vectors' is true, it returns the number of vector registers. If it is 376 /// If 'Vectors' is true, it returns the number of vector registers. If it is
343 /// set to false, it returns the number of scalar registers. 377 /// set to false, it returns the number of scalar registers.
344 virtual unsigned getNumberOfRegisters(bool Vector) const; 378 unsigned getNumberOfRegisters(bool Vector) const;
345 379
346 /// \return The width of the largest scalar or vector register type. 380 /// \return The width of the largest scalar or vector register type.
347 virtual unsigned getRegisterBitWidth(bool Vector) const; 381 unsigned getRegisterBitWidth(bool Vector) const;
348 382
349 /// \return The maximum unroll factor that the vectorizer should try to 383 /// \return The maximum interleave factor that any transform should try to
350 /// perform for this target. This number depends on the level of parallelism 384 /// perform for this target. This number depends on the level of parallelism
351 /// and the number of execution units in the CPU. 385 /// and the number of execution units in the CPU.
352 virtual unsigned getMaximumUnrollFactor() const; 386 unsigned getMaxInterleaveFactor() const;
353 387
354 /// \return The expected cost of arithmetic ops, such as mul, xor, fsub, etc. 388 /// \return The expected cost of arithmetic ops, such as mul, xor, fsub, etc.
355 virtual unsigned 389 unsigned
356 getArithmeticInstrCost(unsigned Opcode, Type *Ty, 390 getArithmeticInstrCost(unsigned Opcode, Type *Ty,
357 OperandValueKind Opd1Info = OK_AnyValue, 391 OperandValueKind Opd1Info = OK_AnyValue,
358 OperandValueKind Opd2Info = OK_AnyValue, 392 OperandValueKind Opd2Info = OK_AnyValue,
359 OperandValueProperties Opd1PropInfo = OP_None, 393 OperandValueProperties Opd1PropInfo = OP_None,
360 OperandValueProperties Opd2PropInfo = OP_None) const; 394 OperandValueProperties Opd2PropInfo = OP_None) const;
361 395
362 /// \return The cost of a shuffle instruction of kind Kind and of type Tp. 396 /// \return The cost of a shuffle instruction of kind Kind and of type Tp.
363 /// The index and subtype parameters are used by the subvector insertion and 397 /// The index and subtype parameters are used by the subvector insertion and
364 /// extraction shuffle kinds. 398 /// extraction shuffle kinds.
365 virtual unsigned getShuffleCost(ShuffleKind Kind, Type *Tp, int Index = 0, 399 unsigned getShuffleCost(ShuffleKind Kind, Type *Tp, int Index = 0,
366 Type *SubTp = nullptr) const; 400 Type *SubTp = nullptr) const;
367 401
368 /// \return The expected cost of cast instructions, such as bitcast, trunc, 402 /// \return The expected cost of cast instructions, such as bitcast, trunc,
369 /// zext, etc. 403 /// zext, etc.
370 virtual unsigned getCastInstrCost(unsigned Opcode, Type *Dst, 404 unsigned getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) const;
371 Type *Src) const;
372 405
373 /// \return The expected cost of control-flow related instructions such as 406 /// \return The expected cost of control-flow related instructions such as
374 /// Phi, Ret, Br. 407 /// Phi, Ret, Br.
375 virtual unsigned getCFInstrCost(unsigned Opcode) const; 408 unsigned getCFInstrCost(unsigned Opcode) const;
376 409
377 /// \returns The expected cost of compare and select instructions. 410 /// \returns The expected cost of compare and select instructions.
378 virtual unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy, 411 unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
379 Type *CondTy = nullptr) const; 412 Type *CondTy = nullptr) const;
380 413
381 /// \return The expected cost of vector Insert and Extract. 414 /// \return The expected cost of vector Insert and Extract.
382 /// Use -1 to indicate that there is no information on the index value. 415 /// Use -1 to indicate that there is no information on the index value.
383 virtual unsigned getVectorInstrCost(unsigned Opcode, Type *Val, 416 unsigned getVectorInstrCost(unsigned Opcode, Type *Val,
384 unsigned Index = -1) const; 417 unsigned Index = -1) const;
385 418
386 /// \return The cost of Load and Store instructions. 419 /// \return The cost of Load and Store instructions.
387 virtual unsigned getMemoryOpCost(unsigned Opcode, Type *Src, 420 unsigned getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
388 unsigned Alignment, 421 unsigned AddressSpace) const;
389 unsigned AddressSpace) const; 422
423 /// \return The cost of masked Load and Store instructions.
424 unsigned getMaskedMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
425 unsigned AddressSpace) const;
390 426
391 /// \brief Calculate the cost of performing a vector reduction. 427 /// \brief Calculate the cost of performing a vector reduction.
392 /// 428 ///
393 /// This is the cost of reducing the vector value of type \p Ty to a scalar 429 /// This is the cost of reducing the vector value of type \p Ty to a scalar
394 /// value using the operation denoted by \p Opcode. The form of the reduction 430 /// value using the operation denoted by \p Opcode. The form of the reduction
399 /// (v0, v1, v2, v3) 435 /// (v0, v1, v2, v3)
400 /// ((v0+v1), (v2+v3), undef, undef) 436 /// ((v0+v1), (v2+v3), undef, undef)
401 /// Split: 437 /// Split:
402 /// (v0, v1, v2, v3) 438 /// (v0, v1, v2, v3)
403 /// ((v0+v2), (v1+v3), undef, undef) 439 /// ((v0+v2), (v1+v3), undef, undef)
404 virtual unsigned getReductionCost(unsigned Opcode, Type *Ty, 440 unsigned getReductionCost(unsigned Opcode, Type *Ty,
405 bool IsPairwiseForm) const; 441 bool IsPairwiseForm) const;
406 442
407 /// \returns The cost of Intrinsic instructions. 443 /// \returns The cost of Intrinsic instructions.
408 virtual unsigned getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy, 444 unsigned getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
409 ArrayRef<Type *> Tys) const; 445 ArrayRef<Type *> Tys) const;
410 446
411 /// \returns The number of pieces into which the provided type must be 447 /// \returns The number of pieces into which the provided type must be
412 /// split during legalization. Zero is returned when the answer is unknown. 448 /// split during legalization. Zero is returned when the answer is unknown.
413 virtual unsigned getNumberOfParts(Type *Tp) const; 449 unsigned getNumberOfParts(Type *Tp) const;
414 450
415 /// \returns The cost of the address computation. For most targets this can be 451 /// \returns The cost of the address computation. For most targets this can be
416 /// merged into the instruction indexing mode. Some targets might want to 452 /// merged into the instruction indexing mode. Some targets might want to
417 /// distinguish between address computation for memory operations on vector 453 /// distinguish between address computation for memory operations on vector
418 /// types and scalar types. Such targets should override this function. 454 /// types and scalar types. Such targets should override this function.
419 /// The 'IsComplex' parameter is a hint that the address computation is likely 455 /// The 'IsComplex' parameter is a hint that the address computation is likely
420 /// to involve multiple instructions and as such unlikely to be merged into 456 /// to involve multiple instructions and as such unlikely to be merged into
421 /// the address indexing mode. 457 /// the address indexing mode.
422 virtual unsigned getAddressComputationCost(Type *Ty, 458 unsigned getAddressComputationCost(Type *Ty, bool IsComplex = false) const;
423 bool IsComplex = false) const;
424 459
425 /// \returns The cost, if any, of keeping values of the given types alive 460 /// \returns The cost, if any, of keeping values of the given types alive
426 /// over a callsite. 461 /// over a callsite.
427 /// 462 ///
428 /// Some types may require the use of register classes that do not have 463 /// Some types may require the use of register classes that do not have
429 /// any callee-saved registers, so would require a spill and fill. 464 /// any callee-saved registers, so would require a spill and fill.
430 virtual unsigned getCostOfKeepingLiveOverCall(ArrayRef<Type*> Tys) const; 465 unsigned getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) const;
466
467 /// \returns True if the intrinsic is a supported memory intrinsic. Info
468 /// will contain additional information - whether the intrinsic may write
469 /// or read to memory, volatility and the pointer. Info is undefined
470 /// if false is returned.
471 bool getTgtMemIntrinsic(IntrinsicInst *Inst, MemIntrinsicInfo &Info) const;
472
473 /// \returns A value which is the result of the given memory intrinsic. New
474 /// instructions may be created to extract the result from the given intrinsic
475 /// memory operation. Returns nullptr if the target cannot create a result
476 /// from the given intrinsic.
477 Value *getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
478 Type *ExpectedType) const;
431 479
432 /// @} 480 /// @}
433 481
434 /// Analysis group identification. 482 private:
483 /// \brief The abstract base class used to type erase specific TTI
484 /// implementations.
485 class Concept;
486
487 /// \brief The template model for the base class which wraps a concrete
488 /// implementation in a type erased interface.
489 template <typename T> class Model;
490
491 std::unique_ptr<Concept> TTIImpl;
492 };
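A minimal driving sketch (illustrative only, not from the diffed source), assuming the usual IR headers for BasicBlock and Instruction are available: summing getUserCost over a block gives a size estimate in TargetCostConstants units.

    static unsigned estimateBlockCost(const TargetTransformInfo &TTI,
                                      const BasicBlock &BB) {
      unsigned Cost = 0;
      for (const Instruction &I : BB)
        Cost += TTI.getUserCost(&I); // TCC_Free / TCC_Basic / TCC_Expensive scale
      return Cost;
    }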
493
494 class TargetTransformInfo::Concept {
495 public:
496 virtual ~Concept() = 0;
497
498 virtual unsigned getOperationCost(unsigned Opcode, Type *Ty, Type *OpTy) = 0;
499 virtual unsigned getGEPCost(const Value *Ptr,
500 ArrayRef<const Value *> Operands) = 0;
501 virtual unsigned getCallCost(FunctionType *FTy, int NumArgs) = 0;
502 virtual unsigned getCallCost(const Function *F, int NumArgs) = 0;
503 virtual unsigned getCallCost(const Function *F,
504 ArrayRef<const Value *> Arguments) = 0;
505 virtual unsigned getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
506 ArrayRef<Type *> ParamTys) = 0;
507 virtual unsigned getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
508 ArrayRef<const Value *> Arguments) = 0;
509 virtual unsigned getUserCost(const User *U) = 0;
510 virtual bool hasBranchDivergence() = 0;
511 virtual bool isLoweredToCall(const Function *F) = 0;
512 virtual void getUnrollingPreferences(Loop *L, UnrollingPreferences &UP) = 0;
513 virtual bool isLegalAddImmediate(int64_t Imm) = 0;
514 virtual bool isLegalICmpImmediate(int64_t Imm) = 0;
515 virtual bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV,
516 int64_t BaseOffset, bool HasBaseReg,
517 int64_t Scale) = 0;
518 virtual bool isLegalMaskedStore(Type *DataType, int Consecutive) = 0;
519 virtual bool isLegalMaskedLoad(Type *DataType, int Consecutive) = 0;
520 virtual int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
521 int64_t BaseOffset, bool HasBaseReg,
522 int64_t Scale) = 0;
523 virtual bool isTruncateFree(Type *Ty1, Type *Ty2) = 0;
524 virtual bool isTypeLegal(Type *Ty) = 0;
525 virtual unsigned getJumpBufAlignment() = 0;
526 virtual unsigned getJumpBufSize() = 0;
527 virtual bool shouldBuildLookupTables() = 0;
528 virtual PopcntSupportKind getPopcntSupport(unsigned IntTyWidthInBit) = 0;
529 virtual bool haveFastSqrt(Type *Ty) = 0;
530 virtual unsigned getFPOpCost(Type *Ty) = 0;
531 virtual unsigned getIntImmCost(const APInt &Imm, Type *Ty) = 0;
532 virtual unsigned getIntImmCost(unsigned Opc, unsigned Idx, const APInt &Imm,
533 Type *Ty) = 0;
534 virtual unsigned getIntImmCost(Intrinsic::ID IID, unsigned Idx,
535 const APInt &Imm, Type *Ty) = 0;
536 virtual unsigned getNumberOfRegisters(bool Vector) = 0;
537 virtual unsigned getRegisterBitWidth(bool Vector) = 0;
538 virtual unsigned getMaxInterleaveFactor() = 0;
539 virtual unsigned
540 getArithmeticInstrCost(unsigned Opcode, Type *Ty, OperandValueKind Opd1Info,
541 OperandValueKind Opd2Info,
542 OperandValueProperties Opd1PropInfo,
543 OperandValueProperties Opd2PropInfo) = 0;
544 virtual unsigned getShuffleCost(ShuffleKind Kind, Type *Tp, int Index,
545 Type *SubTp) = 0;
546 virtual unsigned getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) = 0;
547 virtual unsigned getCFInstrCost(unsigned Opcode) = 0;
548 virtual unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
549 Type *CondTy) = 0;
550 virtual unsigned getVectorInstrCost(unsigned Opcode, Type *Val,
551 unsigned Index) = 0;
552 virtual unsigned getMemoryOpCost(unsigned Opcode, Type *Src,
553 unsigned Alignment,
554 unsigned AddressSpace) = 0;
555 virtual unsigned getMaskedMemoryOpCost(unsigned Opcode, Type *Src,
556 unsigned Alignment,
557 unsigned AddressSpace) = 0;
558 virtual unsigned getReductionCost(unsigned Opcode, Type *Ty,
559 bool IsPairwiseForm) = 0;
560 virtual unsigned getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
561 ArrayRef<Type *> Tys) = 0;
562 virtual unsigned getNumberOfParts(Type *Tp) = 0;
563 virtual unsigned getAddressComputationCost(Type *Ty, bool IsComplex) = 0;
564 virtual unsigned getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) = 0;
565 virtual bool getTgtMemIntrinsic(IntrinsicInst *Inst,
566 MemIntrinsicInfo &Info) = 0;
567 virtual Value *getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
568 Type *ExpectedType) = 0;
569 };
570
571 template <typename T>
572 class TargetTransformInfo::Model final : public TargetTransformInfo::Concept {
573 T Impl;
574
575 public:
576 Model(T Impl) : Impl(std::move(Impl)) {}
577 ~Model() override {}
578
579 unsigned getOperationCost(unsigned Opcode, Type *Ty, Type *OpTy) override {
580 return Impl.getOperationCost(Opcode, Ty, OpTy);
581 }
582 unsigned getGEPCost(const Value *Ptr,
583 ArrayRef<const Value *> Operands) override {
584 return Impl.getGEPCost(Ptr, Operands);
585 }
586 unsigned getCallCost(FunctionType *FTy, int NumArgs) override {
587 return Impl.getCallCost(FTy, NumArgs);
588 }
589 unsigned getCallCost(const Function *F, int NumArgs) override {
590 return Impl.getCallCost(F, NumArgs);
591 }
592 unsigned getCallCost(const Function *F,
593 ArrayRef<const Value *> Arguments) override {
594 return Impl.getCallCost(F, Arguments);
595 }
596 unsigned getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
597 ArrayRef<Type *> ParamTys) override {
598 return Impl.getIntrinsicCost(IID, RetTy, ParamTys);
599 }
600 unsigned getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
601 ArrayRef<const Value *> Arguments) override {
602 return Impl.getIntrinsicCost(IID, RetTy, Arguments);
603 }
604 unsigned getUserCost(const User *U) override { return Impl.getUserCost(U); }
605 bool hasBranchDivergence() override { return Impl.hasBranchDivergence(); }
606 bool isLoweredToCall(const Function *F) override {
607 return Impl.isLoweredToCall(F);
608 }
609 void getUnrollingPreferences(Loop *L, UnrollingPreferences &UP) override {
610 return Impl.getUnrollingPreferences(L, UP);
611 }
612 bool isLegalAddImmediate(int64_t Imm) override {
613 return Impl.isLegalAddImmediate(Imm);
614 }
615 bool isLegalICmpImmediate(int64_t Imm) override {
616 return Impl.isLegalICmpImmediate(Imm);
617 }
618 bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
619 bool HasBaseReg, int64_t Scale) override {
620 return Impl.isLegalAddressingMode(Ty, BaseGV, BaseOffset, HasBaseReg,
621 Scale);
622 }
623 bool isLegalMaskedStore(Type *DataType, int Consecutive) override {
624 return Impl.isLegalMaskedStore(DataType, Consecutive);
625 }
626 bool isLegalMaskedLoad(Type *DataType, int Consecutive) override {
627 return Impl.isLegalMaskedLoad(DataType, Consecutive);
628 }
629 int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
630 bool HasBaseReg, int64_t Scale) override {
631 return Impl.getScalingFactorCost(Ty, BaseGV, BaseOffset, HasBaseReg, Scale);
632 }
633 bool isTruncateFree(Type *Ty1, Type *Ty2) override {
634 return Impl.isTruncateFree(Ty1, Ty2);
635 }
636 bool isTypeLegal(Type *Ty) override { return Impl.isTypeLegal(Ty); }
637 unsigned getJumpBufAlignment() override { return Impl.getJumpBufAlignment(); }
638 unsigned getJumpBufSize() override { return Impl.getJumpBufSize(); }
639 bool shouldBuildLookupTables() override {
640 return Impl.shouldBuildLookupTables();
641 }
642 PopcntSupportKind getPopcntSupport(unsigned IntTyWidthInBit) override {
643 return Impl.getPopcntSupport(IntTyWidthInBit);
644 }
645 bool haveFastSqrt(Type *Ty) override { return Impl.haveFastSqrt(Ty); }
646
647 unsigned getFPOpCost(Type *Ty) override {
648 return Impl.getFPOpCost(Ty);
649 }
650
651 unsigned getIntImmCost(const APInt &Imm, Type *Ty) override {
652 return Impl.getIntImmCost(Imm, Ty);
653 }
654 unsigned getIntImmCost(unsigned Opc, unsigned Idx, const APInt &Imm,
655 Type *Ty) override {
656 return Impl.getIntImmCost(Opc, Idx, Imm, Ty);
657 }
658 unsigned getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
659 Type *Ty) override {
660 return Impl.getIntImmCost(IID, Idx, Imm, Ty);
661 }
662 unsigned getNumberOfRegisters(bool Vector) override {
663 return Impl.getNumberOfRegisters(Vector);
664 }
665 unsigned getRegisterBitWidth(bool Vector) override {
666 return Impl.getRegisterBitWidth(Vector);
667 }
668 unsigned getMaxInterleaveFactor() override {
669 return Impl.getMaxInterleaveFactor();
670 }
671 unsigned
672 getArithmeticInstrCost(unsigned Opcode, Type *Ty, OperandValueKind Opd1Info,
673 OperandValueKind Opd2Info,
674 OperandValueProperties Opd1PropInfo,
675 OperandValueProperties Opd2PropInfo) override {
676 return Impl.getArithmeticInstrCost(Opcode, Ty, Opd1Info, Opd2Info,
677 Opd1PropInfo, Opd2PropInfo);
678 }
679 unsigned getShuffleCost(ShuffleKind Kind, Type *Tp, int Index,
680 Type *SubTp) override {
681 return Impl.getShuffleCost(Kind, Tp, Index, SubTp);
682 }
683 unsigned getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) override {
684 return Impl.getCastInstrCost(Opcode, Dst, Src);
685 }
686 unsigned getCFInstrCost(unsigned Opcode) override {
687 return Impl.getCFInstrCost(Opcode);
688 }
689 unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
690 Type *CondTy) override {
691 return Impl.getCmpSelInstrCost(Opcode, ValTy, CondTy);
692 }
693 unsigned getVectorInstrCost(unsigned Opcode, Type *Val,
694 unsigned Index) override {
695 return Impl.getVectorInstrCost(Opcode, Val, Index);
696 }
697 unsigned getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
698 unsigned AddressSpace) override {
699 return Impl.getMemoryOpCost(Opcode, Src, Alignment, AddressSpace);
700 }
701 unsigned getMaskedMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
702 unsigned AddressSpace) override {
703 return Impl.getMaskedMemoryOpCost(Opcode, Src, Alignment, AddressSpace);
704 }
705 unsigned getReductionCost(unsigned Opcode, Type *Ty,
706 bool IsPairwiseForm) override {
707 return Impl.getReductionCost(Opcode, Ty, IsPairwiseForm);
708 }
709 unsigned getIntrinsicInstrCost(Intrinsic::ID ID, Type *RetTy,
710 ArrayRef<Type *> Tys) override {
711 return Impl.getIntrinsicInstrCost(ID, RetTy, Tys);
712 }
713 unsigned getNumberOfParts(Type *Tp) override {
714 return Impl.getNumberOfParts(Tp);
715 }
716 unsigned getAddressComputationCost(Type *Ty, bool IsComplex) override {
717 return Impl.getAddressComputationCost(Ty, IsComplex);
718 }
719 unsigned getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) override {
720 return Impl.getCostOfKeepingLiveOverCall(Tys);
721 }
722 bool getTgtMemIntrinsic(IntrinsicInst *Inst,
723 MemIntrinsicInfo &Info) override {
724 return Impl.getTgtMemIntrinsic(Inst, Info);
725 }
726 Value *getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
727 Type *ExpectedType) override {
728 return Impl.getOrCreateResultFromMemIntrinsic(Inst, ExpectedType);
729 }
730 };
731
732 template <typename T>
733 TargetTransformInfo::TargetTransformInfo(T Impl)
734 : TTIImpl(new Model<T>(Impl)) {}
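A minimal construction sketch (illustrative only, not from the diffed source): the explicit DataLayout constructor yields the conservative baseline, while the templated constructor above wraps any type implementing the Concept API; MyTargetTTIImpl below is hypothetical.

    // Baseline TTI: conservative answers derived only from the DataLayout.
    TargetTransformInfo buildBaselineTTI(const DataLayout *DL) {
      return TargetTransformInfo(DL);
    }
    // A target would instead pass its implementation object, e.g. (hypothetical):
    //   return TargetTransformInfo(MyTargetTTIImpl(ST));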
735
736 /// \brief Analysis pass providing the \c TargetTransformInfo.
737 ///
738 /// The core idea of the TargetIRAnalysis is to expose an interface through
739 /// which LLVM targets can analyze and provide information about the middle
740 /// end's target-independent IR. This supports use cases such as target-aware
741 /// cost modeling of IR constructs.
742 ///
743 /// This is a function analysis because much of the cost modeling for targets
744 /// is done in a subtarget specific way and LLVM supports compiling different
745 /// functions targeting different subtargets in order to support runtime
746 /// dispatch according to the observed subtarget.
747 class TargetIRAnalysis {
748 public:
749 typedef TargetTransformInfo Result;
750
751 /// \brief Opaque, unique identifier for this analysis pass.
752 static void *ID() { return (void *)&PassID; }
753
754 /// \brief Provide access to a name for this pass for debugging purposes.
755 static StringRef name() { return "TargetIRAnalysis"; }
756
757 /// \brief Default construct a target IR analysis.
758 ///
759 /// This will use the module's datalayout to construct a baseline
760 /// conservative TTI result.
761 TargetIRAnalysis();
762
763 /// \brief Construct an IR analysis pass around a target-provided callback.
764 ///
765 /// The callback will be called with a particular function for which the TTI
766 /// is needed and must return a TTI object for that function.
767 TargetIRAnalysis(std::function<Result(Function &)> TTICallback);
768
769 // Value semantics. We spell out the constructors for MSVC.
770 TargetIRAnalysis(const TargetIRAnalysis &Arg)
771 : TTICallback(Arg.TTICallback) {}
772 TargetIRAnalysis(TargetIRAnalysis &&Arg)
773 : TTICallback(std::move(Arg.TTICallback)) {}
774 TargetIRAnalysis &operator=(const TargetIRAnalysis &RHS) {
775 TTICallback = RHS.TTICallback;
776 return *this;
777 }
778 TargetIRAnalysis &operator=(TargetIRAnalysis &&RHS) {
779 TTICallback = std::move(RHS.TTICallback);
780 return *this;
781 }
782
783 Result run(Function &F);
784
785 private:
786 static char PassID;
787
788 /// \brief The callback used to produce a result.
789 ///
790 /// We use a completely opaque callback so that targets can provide whatever
791 /// mechanism they desire for constructing the TTI for a given function.
792 ///
793 /// FIXME: Should we really use std::function? It's relatively inefficient.
794 /// It might be possible to arrange for even stateful callbacks to outlive
795 /// the analysis and thus use a function_ref which would be lighter weight.
796 /// This may also be less error prone as the callback is likely to reference
797 /// the external TargetMachine, and that reference needs to never dangle.
798 std::function<Result(Function &)> TTICallback;
799
800 /// \brief Helper function used as the callback in the default constructor.
801 static Result getDefaultTTI(Function &F);
802 };
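A minimal callback sketch (illustrative only, not from the diffed source), assuming Module::getDataLayout() still yields a const DataLayout * at this revision; a real target would return a TTI wrapping its subtarget-specific implementation instead of the baseline.

    TargetTransformInfo runBaselineTTIAnalysis(Function &F) {
      TargetIRAnalysis TIRA([](Function &Fn) {
        // Baseline result for every function.
        return TargetTransformInfo(Fn.getParent()->getDataLayout());
      });
      return TIRA.run(F);
    }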
803
804 /// \brief Wrapper pass for TargetTransformInfo.
805 ///
806 /// This pass can be constructed from a TTI object which it stores internally
807 /// and is queried by passes.
808 class TargetTransformInfoWrapperPass : public ImmutablePass {
809 TargetIRAnalysis TIRA;
810 Optional<TargetTransformInfo> TTI;
811
812 virtual void anchor();
813
814 public:
435 static char ID; 815 static char ID;
816
817 /// \brief We must provide a default constructor for the pass but it should
818 /// never be used.
819 ///
820 /// Use the constructor below or call one of the creation routines.
821 TargetTransformInfoWrapperPass();
822
823 explicit TargetTransformInfoWrapperPass(TargetIRAnalysis TIRA);
824
825 TargetTransformInfo &getTTI(Function &F);
436 }; 826 };
437 827
438 /// \brief Create the base case instance of a pass in the TTI analysis group. 828 /// \brief Create an analysis pass wrapper around a TTI object.
439 /// 829 ///
440 /// This class provides the base case for the stack of TTI analyzes. It doesn't 830 /// This analysis pass just holds the TTI instance and makes it available to
441 /// delegate to anything and uses the STTI and VTTI objects passed in to 831 /// clients.
442 /// satisfy the queries. 832 ImmutablePass *createTargetTransformInfoWrapperPass(TargetIRAnalysis TIRA);
443 ImmutablePass *createNoTargetTransformInfoPass();
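A minimal legacy-pass-manager sketch (illustrative only, not from the diffed source); the use of TargetMachine::getTargetIRAnalysis() and the getAnalysis<> query from within a pass are assumptions about the surrounding code, not part of this header.

    void addTTIWrapperPass(legacy::PassManager &PM, TargetMachine &TM) {
      // Assumed: TargetMachine exposes getTargetIRAnalysis() at this revision.
      PM.add(createTargetTransformInfoWrapperPass(TM.getTargetIRAnalysis()));
    }
    // A pass that declared AU.addRequired<TargetTransformInfoWrapperPass>() can then do:
    //   TargetTransformInfo &TTI =
    //       getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);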
444 833
445 } // End llvm namespace 834 } // End llvm namespace
446 835
447 #endif 836 #endif