comparison lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp @ 120:1172e4bd9c6f

update 4.0.0
author mir3636
date Fri, 25 Nov 2016 19:14:25 +0900
parents 7d135dc70f03
children 803732b1fca8
--- lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp  (101:34baf5011add)
+++ lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp  (120:1172e4bd9c6f)
@@ -11,11 +11,10 @@
 // Machine IR.
 //
 //===----------------------------------------------------------------------===//
 
 #include "llvm/CodeGen/FunctionLoweringInfo.h"
-#include "llvm/ADT/PostOrderIterator.h"
 #include "llvm/CodeGen/Analysis.h"
 #include "llvm/CodeGen/MachineFrameInfo.h"
 #include "llvm/CodeGen/MachineFunction.h"
 #include "llvm/CodeGen/MachineInstrBuilder.h"
 #include "llvm/CodeGen/MachineModuleInfo.h"
@@ -86,17 +85,44 @@
   MF = &mf;
   TLI = MF->getSubtarget().getTargetLowering();
   RegInfo = &MF->getRegInfo();
   MachineModuleInfo &MMI = MF->getMMI();
   const TargetFrameLowering *TFI = MF->getSubtarget().getFrameLowering();
+  unsigned StackAlign = TFI->getStackAlignment();
 
   // Check whether the function can return without sret-demotion.
   SmallVector<ISD::OutputArg, 4> Outs;
   GetReturnInfo(Fn->getReturnType(), Fn->getAttributes(), Outs, *TLI,
                 mf.getDataLayout());
   CanLowerReturn = TLI->CanLowerReturn(Fn->getCallingConv(), *MF,
                                        Fn->isVarArg(), Outs, Fn->getContext());
+
+  // If this personality uses funclets, we need to do a bit more work.
+  DenseMap<const AllocaInst *, TinyPtrVector<int *>> CatchObjects;
+  EHPersonality Personality = classifyEHPersonality(
+      Fn->hasPersonalityFn() ? Fn->getPersonalityFn() : nullptr);
+  if (isFuncletEHPersonality(Personality)) {
+    // Calculate state numbers if we haven't already.
+    WinEHFuncInfo &EHInfo = *MF->getWinEHFuncInfo();
+    if (Personality == EHPersonality::MSVC_CXX)
+      calculateWinCXXEHStateNumbers(&fn, EHInfo);
+    else if (isAsynchronousEHPersonality(Personality))
+      calculateSEHStateNumbers(&fn, EHInfo);
+    else if (Personality == EHPersonality::CoreCLR)
+      calculateClrEHStateNumbers(&fn, EHInfo);
+
+    // Map all BB references in the WinEH data to MBBs.
+    for (WinEHTryBlockMapEntry &TBME : EHInfo.TryBlockMap) {
+      for (WinEHHandlerType &H : TBME.HandlerArray) {
+        if (const AllocaInst *AI = H.CatchObj.Alloca)
+          CatchObjects.insert({AI, {}}).first->second.push_back(
+              &H.CatchObj.FrameIndex);
+        else
+          H.CatchObj.FrameIndex = INT_MAX;
+      }
+    }
+  }
 
   // Initialize the mapping of values to registers. This is only set up for
   // instruction values that are used outside of the block that defines
   // them.
   Function::const_iterator BB = Fn->begin(), EB = Fn->end();
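
Note: the funclet block added above runs before any allocas have been given frame indices, so the handlers' CatchObj.FrameIndex fields cannot be resolved yet. CatchObjects therefore records, per catch-object alloca, the addresses of every FrameIndex field to patch once the alloca is laid out (INT_MAX marks handlers with no catch object). A standalone sketch of this patch-by-pointer scheme, with plain std containers standing in for DenseMap/TinyPtrVector and a made-up Handler struct standing in for WinEHHandlerType:

    #include <cassert>
    #include <climits>
    #include <map>
    #include <vector>

    // Stand-in for WinEHHandlerType's CatchObj: an alloca pointer plus the
    // frame-index slot that the real code patches later.
    struct Handler {
      const void *Alloca;
      int FrameIndex;
    };

    int main() {
      int SomeAlloca = 0; // stands in for an AllocaInst
      Handler H1{&SomeAlloca, INT_MAX};
      Handler H2{nullptr, INT_MAX};

      // Collect the addresses of every FrameIndex field, keyed by alloca.
      std::map<const void *, std::vector<int *>> CatchObjects;
      for (Handler *H : {&H1, &H2}) {
        if (H->Alloca)
          CatchObjects[H->Alloca].push_back(&H->FrameIndex);
        else
          H->FrameIndex = INT_MAX; // no catch object for this handler
      }

      // Later, once the alloca is assigned a frame index, patch every
      // recorded slot in one pass; this mirrors "*CatchObjPtr = FrameIndex".
      int FrameIndex = 3;
      for (int *Slot : CatchObjects[&SomeAlloca])
        *Slot = FrameIndex;

      assert(H1.FrameIndex == 3 && H2.FrameIndex == INT_MAX);
      return 0;
    }
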
@@ -106,11 +132,10 @@
       if (const AllocaInst *AI = dyn_cast<AllocaInst>(I)) {
         Type *Ty = AI->getAllocatedType();
         unsigned Align =
             std::max((unsigned)MF->getDataLayout().getPrefTypeAlignment(Ty),
                      AI->getAlignment());
-        unsigned StackAlign = TFI->getStackAlignment();
 
         // Static allocas can be folded into the initial stack frame
         // adjustment. For targets that don't realign the stack, don't
         // do this if there is an extra alignment requirement.
         if (AI->isStaticAlloca() &&
@@ -118,21 +143,35 @@
           const ConstantInt *CUI = cast<ConstantInt>(AI->getArraySize());
           uint64_t TySize = MF->getDataLayout().getTypeAllocSize(Ty);
 
           TySize *= CUI->getZExtValue();   // Get total allocated size.
           if (TySize == 0) TySize = 1; // Don't create zero-sized stack objects.
-
-          StaticAllocaMap[AI] =
-            MF->getFrameInfo()->CreateStackObject(TySize, Align, false, AI);
+          int FrameIndex = INT_MAX;
+          auto Iter = CatchObjects.find(AI);
+          if (Iter != CatchObjects.end() && TLI->needsFixedCatchObjects()) {
+            FrameIndex = MF->getFrameInfo().CreateFixedObject(
+                TySize, 0, /*Immutable=*/false, /*isAliased=*/true);
+            MF->getFrameInfo().setObjectAlignment(FrameIndex, Align);
+          } else {
+            FrameIndex =
+                MF->getFrameInfo().CreateStackObject(TySize, Align, false, AI);
+          }
+
+          StaticAllocaMap[AI] = FrameIndex;
+          // Update the catch handler information.
+          if (Iter != CatchObjects.end()) {
+            for (int *CatchObjPtr : Iter->second)
+              *CatchObjPtr = FrameIndex;
+          }
         } else {
           // FIXME: Overaligned static allocas should be grouped into
           // a single dynamic allocation instead of using a separate
           // stack allocation for each one.
           if (Align <= StackAlign)
             Align = 0;
           // Inform the Frame Information that we have variable-sized objects.
-          MF->getFrameInfo()->CreateVariableSizedObject(Align ? Align : 1, AI);
+          MF->getFrameInfo().CreateVariableSizedObject(Align ? Align : 1, AI);
         }
       }
 
       // Look for inline asm that clobbers the SP register.
       if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
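
Note: the new branch above is the heart of the change. When the target reports needsFixedCatchObjects() (its EH runtime must locate catch objects at a fixed frame offset), the catch-object alloca becomes a fixed object whose real offset is decided during frame layout; everything else still becomes an ordinary static stack object. A condensed sketch of the decision, using only the calls that appear in the hunk (the helper function itself is hypothetical):

    #include "llvm/CodeGen/MachineFrameInfo.h"
    #include "llvm/IR/Instructions.h"
    #include "llvm/Target/TargetLowering.h"
    using namespace llvm;

    // Hypothetical helper condensing the two allocation paths above.
    static int allocateCatchObjectSlot(MachineFrameInfo &MFI,
                                       const TargetLowering &TLI,
                                       uint64_t TySize, unsigned Align,
                                       const AllocaInst *AI,
                                       bool IsCatchObject) {
      if (IsCatchObject && TLI.needsFixedCatchObjects()) {
        // A fixed object gets a frame-relative offset the EH runtime can
        // recompute; the 0 offset is a placeholder resolved by frame layout,
        // and the object may alias the catch pointer (isAliased = true).
        int FI = MFI.CreateFixedObject(TySize, /*SPOffset=*/0,
                                       /*Immutable=*/false, /*isAliased=*/true);
        MFI.setObjectAlignment(FI, Align);
        return FI;
      }
      // Everything else is folded into the static frame, as before.
      return MFI.CreateStackObject(TySize, Align, /*isSS=*/false, AI);
    }
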
@@ -149,29 +188,29 @@
               TLI->ComputeConstraintToUse(Op, SDValue(), DAG);
               std::pair<unsigned, const TargetRegisterClass *> PhysReg =
                   TLI->getRegForInlineAsmConstraint(TRI, Op.ConstraintCode,
                                                     Op.ConstraintVT);
               if (PhysReg.first == SP)
-                MF->getFrameInfo()->setHasOpaqueSPAdjustment(true);
+                MF->getFrameInfo().setHasOpaqueSPAdjustment(true);
             }
           }
         }
       }
 
       // Look for calls to the @llvm.va_start intrinsic. We can omit some
       // prologue boilerplate for variadic functions that don't examine their
       // arguments.
       if (const auto *II = dyn_cast<IntrinsicInst>(I)) {
         if (II->getIntrinsicID() == Intrinsic::vastart)
-          MF->getFrameInfo()->setHasVAStart(true);
+          MF->getFrameInfo().setHasVAStart(true);
       }
 
       // If we have a musttail call in a variadic function, we need to ensure we
       // forward implicit register parameters.
       if (const auto *CI = dyn_cast<CallInst>(I)) {
         if (CI->isMustTailCall() && Fn->isVarArg())
-          MF->getFrameInfo()->setHasMustTailInVarArgFunc(true);
+          MF->getFrameInfo().setHasMustTailInVarArgFunc(true);
       }
 
       // Mark values used outside their block as exported, by allocating
       // a virtual register for them.
       if (isUsedOutsideOfDefiningBlock(&*I))
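
Note: only the MachineFrameInfo accessor changed in this hunk (getFrameInfo() now returns a reference rather than a pointer). For context, the scan it belongs to fires on inline asm whose clobber list names the stack pointer, since stack layout can no longer be tracked across such asm. A minimal reproducer, assuming x86-64 and GCC-style extended asm (the function name is made up):

    // Compiled for x86-64, the clobber list below reaches the IR as an
    // InlineAsm clobber of the stack pointer register, which the scan above
    // turns into MF->getFrameInfo().setHasOpaqueSPAdjustment(true).
    void clobbers_sp() {
      // The empty asm body is irrelevant; only the "rsp" clobber matters.
      asm volatile("" ::: "rsp");
    }
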
@@ -221,11 +260,11 @@
       // funclets.
       // FIXME: SEH catchpads do not create funclets, so we could avoid setting
       // this in such cases in order to improve frame layout.
       if (!isa<LandingPadInst>(I)) {
         MMI.setHasEHFunclets(true);
-        MF->getFrameInfo()->setHasOpaqueSPAdjustment(true);
+        MF->getFrameInfo().setHasOpaqueSPAdjustment(true);
       }
       if (isa<CatchSwitchInst>(I)) {
         assert(&*BB->begin() == I &&
                "WinEHPrepare failed to remove PHIs from imaginary BBs");
         continue;
@@ -279,35 +318,18 @@
       MBBMap[&*BB]->setIsEHPad();
     if (const auto *LPI = dyn_cast<LandingPadInst>(FNP))
       LPads.push_back(LPI);
   }
 
-  // If this personality uses funclets, we need to do a bit more work.
-  if (!Fn->hasPersonalityFn())
-    return;
-  EHPersonality Personality = classifyEHPersonality(Fn->getPersonalityFn());
   if (!isFuncletEHPersonality(Personality))
     return;
 
-  // Calculate state numbers if we haven't already.
   WinEHFuncInfo &EHInfo = *MF->getWinEHFuncInfo();
-  if (Personality == EHPersonality::MSVC_CXX)
-    calculateWinCXXEHStateNumbers(&fn, EHInfo);
-  else if (isAsynchronousEHPersonality(Personality))
-    calculateSEHStateNumbers(&fn, EHInfo);
-  else if (Personality == EHPersonality::CoreCLR)
-    calculateClrEHStateNumbers(&fn, EHInfo);
 
   // Map all BB references in the WinEH data to MBBs.
   for (WinEHTryBlockMapEntry &TBME : EHInfo.TryBlockMap) {
     for (WinEHHandlerType &H : TBME.HandlerArray) {
-      if (H.CatchObj.Alloca) {
-        assert(StaticAllocaMap.count(H.CatchObj.Alloca));
-        H.CatchObj.FrameIndex = StaticAllocaMap[H.CatchObj.Alloca];
-      } else {
-        H.CatchObj.FrameIndex = INT_MAX;
-      }
       if (H.Handler)
         H.Handler = MBBMap[H.Handler.get<const BasicBlock *>()];
     }
   }
   for (CxxUnwindMapEntry &UME : EHInfo.CxxUnwindMap)
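
Note: the personality classification and state-number calculation deleted here are the lines hoisted to the top of set() in the earlier hunk, so they run before allocas are laid out. In the surviving loop, H.Handler is a PointerUnion that arrives holding the IR BasicBlock and is rebound to the corresponding MachineBasicBlock. A minimal sketch of that flip, assuming only llvm/ADT/PointerUnion.h and made-up stand-in types:

    #include "llvm/ADT/PointerUnion.h"

    struct BB { int X; };   // stand-in for const BasicBlock
    struct MBB { int Y; };  // stand-in for MachineBasicBlock

    int main() {
      BB IRBlock;
      MBB MachineBlock;
      llvm::PointerUnion<BB *, MBB *> Handler = &IRBlock; // set by WinEHPrepare
      if (Handler)                 // mirrors the "if (H.Handler)" guard above
        Handler = &MachineBlock;   // rebound to the machine block
      return Handler.is<MBB *>() ? 0 : 1; // now holds the MBB side
    }
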
@@ -334,11 +356,11 @@
   VisitedBBs.clear();
   ArgDbgValues.clear();
   ByValArgFrameIndexMap.clear();
   RegFixups.clear();
   StatepointStackSlots.clear();
-  StatepointRelocatedValues.clear();
+  StatepointSpillMaps.clear();
   PreferredExtendType.clear();
 }
 
 /// CreateReg - Allocate a single virtual register for the given type.
 unsigned FunctionLoweringInfo::CreateReg(MVT VT) {
@@ -519,59 +541,28 @@
   VReg = MRI.createVirtualRegister(RC);
   assert(VReg && "null vreg in exception pointer table!");
   return VReg;
 }
 
-/// ComputeUsesVAFloatArgument - Determine if any floating-point values are
-/// being passed to this variadic function, and set the MachineModuleInfo's
-/// usesVAFloatArgument flag if so. This flag is used to emit an undefined
-/// reference to _fltused on Windows, which will link in MSVCRT's
-/// floating-point support.
-void llvm::ComputeUsesVAFloatArgument(const CallInst &I,
-                                      MachineModuleInfo *MMI)
-{
-  FunctionType *FT = cast<FunctionType>(
-      I.getCalledValue()->getType()->getContainedType(0));
-  if (FT->isVarArg() && !MMI->usesVAFloatArgument()) {
-    for (unsigned i = 0, e = I.getNumArgOperands(); i != e; ++i) {
-      Type* T = I.getArgOperand(i)->getType();
-      for (auto i : post_order(T)) {
-        if (i->isFloatingPointTy()) {
-          MMI->setUsesVAFloatArgument(true);
-          return;
-        }
-      }
-    }
-  }
-}
-
-/// AddLandingPadInfo - Extract the exception handling information from the
-/// landingpad instruction and add them to the specified machine module info.
-void llvm::AddLandingPadInfo(const LandingPadInst &I, MachineModuleInfo &MMI,
-                             MachineBasicBlock *MBB) {
-  if (const auto *PF = dyn_cast<Function>(
-          I.getParent()->getParent()->getPersonalityFn()->stripPointerCasts()))
-    MMI.addPersonality(PF);
-
-  if (I.isCleanup())
-    MMI.addCleanup(MBB);
-
-  // FIXME: New EH - Add the clauses in reverse order. This isn't 100% correct,
-  // but we need to do it this way because of how the DWARF EH emitter
-  // processes the clauses.
-  for (unsigned i = I.getNumClauses(); i != 0; --i) {
-    Value *Val = I.getClause(i - 1);
-    if (I.isCatch(i - 1)) {
-      MMI.addCatchTypeInfo(MBB,
-                           dyn_cast<GlobalValue>(Val->stripPointerCasts()));
-    } else {
-      // Add filters in a list.
-      Constant *CVal = cast<Constant>(Val);
-      SmallVector<const GlobalValue*, 4> FilterList;
-      for (User::op_iterator
-             II = CVal->op_begin(), IE = CVal->op_end(); II != IE; ++II)
-        FilterList.push_back(cast<GlobalValue>((*II)->stripPointerCasts()));
-
-      MMI.addFilterTypeInfo(MBB, FilterList);
-    }
-  }
-}
+unsigned
+FunctionLoweringInfo::getOrCreateSwiftErrorVReg(const MachineBasicBlock *MBB,
+                                                const Value *Val) {
+  auto Key = std::make_pair(MBB, Val);
+  auto It = SwiftErrorVRegDefMap.find(Key);
+  // If this is the first use of this swifterror value in this basic block,
+  // create a new virtual register.
+  // After we processed all basic blocks we will satisfy this "upwards exposed
+  // use" by inserting a copy or phi at the beginning of this block.
+  if (It == SwiftErrorVRegDefMap.end()) {
+    auto &DL = MF->getDataLayout();
+    const TargetRegisterClass *RC = TLI->getRegClassFor(TLI->getPointerTy(DL));
+    auto VReg = MF->getRegInfo().createVirtualRegister(RC);
+    SwiftErrorVRegDefMap[Key] = VReg;
+    SwiftErrorVRegUpwardsUse[Key] = VReg;
+    return VReg;
+  } else return It->second;
+}
+
+void FunctionLoweringInfo::setCurrentSwiftErrorVReg(
+    const MachineBasicBlock *MBB, const Value *Val, unsigned VReg) {
+  SwiftErrorVRegDefMap[std::make_pair(MBB, Val)] = VReg;
+}
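
Note: the two functions added at the end are the bookkeeping half of swifterror lowering (ComputeUsesVAFloatArgument and AddLandingPadInfo leave this file in the same change). The first query for a (basic block, swifterror value) pair mints a fresh vreg and records it as an upwards-exposed use, to be satisfied with a copy or phi once all blocks are processed; a later def in the block overwrites the current vreg via setCurrentSwiftErrorVReg. A standalone sketch of that protocol, with std::map and integer ids standing in for the DenseMaps and pointers:

    #include <cassert>
    #include <map>
    #include <utility>

    using Key = std::pair<int /*block*/, int /*value*/>;

    struct SwiftErrorState {
      std::map<Key, unsigned> DefMap;     // current vreg per (block, value)
      std::map<Key, unsigned> UpwardsUse; // first-use vregs awaiting a copy/phi
      unsigned NextVReg = 1;

      // Mirrors getOrCreateSwiftErrorVReg: the first query in a block creates
      // the vreg and marks it upwards-exposed; later queries return the
      // current def.
      unsigned getOrCreate(int Block, int Value) {
        Key K{Block, Value};
        auto It = DefMap.find(K);
        if (It != DefMap.end())
          return It->second;
        unsigned VReg = NextVReg++;
        DefMap[K] = VReg;
        UpwardsUse[K] = VReg;
        return VReg;
      }

      // Mirrors setCurrentSwiftErrorVReg: a def overwrites the current entry.
      void setCurrent(int Block, int Value, unsigned VReg) {
        DefMap[{Block, Value}] = VReg;
      }
    };

    int main() {
      SwiftErrorState S;
      unsigned FirstUse = S.getOrCreate(0, 7); // upwards-exposed use, block 0
      S.setCurrent(0, 7, 42);                  // a later def in the same block
      assert(FirstUse == 1 && S.getOrCreate(0, 7) == 42);
      assert(S.UpwardsUse.count(Key{0, 7}) == 1);
      return 0;
    }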