//===-- GCRootLowering.cpp - Garbage collection infrastructure ------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the lowering for the gc.root mechanism.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/GCMetadata.h"
#include "llvm/CodeGen/GCStrategy.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

namespace {

/// LowerIntrinsics - This pass rewrites calls to the llvm.gcread or
/// llvm.gcwrite intrinsics, replacing them with simple loads and stores as
/// directed by the GCStrategy. It also performs automatic root initialization
/// and custom intrinsic lowering.
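///
/// A typical gc.root-style root declaration in frontend-emitted IR looks
/// roughly like this (illustrative sketch; the value names are hypothetical):
///
///   %slot = alloca i8*
///   call void @llvm.gcroot(i8** %slot, i8* null)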
class LowerIntrinsics : public FunctionPass {
  bool PerformDefaultLowering(Function &F, GCStrategy &Coll);

public:
  static char ID;

  LowerIntrinsics();
  StringRef getPassName() const override;
  void getAnalysisUsage(AnalysisUsage &AU) const override;

  bool doInitialization(Module &M) override;
  bool runOnFunction(Function &F) override;
};

/// GCMachineCodeAnalysis - This is a target-independent pass over the machine
/// function representation to identify safe points for the garbage collector
/// in the machine code. It inserts labels at safe points and populates a
/// GCMetadata record for each function.
class GCMachineCodeAnalysis : public MachineFunctionPass {
  GCFunctionInfo *FI;
  MachineModuleInfo *MMI;
  const TargetInstrInfo *TII;

  void FindSafePoints(MachineFunction &MF);
  void VisitCallPoint(MachineBasicBlock::iterator MI);
  MCSymbol *InsertLabel(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
                        const DebugLoc &DL) const;

  void FindStackOffsets(MachineFunction &MF);

public:
  static char ID;

  GCMachineCodeAnalysis();
  void getAnalysisUsage(AnalysisUsage &AU) const override;

  bool runOnMachineFunction(MachineFunction &MF) override;
};
} // end anonymous namespace

// -----------------------------------------------------------------------------

INITIALIZE_PASS_BEGIN(LowerIntrinsics, "gc-lowering", "GC Lowering", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(GCModuleInfo)
INITIALIZE_PASS_END(LowerIntrinsics, "gc-lowering", "GC Lowering", false, false)

FunctionPass *llvm::createGCLoweringPass() { return new LowerIntrinsics(); }

char LowerIntrinsics::ID = 0;

LowerIntrinsics::LowerIntrinsics() : FunctionPass(ID) {
  initializeLowerIntrinsicsPass(*PassRegistry::getPassRegistry());
}

StringRef LowerIntrinsics::getPassName() const {
  return "Lower Garbage Collection Instructions";
}

void LowerIntrinsics::getAnalysisUsage(AnalysisUsage &AU) const {
  FunctionPass::getAnalysisUsage(AU);
  AU.addRequired<GCModuleInfo>();
  AU.addPreserved<DominatorTreeWrapperPass>();
}

static bool NeedsDefaultLoweringPass(const GCStrategy &C) {
  // Default lowering is necessary only if read or write barriers have a default
  // action. The default for roots is no action.
  return !C.customWriteBarrier() || !C.customReadBarrier() ||
         C.initializeRoots();
}

/// doInitialization - If this module uses the GC intrinsics, find them now.
bool LowerIntrinsics::doInitialization(Module &M) {
  GCModuleInfo *MI = getAnalysisIfAvailable<GCModuleInfo>();
  assert(MI && "LowerIntrinsics didn't require GCModuleInfo!?");
  for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I)
    if (!I->isDeclaration() && I->hasGC())
      MI->getFunctionInfo(*I); // Instantiate the GC strategy.

  return false;
}

/// CouldBecomeSafePoint - Predicate to conservatively determine whether the
/// instruction could introduce a safe point.
static bool CouldBecomeSafePoint(Instruction *I) {
  // The natural definition of instructions which could introduce safe points
  // is:
  //
  //   - call, invoke (AfterCall, BeforeCall)
  //   - phis (Loops)
  //   - invoke, ret, unwind (Exit)
  //
  // However, instructions as seemingly innocuous as arithmetic can become
  // libcalls upon lowering (e.g., div i64 on a 32-bit platform), so instead
  // it is necessary to take a conservative approach.

  if (isa<AllocaInst>(I) || isa<GetElementPtrInst>(I) || isa<StoreInst>(I) ||
      isa<LoadInst>(I))
    return false;

  // llvm.gcroot is safe because it doesn't do anything at runtime.
  if (CallInst *CI = dyn_cast<CallInst>(I))
    if (Function *F = CI->getCalledFunction())
      if (Intrinsic::ID IID = F->getIntrinsicID())
        if (IID == Intrinsic::gcroot)
          return false;

  return true;
}

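/// InsertRootInitializers - Store a null pointer into each gcroot alloca that
/// is not already initialized in the entry block before the first potential
/// safe point, so the collector never sees an uninitialized root slot.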
static bool InsertRootInitializers(Function &F, AllocaInst **Roots,
                                   unsigned Count) {
  // Scroll past alloca instructions.
  BasicBlock::iterator IP = F.getEntryBlock().begin();
  while (isa<AllocaInst>(IP))
    ++IP;

  // Search for initializers in the initial BB.
  SmallPtrSet<AllocaInst *, 16> InitedRoots;
  for (; !CouldBecomeSafePoint(&*IP); ++IP)
    if (StoreInst *SI = dyn_cast<StoreInst>(IP))
      if (AllocaInst *AI =
              dyn_cast<AllocaInst>(SI->getOperand(1)->stripPointerCasts()))
        InitedRoots.insert(AI);

  // Add root initializers.
  bool MadeChange = false;

  for (AllocaInst **I = Roots, **E = Roots + Count; I != E; ++I)
    if (!InitedRoots.count(*I)) {
      StoreInst *SI = new StoreInst(
          ConstantPointerNull::get(cast<PointerType>((*I)->getAllocatedType())),
          *I);
      SI->insertAfter(*I);
      MadeChange = true;
    }

  return MadeChange;
}

/// runOnFunction - Replace gcread/gcwrite intrinsics with loads and stores.
/// Leave gcroot intrinsics; the code generator needs to see those.
bool LowerIntrinsics::runOnFunction(Function &F) {
  // Quick exit for functions that do not use GC.
  if (!F.hasGC())
    return false;

  GCFunctionInfo &FI = getAnalysis<GCModuleInfo>().getFunctionInfo(F);
  GCStrategy &S = FI.getStrategy();

  bool MadeChange = false;

  if (NeedsDefaultLoweringPass(S))
    MadeChange |= PerformDefaultLowering(F, S);

  return MadeChange;
}

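/// PerformDefaultLowering - Lower gcwrite to a plain store, gcread to a plain
/// load, and collect gcroot allocas so they can be null-initialized. As an
/// illustrative sketch (operand names are hypothetical), a write barrier
///
///   call void @llvm.gcwrite(i8* %v, i8* %obj, i8** %slot)
///
/// becomes
///
///   store i8* %v, i8** %slot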
bool LowerIntrinsics::PerformDefaultLowering(Function &F, GCStrategy &S) {
  bool LowerWr = !S.customWriteBarrier();
  bool LowerRd = !S.customReadBarrier();
  bool InitRoots = S.initializeRoots();

  SmallVector<AllocaInst *, 32> Roots;

  bool MadeChange = false;
  for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB) {
    for (BasicBlock::iterator II = BB->begin(), E = BB->end(); II != E;) {
      if (IntrinsicInst *CI = dyn_cast<IntrinsicInst>(II++)) {
        Function *F = CI->getCalledFunction();
        switch (F->getIntrinsicID()) {
        case Intrinsic::gcwrite:
          if (LowerWr) {
            // Replace a write barrier with a simple store.
            Value *St =
                new StoreInst(CI->getArgOperand(0), CI->getArgOperand(2), CI);
            CI->replaceAllUsesWith(St);
            CI->eraseFromParent();
          }
          break;
        case Intrinsic::gcread:
          if (LowerRd) {
            // Replace a read barrier with a simple load.
            Value *Ld = new LoadInst(CI->getArgOperand(1), "", CI);
            Ld->takeName(CI);
            CI->replaceAllUsesWith(Ld);
            CI->eraseFromParent();
          }
          break;
        case Intrinsic::gcroot:
          if (InitRoots) {
            // Initialize the GC root, but do not delete the intrinsic. The
            // backend needs the intrinsic to flag the stack slot.
            Roots.push_back(
                cast<AllocaInst>(CI->getArgOperand(0)->stripPointerCasts()));
          }
          break;
        default:
          continue;
        }

        MadeChange = true;
      }
    }
  }

  if (Roots.size())
    MadeChange |= InsertRootInitializers(F, Roots.begin(), Roots.size());

  return MadeChange;
}

// -----------------------------------------------------------------------------

char GCMachineCodeAnalysis::ID = 0;
char &llvm::GCMachineCodeAnalysisID = GCMachineCodeAnalysis::ID;

INITIALIZE_PASS(GCMachineCodeAnalysis, "gc-analysis",
                "Analyze Machine Code For Garbage Collection", false, false)

GCMachineCodeAnalysis::GCMachineCodeAnalysis() : MachineFunctionPass(ID) {}

void GCMachineCodeAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
  MachineFunctionPass::getAnalysisUsage(AU);
  AU.setPreservesAll();
  AU.addRequired<MachineModuleInfo>();
  AU.addRequired<GCModuleInfo>();
}

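/// InsertLabel - Emit a GC_LABEL pseudo-instruction at the given position and
/// return the temporary symbol it defines; the symbol's final address marks
/// the safe point in the emitted code.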
MCSymbol *GCMachineCodeAnalysis::InsertLabel(MachineBasicBlock &MBB,
                                             MachineBasicBlock::iterator MI,
                                             const DebugLoc &DL) const {
  MCSymbol *Label = MBB.getParent()->getContext().createTempSymbol();
  BuildMI(MBB, MI, DL, TII->get(TargetOpcode::GC_LABEL)).addSym(Label);
  return Label;
}

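/// VisitCallPoint - Record the safe points requested by the strategy around a
/// single call site: a PreCall label immediately before the call and/or a
/// PostCall label at the return address (the instruction after the call).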
void GCMachineCodeAnalysis::VisitCallPoint(MachineBasicBlock::iterator CI) {
  // Find the return address (next instruction), too, so as to bracket the call
  // instruction.
  MachineBasicBlock::iterator RAI = CI;
  ++RAI;

  if (FI->getStrategy().needsSafePoint(GC::PreCall)) {
    MCSymbol *Label = InsertLabel(*CI->getParent(), CI, CI->getDebugLoc());
    FI->addSafePoint(GC::PreCall, Label, CI->getDebugLoc());
  }

  if (FI->getStrategy().needsSafePoint(GC::PostCall)) {
    MCSymbol *Label = InsertLabel(*CI->getParent(), RAI, CI->getDebugLoc());
    FI->addSafePoint(GC::PostCall, Label, CI->getDebugLoc());
  }
}

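/// FindSafePoints - Walk every machine basic block and record a safe point at
/// each call site that is not a tail or sibling call.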
void GCMachineCodeAnalysis::FindSafePoints(MachineFunction &MF) {
  for (MachineFunction::iterator BBI = MF.begin(), BBE = MF.end(); BBI != BBE;
       ++BBI)
    for (MachineBasicBlock::iterator MI = BBI->begin(), ME = BBI->end();
         MI != ME; ++MI)
      if (MI->isCall()) {
        // Do not treat tail or sibling call sites as safe points. This is
        // legal since any arguments passed to the callee which live in the
        // remnants of the caller's frame will be owned and updated by the
        // callee if required.
        if (MI->isTerminator())
          continue;
        VisitCallPoint(MI);
      }
}

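/// FindStackOffsets - Now that the frame layout is final, record the concrete
/// frame offset of each root's stack slot in the GCFunctionInfo, and drop
/// roots whose stack slots were eliminated as dead.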
void GCMachineCodeAnalysis::FindStackOffsets(MachineFunction &MF) {
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
  assert(TFI && "TargetFrameLowering not available!");

  for (GCFunctionInfo::roots_iterator RI = FI->roots_begin();
       RI != FI->roots_end();) {
    // If the root references a dead object, no need to keep it.
    if (MF.getFrameInfo().isDeadObjectIndex(RI->Num)) {
      RI = FI->removeStackRoot(RI);
    } else {
      unsigned FrameReg; // FIXME: surely GCRoot ought to store the
                         // register that the offset is from?
      RI->StackOffset = TFI->getFrameIndexReference(MF, RI->Num, FrameReg);
      ++RI;
    }
  }
}

bool GCMachineCodeAnalysis::runOnMachineFunction(MachineFunction &MF) {
  // Quick exit for functions that do not use GC.
  if (!MF.getFunction().hasGC())
    return false;

  FI = &getAnalysis<GCModuleInfo>().getFunctionInfo(MF.getFunction());
  MMI = &getAnalysis<MachineModuleInfo>();
  TII = MF.getSubtarget().getInstrInfo();

  // Find the size of the stack frame. There may be no correct static frame
  // size; if so, we use UINT64_MAX to represent this.
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const TargetRegisterInfo *RegInfo = MF.getSubtarget().getRegisterInfo();
  const bool DynamicFrameSize = MFI.hasVarSizedObjects() ||
                                RegInfo->needsStackRealignment(MF);
  FI->setFrameSize(DynamicFrameSize ? UINT64_MAX : MFI.getStackSize());

  // Find all safe points.
  if (FI->getStrategy().needsSafePoints())
    FindSafePoints(MF);

  // Find the concrete stack offsets for all roots (stack slots).
  FindStackOffsets(MF);

  return false;
}