comparison lib/CodeGen/SafeStack.cpp @ 120:1172e4bd9c6f

update 4.0.0
author mir3636
date Fri, 25 Nov 2016 19:14:25 +0900
parents
children 803732b1fca8
comparing 101:34baf5011add with 120:1172e4bd9c6f
1 //===-- SafeStack.cpp - Safe Stack Insertion ------------------------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This pass splits the stack into the safe stack (kept as-is for the LLVM backend)
11 // and the unsafe stack (explicitly allocated and managed through the runtime
12 // support library).
13 //
14 // http://clang.llvm.org/docs/SafeStack.html
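//
// As an illustrative sketch (not actual pass output): a frame with
//
//   %buf = alloca [64 x i8]     ; address passed to an external call
//   %i   = alloca i32           ; only loaded and stored directly
//
// keeps %i on the ordinary stack but re-creates %buf at a negative offset
// from the unsafe stack pointer, which the runtime typically keeps in a
// thread-local variable (e.g. __safestack_unsafe_stack_ptr).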
15 //
16 //===----------------------------------------------------------------------===//
17
18 #include "SafeStackColoring.h"
19 #include "SafeStackLayout.h"
20 #include "llvm/ADT/Statistic.h"
21 #include "llvm/ADT/Triple.h"
22 #include "llvm/Analysis/BranchProbabilityInfo.h"
23 #include "llvm/Analysis/ScalarEvolution.h"
24 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
25 #include "llvm/CodeGen/Passes.h"
26 #include "llvm/IR/Constants.h"
27 #include "llvm/IR/DIBuilder.h"
28 #include "llvm/IR/DataLayout.h"
29 #include "llvm/IR/DerivedTypes.h"
30 #include "llvm/IR/Function.h"
31 #include "llvm/IR/IRBuilder.h"
32 #include "llvm/IR/InstIterator.h"
33 #include "llvm/IR/Instructions.h"
34 #include "llvm/IR/IntrinsicInst.h"
35 #include "llvm/IR/Intrinsics.h"
36 #include "llvm/IR/MDBuilder.h"
37 #include "llvm/IR/Module.h"
38 #include "llvm/Pass.h"
39 #include "llvm/Support/CommandLine.h"
40 #include "llvm/Support/Debug.h"
41 #include "llvm/Support/Format.h"
42 #include "llvm/Support/MathExtras.h"
43 #include "llvm/Support/raw_os_ostream.h"
44 #include "llvm/Target/TargetLowering.h"
45 #include "llvm/Target/TargetSubtargetInfo.h"
46 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
47 #include "llvm/Transforms/Utils/Local.h"
48 #include "llvm/Transforms/Utils/ModuleUtils.h"
49
50 using namespace llvm;
51 using namespace llvm::safestack;
52
53 #define DEBUG_TYPE "safestack"
54
55 namespace llvm {
56
57 STATISTIC(NumFunctions, "Total number of functions");
58 STATISTIC(NumUnsafeStackFunctions, "Number of functions with unsafe stack");
59 STATISTIC(NumUnsafeStackRestorePointsFunctions,
60 "Number of functions that use setjmp or exceptions");
61
62 STATISTIC(NumAllocas, "Total number of allocas");
63 STATISTIC(NumUnsafeStaticAllocas, "Number of unsafe static allocas");
64 STATISTIC(NumUnsafeDynamicAllocas, "Number of unsafe dynamic allocas");
65 STATISTIC(NumUnsafeByValArguments, "Number of unsafe byval arguments");
66 STATISTIC(NumUnsafeStackRestorePoints, "Number of setjmps and landingpads");
67
68 } // namespace llvm
69
70 namespace {
71
72 /// Rewrite an SCEV expression for a memory access address to an expression that
73 /// represents offset from the given alloca.
74 ///
75 /// The implementation simply replaces all mentions of the alloca with zero.
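///
/// For instance (a sketch): with %a = alloca i8, i32 16 and an access at
///   %p = getelementptr i8, i8* %a, i64 %i
/// the SCEV of %p mentions %a plus %i; rewriting %a to zero leaves just the
/// byte offset %i from the start of the alloca, whose unsigned range can
/// then be compared against the allocation size.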
76 class AllocaOffsetRewriter : public SCEVRewriteVisitor<AllocaOffsetRewriter> {
77 const Value *AllocaPtr;
78
79 public:
80 AllocaOffsetRewriter(ScalarEvolution &SE, const Value *AllocaPtr)
81 : SCEVRewriteVisitor(SE), AllocaPtr(AllocaPtr) {}
82
83 const SCEV *visitUnknown(const SCEVUnknown *Expr) {
84 if (Expr->getValue() == AllocaPtr)
85 return SE.getZero(Expr->getType());
86 return Expr;
87 }
88 };
89
90 /// The SafeStack pass splits the stack of each function into the safe
91 /// stack, which is only accessed through memory safe dereferences (as
92 /// determined statically), and the unsafe stack, which contains all
93 /// local variables that are accessed in ways that we can't prove to
94 /// be safe.
95 class SafeStack : public FunctionPass {
96 const TargetMachine *TM;
97 const TargetLoweringBase *TL;
98 const DataLayout *DL;
99 ScalarEvolution *SE;
100
101 Type *StackPtrTy;
102 Type *IntPtrTy;
103 Type *Int32Ty;
104 Type *Int8Ty;
105
106 Value *UnsafeStackPtr = nullptr;
107
108 /// Unsafe stack alignment. Each stack frame must ensure that the stack is
109 /// aligned to this value. We need to re-align the unsafe stack if the
110 /// alignment of any object on the stack exceeds this value.
111 ///
112 /// 16 seems like a reasonable upper bound on the alignment of objects that we
113 /// might expect to appear on the stack on most common targets.
114 enum { StackAlignment = 16 };
115
116 /// \brief Return the value of the stack canary.
117 Value *getStackGuard(IRBuilder<> &IRB, Function &F);
118
119 /// \brief Load stack guard from the frame and check if it has changed.
120 void checkStackGuard(IRBuilder<> &IRB, Function &F, ReturnInst &RI,
121 AllocaInst *StackGuardSlot, Value *StackGuard);
122
123 /// \brief Find all static allocas, dynamic allocas, return instructions and
124 /// stack restore points (exception unwind blocks and setjmp calls) in the
125 /// given function and append them to the respective vectors.
126 void findInsts(Function &F, SmallVectorImpl<AllocaInst *> &StaticAllocas,
127 SmallVectorImpl<AllocaInst *> &DynamicAllocas,
128 SmallVectorImpl<Argument *> &ByValArguments,
129 SmallVectorImpl<ReturnInst *> &Returns,
130 SmallVectorImpl<Instruction *> &StackRestorePoints);
131
132 /// \brief Calculate the allocation size of a given alloca. Returns 0 if the
133 /// size cannot be statically determined.
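/// For example, alloca [4 x i32], i32 3 is reported as 48 bytes (3 * 16),
/// while an alloca whose array size is not a compile-time constant yields 0.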
134 uint64_t getStaticAllocaAllocationSize(const AllocaInst* AI);
135
136 /// \brief Allocate space for all static allocas in \p StaticAllocas,
137 /// replace allocas with pointers into the unsafe stack and generate code to
138 /// restore the stack pointer before all return instructions in \p Returns.
139 ///
140 /// \returns A pointer to the top of the unsafe stack after all unsafe static
141 /// allocas are allocated.
142 Value *moveStaticAllocasToUnsafeStack(IRBuilder<> &IRB, Function &F,
143 ArrayRef<AllocaInst *> StaticAllocas,
144 ArrayRef<Argument *> ByValArguments,
145 ArrayRef<ReturnInst *> Returns,
146 Instruction *BasePointer,
147 AllocaInst *StackGuardSlot);
148
149 /// \brief Generate code to restore the stack after all stack restore points
150 /// in \p StackRestorePoints.
151 ///
152 /// \returns A local variable in which to maintain the dynamic top of the
153 /// unsafe stack if needed.
154 AllocaInst *
155 createStackRestorePoints(IRBuilder<> &IRB, Function &F,
156 ArrayRef<Instruction *> StackRestorePoints,
157 Value *StaticTop, bool NeedDynamicTop);
158
159 /// \brief Replace all allocas in \p DynamicAllocas with code to allocate
160 /// space dynamically on the unsafe stack and store the dynamic unsafe stack
161 /// top to \p DynamicTop if non-null.
162 void moveDynamicAllocasToUnsafeStack(Function &F, Value *UnsafeStackPtr,
163 AllocaInst *DynamicTop,
164 ArrayRef<AllocaInst *> DynamicAllocas);
165
166 bool IsSafeStackAlloca(const Value *AllocaPtr, uint64_t AllocaSize);
167
168 bool IsMemIntrinsicSafe(const MemIntrinsic *MI, const Use &U,
169 const Value *AllocaPtr, uint64_t AllocaSize);
170 bool IsAccessSafe(Value *Addr, uint64_t Size, const Value *AllocaPtr,
171 uint64_t AllocaSize);
172
173 public:
174 static char ID; // Pass identification, replacement for typeid.
175 SafeStack(const TargetMachine *TM)
176 : FunctionPass(ID), TM(TM), TL(nullptr), DL(nullptr) {
177 initializeSafeStackPass(*PassRegistry::getPassRegistry());
178 }
179 SafeStack() : SafeStack(nullptr) {}
180
181 void getAnalysisUsage(AnalysisUsage &AU) const override {
182 AU.addRequired<ScalarEvolutionWrapperPass>();
183 }
184
185 bool doInitialization(Module &M) override {
186 DL = &M.getDataLayout();
187
188 StackPtrTy = Type::getInt8PtrTy(M.getContext());
189 IntPtrTy = DL->getIntPtrType(M.getContext());
190 Int32Ty = Type::getInt32Ty(M.getContext());
191 Int8Ty = Type::getInt8Ty(M.getContext());
192
193 return false;
194 }
195
196 bool runOnFunction(Function &F) override;
197 }; // class SafeStack
198
199 uint64_t SafeStack::getStaticAllocaAllocationSize(const AllocaInst* AI) {
200 uint64_t Size = DL->getTypeAllocSize(AI->getAllocatedType());
201 if (AI->isArrayAllocation()) {
202 auto C = dyn_cast<ConstantInt>(AI->getArraySize());
203 if (!C)
204 return 0;
205 Size *= C->getZExtValue();
206 }
207 return Size;
208 }
209
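// A worked illustration (numbers are hypothetical): for a 16-byte alloca, a
// 4-byte access whose rewritten start offset is provably within [0, 8) stays
// within the first 12 bytes of the object, so its range is contained in
// [0, 16) and the access is reported safe; if SCEV cannot bound the offset,
// containment fails and the alloca ends up on the unsafe stack.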
210 bool SafeStack::IsAccessSafe(Value *Addr, uint64_t AccessSize,
211 const Value *AllocaPtr, uint64_t AllocaSize) {
212 AllocaOffsetRewriter Rewriter(*SE, AllocaPtr);
213 const SCEV *Expr = Rewriter.visit(SE->getSCEV(Addr));
214
215 uint64_t BitWidth = SE->getTypeSizeInBits(Expr->getType());
216 ConstantRange AccessStartRange = SE->getUnsignedRange(Expr);
217 ConstantRange SizeRange =
218 ConstantRange(APInt(BitWidth, 0), APInt(BitWidth, AccessSize));
219 ConstantRange AccessRange = AccessStartRange.add(SizeRange);
220 ConstantRange AllocaRange =
221 ConstantRange(APInt(BitWidth, 0), APInt(BitWidth, AllocaSize));
222 bool Safe = AllocaRange.contains(AccessRange);
223
224 DEBUG(dbgs() << "[SafeStack] "
225 << (isa<AllocaInst>(AllocaPtr) ? "Alloca " : "ByValArgument ")
226 << *AllocaPtr << "\n"
227 << " Access " << *Addr << "\n"
228 << " SCEV " << *Expr
229 << " U: " << SE->getUnsignedRange(Expr)
230 << ", S: " << SE->getSignedRange(Expr) << "\n"
231 << " Range " << AccessRange << "\n"
232 << " AllocaRange " << AllocaRange << "\n"
233 << " " << (Safe ? "safe" : "unsafe") << "\n");
234
235 return Safe;
236 }
237
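// For instance, a memcpy whose destination is the alloca and whose length is
// a constant is checked like an ordinary access of that many bytes; a
// non-constant length is conservatively unsafe, and a use of the alloca as
// the source operand is not a store into it, so it is accepted here.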
238 bool SafeStack::IsMemIntrinsicSafe(const MemIntrinsic *MI, const Use &U,
239 const Value *AllocaPtr,
240 uint64_t AllocaSize) {
241 // All MemIntrinsics have destination address in Arg0 and size in Arg2.
242 if (MI->getRawDest() != U) return true;
243 const auto *Len = dyn_cast<ConstantInt>(MI->getLength());
244 // Non-constant size => unsafe. FIXME: try SCEV getRange.
245 if (!Len) return false;
246 return IsAccessSafe(U, Len->getZExtValue(), AllocaPtr, AllocaSize);
247 }
248
249 /// Check whether a given alloca can be kept on the safe (regular) stack.
250 /// The function analyzes all uses of AllocaPtr and checks whether it is
251 /// only accessed in a memory safe way (as decided statically).
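///
/// For example, an alloca that is only accessed through provably in-bounds
/// loads and stores can stay on the safe stack, whereas one whose address is
/// stored to memory, returned, or passed to a call without nocapture and
/// readnone guarantees is treated as unsafe.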
252 bool SafeStack::IsSafeStackAlloca(const Value *AllocaPtr, uint64_t AllocaSize) {
253 // Go through all uses of this alloca and check whether all accesses to the
254 // allocated object are statically known to be memory safe and, hence, the
255 // object can be placed on the safe stack.
256 SmallPtrSet<const Value *, 16> Visited;
257 SmallVector<const Value *, 8> WorkList;
258 WorkList.push_back(AllocaPtr);
259
260 // A DFS search through all uses of the alloca in bitcasts/PHI/GEPs/etc.
261 while (!WorkList.empty()) {
262 const Value *V = WorkList.pop_back_val();
263 for (const Use &UI : V->uses()) {
264 auto I = cast<const Instruction>(UI.getUser());
265 assert(V == UI.get());
266
267 switch (I->getOpcode()) {
268 case Instruction::Load: {
269 if (!IsAccessSafe(UI, DL->getTypeStoreSize(I->getType()), AllocaPtr,
270 AllocaSize))
271 return false;
272 break;
273 }
274 case Instruction::VAArg:
275 // "va-arg" from a pointer is safe.
276 break;
277 case Instruction::Store: {
278 if (V == I->getOperand(0)) {
279 // Stored the pointer - conservatively assume it may be unsafe.
280 DEBUG(dbgs() << "[SafeStack] Unsafe alloca: " << *AllocaPtr
281 << "\n store of address: " << *I << "\n");
282 return false;
283 }
284
285 if (!IsAccessSafe(UI, DL->getTypeStoreSize(I->getOperand(0)->getType()),
286 AllocaPtr, AllocaSize))
287 return false;
288 break;
289 }
290 case Instruction::Ret: {
291 // Information leak.
292 return false;
293 }
294
295 case Instruction::Call:
296 case Instruction::Invoke: {
297 ImmutableCallSite CS(I);
298
299 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
300 if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
301 II->getIntrinsicID() == Intrinsic::lifetime_end)
302 continue;
303 }
304
305 if (const MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I)) {
306 if (!IsMemIntrinsicSafe(MI, UI, AllocaPtr, AllocaSize)) {
307 DEBUG(dbgs() << "[SafeStack] Unsafe alloca: " << *AllocaPtr
308 << "\n unsafe memintrinsic: " << *I
309 << "\n");
310 return false;
311 }
312 continue;
313 }
314
315 // LLVM 'nocapture' attribute is only set for arguments whose address
316 // is not stored, passed around, or used in any other non-trivial way.
317 // We assume that passing a pointer to an object as a 'nocapture
318 // readnone' argument is safe.
319 // FIXME: a more precise solution would require an interprocedural
320 // analysis here, which would look at all uses of an argument inside
321 // the function being called.
322 ImmutableCallSite::arg_iterator B = CS.arg_begin(), E = CS.arg_end();
323 for (ImmutableCallSite::arg_iterator A = B; A != E; ++A)
324 if (A->get() == V)
325 if (!(CS.doesNotCapture(A - B) && (CS.doesNotAccessMemory(A - B) ||
326 CS.doesNotAccessMemory()))) {
327 DEBUG(dbgs() << "[SafeStack] Unsafe alloca: " << *AllocaPtr
328 << "\n unsafe call: " << *I << "\n");
329 return false;
330 }
331 continue;
332 }
333
334 default:
335 if (Visited.insert(I).second)
336 WorkList.push_back(cast<const Instruction>(I));
337 }
338 }
339 }
340
341 // All uses of the alloca are safe, we can place it on the safe stack.
342 return true;
343 }
344
345 Value *SafeStack::getStackGuard(IRBuilder<> &IRB, Function &F) {
346 Value *StackGuardVar = TL->getIRStackGuard(IRB);
347 if (!StackGuardVar)
348 StackGuardVar =
349 F.getParent()->getOrInsertGlobal("__stack_chk_guard", StackPtrTy);
350 return IRB.CreateLoad(StackGuardVar, "StackGuard");
351 }
352
353 void SafeStack::findInsts(Function &F,
354 SmallVectorImpl<AllocaInst *> &StaticAllocas,
355 SmallVectorImpl<AllocaInst *> &DynamicAllocas,
356 SmallVectorImpl<Argument *> &ByValArguments,
357 SmallVectorImpl<ReturnInst *> &Returns,
358 SmallVectorImpl<Instruction *> &StackRestorePoints) {
359 for (Instruction &I : instructions(&F)) {
360 if (auto AI = dyn_cast<AllocaInst>(&I)) {
361 ++NumAllocas;
362
363 uint64_t Size = getStaticAllocaAllocationSize(AI);
364 if (IsSafeStackAlloca(AI, Size))
365 continue;
366
367 if (AI->isStaticAlloca()) {
368 ++NumUnsafeStaticAllocas;
369 StaticAllocas.push_back(AI);
370 } else {
371 ++NumUnsafeDynamicAllocas;
372 DynamicAllocas.push_back(AI);
373 }
374 } else if (auto RI = dyn_cast<ReturnInst>(&I)) {
375 Returns.push_back(RI);
376 } else if (auto CI = dyn_cast<CallInst>(&I)) {
377 // setjmps require stack restore.
378 if (CI->getCalledFunction() && CI->canReturnTwice())
379 StackRestorePoints.push_back(CI);
380 } else if (auto LP = dyn_cast<LandingPadInst>(&I)) {
381 // Exception landing pads require stack restore.
382 StackRestorePoints.push_back(LP);
383 } else if (auto II = dyn_cast<IntrinsicInst>(&I)) {
384 if (II->getIntrinsicID() == Intrinsic::gcroot)
385 llvm::report_fatal_error(
386 "gcroot intrinsic not compatible with safestack attribute");
387 }
388 }
389 for (Argument &Arg : F.args()) {
390 if (!Arg.hasByValAttr())
391 continue;
392 uint64_t Size =
393 DL->getTypeStoreSize(Arg.getType()->getPointerElementType());
394 if (IsSafeStackAlloca(&Arg, Size))
395 continue;
396
397 ++NumUnsafeByValArguments;
398 ByValArguments.push_back(&Arg);
399 }
400 }
401
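// After each restore point (a setjmp call or a landingpad), the emitted code
// re-establishes the unsafe stack pointer, in outline:
//   %top = load DynamicTop      ; or StaticTop directly, if no dynamic allocas
//   store %top, UnsafeStackPtr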
402 AllocaInst *
403 SafeStack::createStackRestorePoints(IRBuilder<> &IRB, Function &F,
404 ArrayRef<Instruction *> StackRestorePoints,
405 Value *StaticTop, bool NeedDynamicTop) {
406 assert(StaticTop && "The stack top isn't set.");
407
408 if (StackRestorePoints.empty())
409 return nullptr;
410
411 // We need the current value of the shadow stack pointer to restore
412 // after longjmp or exception catching.
413
414 // FIXME: On some platforms this could be handled by the longjmp/exception
415 // runtime itself.
416
417 AllocaInst *DynamicTop = nullptr;
418 if (NeedDynamicTop) {
419 // If we also have dynamic allocas, the stack pointer value changes
420 // throughout the function. For now we store it in an alloca.
421 DynamicTop = IRB.CreateAlloca(StackPtrTy, /*ArraySize=*/nullptr,
422 "unsafe_stack_dynamic_ptr");
423 IRB.CreateStore(StaticTop, DynamicTop);
424 }
425
426 // Restore current stack pointer after longjmp/exception catch.
427 for (Instruction *I : StackRestorePoints) {
428 ++NumUnsafeStackRestorePoints;
429
430 IRB.SetInsertPoint(I->getNextNode());
431 Value *CurrentTop = DynamicTop ? IRB.CreateLoad(DynamicTop) : StaticTop;
432 IRB.CreateStore(CurrentTop, UnsafeStackPtr);
433 }
434
435 return DynamicTop;
436 }
437
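// A sketch of what checkStackGuard emits (IR names are illustrative):
//   %guard.cur = load i8*, i8** %StackGuardSlot
//   %cmp = icmp ne i8* %StackGuard, %guard.cur
//   br i1 %cmp, label %fail, label %cont   ; with stack-protector branch weights
// fail:
//   call void @__stack_chk_fail()
//   unreachable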
438 void SafeStack::checkStackGuard(IRBuilder<> &IRB, Function &F, ReturnInst &RI,
439 AllocaInst *StackGuardSlot, Value *StackGuard) {
440 Value *V = IRB.CreateLoad(StackGuardSlot);
441 Value *Cmp = IRB.CreateICmpNE(StackGuard, V);
442
443 auto SuccessProb = BranchProbabilityInfo::getBranchProbStackProtector(true);
444 auto FailureProb = BranchProbabilityInfo::getBranchProbStackProtector(false);
445 MDNode *Weights = MDBuilder(F.getContext())
446 .createBranchWeights(SuccessProb.getNumerator(),
447 FailureProb.getNumerator());
448 Instruction *CheckTerm =
449 SplitBlockAndInsertIfThen(Cmp, &RI,
450 /* Unreachable */ true, Weights);
451 IRBuilder<> IRBFail(CheckTerm);
452 // FIXME: respect -fsanitize-trap / -ftrap-function here?
453 Constant *StackChkFail = F.getParent()->getOrInsertFunction(
454 "__stack_chk_fail", IRB.getVoidTy(), nullptr);
455 IRBFail.CreateCall(StackChkFail, {});
456 }
457
458 /// We explicitly compute and set the unsafe stack layout for all unsafe
459 /// static alloca instructions. We save the unsafe "base pointer" in the
460 /// prologue into a local variable and restore it in the epilogue.
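///
/// A sketch of the resulting frame setup (names are illustrative; the base
/// pointer load and the per-return restore are emitted by runOnFunction):
///   %unsafe_stack_ptr = load i8*, i8** UnsafeStackPtr
///   %top = getelementptr i8, i8* %unsafe_stack_ptr, i32 -FrameSize
///   store i8* %top, i8** UnsafeStackPtr
/// with every unsafe static alloca rewritten to a bitcast of a
/// negative-offset GEP from %unsafe_stack_ptr.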
461 Value *SafeStack::moveStaticAllocasToUnsafeStack(
462 IRBuilder<> &IRB, Function &F, ArrayRef<AllocaInst *> StaticAllocas,
463 ArrayRef<Argument *> ByValArguments, ArrayRef<ReturnInst *> Returns,
464 Instruction *BasePointer, AllocaInst *StackGuardSlot) {
465 if (StaticAllocas.empty() && ByValArguments.empty())
466 return BasePointer;
467
468 DIBuilder DIB(*F.getParent());
469
470 StackColoring SSC(F, StaticAllocas);
471 SSC.run();
472 SSC.removeAllMarkers();
473
474 // Unsafe stack always grows down.
475 StackLayout SSL(StackAlignment);
476 if (StackGuardSlot) {
477 Type *Ty = StackGuardSlot->getAllocatedType();
478 unsigned Align =
479 std::max(DL->getPrefTypeAlignment(Ty), StackGuardSlot->getAlignment());
480 SSL.addObject(StackGuardSlot, getStaticAllocaAllocationSize(StackGuardSlot),
481 Align, SSC.getFullLiveRange());
482 }
483
484 for (Argument *Arg : ByValArguments) {
485 Type *Ty = Arg->getType()->getPointerElementType();
486 uint64_t Size = DL->getTypeStoreSize(Ty);
487 if (Size == 0)
488 Size = 1; // Don't create zero-sized stack objects.
489
490 // Ensure the object is properly aligned.
491 unsigned Align = std::max((unsigned)DL->getPrefTypeAlignment(Ty),
492 Arg->getParamAlignment());
493 SSL.addObject(Arg, Size, Align, SSC.getFullLiveRange());
494 }
495
496 for (AllocaInst *AI : StaticAllocas) {
497 Type *Ty = AI->getAllocatedType();
498 uint64_t Size = getStaticAllocaAllocationSize(AI);
499 if (Size == 0)
500 Size = 1; // Don't create zero-sized stack objects.
501
502 // Ensure the object is properly aligned.
503 unsigned Align =
504 std::max((unsigned)DL->getPrefTypeAlignment(Ty), AI->getAlignment());
505
506 SSL.addObject(AI, Size, Align, SSC.getLiveRange(AI));
507 }
508
509 SSL.computeLayout();
510 unsigned FrameAlignment = SSL.getFrameAlignment();
511
512 // FIXME: tell SSL that we start at a less-than-MaxAlignment aligned location
513 // (AlignmentSkew).
514 if (FrameAlignment > StackAlignment) {
515 // Re-align the base pointer according to the max requested alignment.
516 assert(isPowerOf2_32(FrameAlignment));
517 IRB.SetInsertPoint(BasePointer->getNextNode());
518 BasePointer = cast<Instruction>(IRB.CreateIntToPtr(
519 IRB.CreateAnd(IRB.CreatePtrToInt(BasePointer, IntPtrTy),
520 ConstantInt::get(IntPtrTy, ~uint64_t(FrameAlignment - 1))),
521 StackPtrTy));
522 }
523
524 IRB.SetInsertPoint(BasePointer->getNextNode());
525
526 if (StackGuardSlot) {
527 unsigned Offset = SSL.getObjectOffset(StackGuardSlot);
528 Value *Off = IRB.CreateGEP(BasePointer, // BasePointer is i8*
529 ConstantInt::get(Int32Ty, -Offset));
530 Value *NewAI =
531 IRB.CreateBitCast(Off, StackGuardSlot->getType(), "StackGuardSlot");
532
533 // Replace alloc with the new location.
534 StackGuardSlot->replaceAllUsesWith(NewAI);
535 StackGuardSlot->eraseFromParent();
536 }
537
538 for (Argument *Arg : ByValArguments) {
539 unsigned Offset = SSL.getObjectOffset(Arg);
540 Type *Ty = Arg->getType()->getPointerElementType();
541
542 uint64_t Size = DL->getTypeStoreSize(Ty);
543 if (Size == 0)
544 Size = 1; // Don't create zero-sized stack objects.
545
546 Value *Off = IRB.CreateGEP(BasePointer, // BasePointer is i8*
547 ConstantInt::get(Int32Ty, -Offset));
548 Value *NewArg = IRB.CreateBitCast(Off, Arg->getType(),
549 Arg->getName() + ".unsafe-byval");
550
551 // Replace alloc with the new location.
552 replaceDbgDeclare(Arg, BasePointer, BasePointer->getNextNode(), DIB,
553 /*Deref=*/true, -Offset);
554 Arg->replaceAllUsesWith(NewArg);
555 IRB.SetInsertPoint(cast<Instruction>(NewArg)->getNextNode());
556 IRB.CreateMemCpy(Off, Arg, Size, Arg->getParamAlignment());
557 }
558
559 // Allocate space for every unsafe static AllocaInst on the unsafe stack.
560 for (AllocaInst *AI : StaticAllocas) {
561 IRB.SetInsertPoint(AI);
562 unsigned Offset = SSL.getObjectOffset(AI);
563
564 uint64_t Size = getStaticAllocaAllocationSize(AI);
565 if (Size == 0)
566 Size = 1; // Don't create zero-sized stack objects.
567
568 replaceDbgDeclareForAlloca(AI, BasePointer, DIB, /*Deref=*/true, -Offset);
569 replaceDbgValueForAlloca(AI, BasePointer, DIB, -Offset);
570
571 // Replace uses of the alloca with the new location.
572 // Insert address calculation close to each use to work around PR27844.
573 std::string Name = std::string(AI->getName()) + ".unsafe";
574 while (!AI->use_empty()) {
575 Use &U = *AI->use_begin();
576 Instruction *User = cast<Instruction>(U.getUser());
577
578 Instruction *InsertBefore;
579 if (auto *PHI = dyn_cast<PHINode>(User))
580 InsertBefore = PHI->getIncomingBlock(U)->getTerminator();
581 else
582 InsertBefore = User;
583
584 IRBuilder<> IRBUser(InsertBefore);
585 Value *Off = IRBUser.CreateGEP(BasePointer, // BasePointer is i8*
586 ConstantInt::get(Int32Ty, -Offset));
587 Value *Replacement = IRBUser.CreateBitCast(Off, AI->getType(), Name);
588
589 if (auto *PHI = dyn_cast<PHINode>(User)) {
590 // PHI nodes may have multiple incoming edges from the same BB (why??),
591 // all must be updated at once with the same incoming value.
592 auto *BB = PHI->getIncomingBlock(U);
593 for (unsigned I = 0; I < PHI->getNumIncomingValues(); ++I)
594 if (PHI->getIncomingBlock(I) == BB)
595 PHI->setIncomingValue(I, Replacement);
596 } else {
597 U.set(Replacement);
598 }
599 }
600
601 AI->eraseFromParent();
602 }
603
604 // Re-align BasePointer so that our callees would see it aligned as
605 // expected.
606 // FIXME: no need to update BasePointer in leaf functions.
607 unsigned FrameSize = alignTo(SSL.getFrameSize(), StackAlignment);
608
609 // Update shadow stack pointer in the function epilogue.
610 IRB.SetInsertPoint(BasePointer->getNextNode());
611
612 Value *StaticTop =
613 IRB.CreateGEP(BasePointer, ConstantInt::get(Int32Ty, -FrameSize),
614 "unsafe_stack_static_top");
615 IRB.CreateStore(StaticTop, UnsafeStackPtr);
616 return StaticTop;
617 }
618
619 void SafeStack::moveDynamicAllocasToUnsafeStack(
620 Function &F, Value *UnsafeStackPtr, AllocaInst *DynamicTop,
621 ArrayRef<AllocaInst *> DynamicAllocas) {
622 DIBuilder DIB(*F.getParent());
623
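// In outline, each dynamic alloca below is lowered to (a sketch; exact IR
// depends on types and alignment):
//   size   = ArraySize * sizeof(ElementTy)
//   sp     = ptrtoint(load UnsafeStackPtr)
//   newtop = (sp - size) & ~(Align - 1)     ; round down to Align
//   store inttoptr(newtop), UnsafeStackPtr  ; and into DynamicTop, if present
// after which the alloca's uses are redirected to the new pointer.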
624 for (AllocaInst *AI : DynamicAllocas) {
625 IRBuilder<> IRB(AI);
626
627 // Compute the new SP value (after AI).
628 Value *ArraySize = AI->getArraySize();
629 if (ArraySize->getType() != IntPtrTy)
630 ArraySize = IRB.CreateIntCast(ArraySize, IntPtrTy, false);
631
632 Type *Ty = AI->getAllocatedType();
633 uint64_t TySize = DL->getTypeAllocSize(Ty);
634 Value *Size = IRB.CreateMul(ArraySize, ConstantInt::get(IntPtrTy, TySize));
635
636 Value *SP = IRB.CreatePtrToInt(IRB.CreateLoad(UnsafeStackPtr), IntPtrTy);
637 SP = IRB.CreateSub(SP, Size);
638
639 // Align the SP value to satisfy the AllocaInst, type and stack alignments.
640 unsigned Align = std::max(
641 std::max((unsigned)DL->getPrefTypeAlignment(Ty), AI->getAlignment()),
642 (unsigned)StackAlignment);
643
644 assert(isPowerOf2_32(Align));
645 Value *NewTop = IRB.CreateIntToPtr(
646 IRB.CreateAnd(SP, ConstantInt::get(IntPtrTy, ~uint64_t(Align - 1))),
647 StackPtrTy);
648
649 // Save the stack pointer.
650 IRB.CreateStore(NewTop, UnsafeStackPtr);
651 if (DynamicTop)
652 IRB.CreateStore(NewTop, DynamicTop);
653
654 Value *NewAI = IRB.CreatePointerCast(NewTop, AI->getType());
655 if (AI->hasName() && isa<Instruction>(NewAI))
656 NewAI->takeName(AI);
657
658 replaceDbgDeclareForAlloca(AI, NewAI, DIB, /*Deref=*/true);
659 AI->replaceAllUsesWith(NewAI);
660 AI->eraseFromParent();
661 }
662
663 if (!DynamicAllocas.empty()) {
664 // Now go through the instructions again, replacing stacksave/stackrestore.
665 for (inst_iterator It = inst_begin(&F), Ie = inst_end(&F); It != Ie;) {
666 Instruction *I = &*(It++);
667 auto II = dyn_cast<IntrinsicInst>(I);
668 if (!II)
669 continue;
670
671 if (II->getIntrinsicID() == Intrinsic::stacksave) {
672 IRBuilder<> IRB(II);
673 Instruction *LI = IRB.CreateLoad(UnsafeStackPtr);
674 LI->takeName(II);
675 II->replaceAllUsesWith(LI);
676 II->eraseFromParent();
677 } else if (II->getIntrinsicID() == Intrinsic::stackrestore) {
678 IRBuilder<> IRB(II);
679 Instruction *SI = IRB.CreateStore(II->getArgOperand(0), UnsafeStackPtr);
680 SI->takeName(II);
681 assert(II->use_empty());
682 II->eraseFromParent();
683 }
684 }
685 }
686 }
687
688 bool SafeStack::runOnFunction(Function &F) {
689 DEBUG(dbgs() << "[SafeStack] Function: " << F.getName() << "\n");
690
691 if (!F.hasFnAttribute(Attribute::SafeStack)) {
692 DEBUG(dbgs() << "[SafeStack] safestack is not requested"
693 " for this function\n");
694 return false;
695 }
696
697 if (F.isDeclaration()) {
698 DEBUG(dbgs() << "[SafeStack] function definition"
699 " is not available\n");
700 return false;
701 }
702
703 if (!TM)
704 report_fatal_error("Target machine is required");
705 TL = TM->getSubtargetImpl(F)->getTargetLowering();
706 SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
707
708 ++NumFunctions;
709
710 SmallVector<AllocaInst *, 16> StaticAllocas;
711 SmallVector<AllocaInst *, 4> DynamicAllocas;
712 SmallVector<Argument *, 4> ByValArguments;
713 SmallVector<ReturnInst *, 4> Returns;
714
715 // Collect all points where the stack gets unwound and needs to be restored.
716 // This is only necessary because the runtime (setjmp and unwind code) is
717 // not aware of the unsafe stack and won't unwind/restore it properly.
718 // To work around this problem without changing the runtime, we insert
719 // instrumentation to restore the unsafe stack pointer when necessary.
720 SmallVector<Instruction *, 4> StackRestorePoints;
721
722 // Find all static and dynamic alloca instructions that must be moved to the
723 // unsafe stack, all return instructions and stack restore points.
724 findInsts(F, StaticAllocas, DynamicAllocas, ByValArguments, Returns,
725 StackRestorePoints);
726
727 if (StaticAllocas.empty() && DynamicAllocas.empty() &&
728 ByValArguments.empty() && StackRestorePoints.empty())
729 return false; // Nothing to do in this function.
730
731 if (!StaticAllocas.empty() || !DynamicAllocas.empty() ||
732 !ByValArguments.empty())
733 ++NumUnsafeStackFunctions; // This function has the unsafe stack.
734
735 if (!StackRestorePoints.empty())
736 ++NumUnsafeStackRestorePointsFunctions;
737
738 IRBuilder<> IRB(&F.front(), F.begin()->getFirstInsertionPt());
739 UnsafeStackPtr = TL->getSafeStackPointerLocation(IRB);
740
741 // Load the current stack pointer (we'll also use it as a base pointer).
742 // FIXME: use a dedicated register for it?
743 Instruction *BasePointer =
744 IRB.CreateLoad(UnsafeStackPtr, false, "unsafe_stack_ptr");
745 assert(BasePointer->getType() == StackPtrTy);
746
747 AllocaInst *StackGuardSlot = nullptr;
748 // FIXME: implement weaker forms of stack protector.
749 if (F.hasFnAttribute(Attribute::StackProtect) ||
750 F.hasFnAttribute(Attribute::StackProtectStrong) ||
751 F.hasFnAttribute(Attribute::StackProtectReq)) {
752 Value *StackGuard = getStackGuard(IRB, F);
753 StackGuardSlot = IRB.CreateAlloca(StackPtrTy, nullptr);
754 IRB.CreateStore(StackGuard, StackGuardSlot);
755
756 for (ReturnInst *RI : Returns) {
757 IRBuilder<> IRBRet(RI);
758 checkStackGuard(IRBRet, F, *RI, StackGuardSlot, StackGuard);
759 }
760 }
761
762 // The top of the unsafe stack after all unsafe static allocas are
763 // allocated.
764 Value *StaticTop =
765 moveStaticAllocasToUnsafeStack(IRB, F, StaticAllocas, ByValArguments,
766 Returns, BasePointer, StackGuardSlot);
767
768 // Safe stack object that stores the current unsafe stack top. It is updated
769 // as unsafe dynamic (non-constant-sized) allocas are allocated and freed.
770 // This is only needed if we need to restore stack pointer after longjmp
771 // or exceptions, and we have dynamic allocations.
772 // FIXME: a better alternative might be to store the unsafe stack pointer
773 // before setjmp / invoke instructions.
774 AllocaInst *DynamicTop = createStackRestorePoints(
775 IRB, F, StackRestorePoints, StaticTop, !DynamicAllocas.empty());
776
777 // Handle dynamic allocas.
778 moveDynamicAllocasToUnsafeStack(F, UnsafeStackPtr, DynamicTop,
779 DynamicAllocas);
780
781 // Restore the unsafe stack pointer before each return.
782 for (ReturnInst *RI : Returns) {
783 IRB.SetInsertPoint(RI);
784 IRB.CreateStore(BasePointer, UnsafeStackPtr);
785 }
786
787 DEBUG(dbgs() << "[SafeStack] safestack applied\n");
788 return true;
789 }
790
791 } // anonymous namespace
792
793 char SafeStack::ID = 0;
794 INITIALIZE_TM_PASS_BEGIN(SafeStack, "safe-stack",
795 "Safe Stack instrumentation pass", false, false)
796 INITIALIZE_TM_PASS_END(SafeStack, "safe-stack",
797 "Safe Stack instrumentation pass", false, false)
798
799 FunctionPass *llvm::createSafeStackPass(const llvm::TargetMachine *TM) {
800 return new SafeStack(TM);
801 }