//===-- EHScopeStack.h - Stack for cleanup IR generation --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// These classes should be the minimum interface required for other parts of
// CodeGen to emit cleanups. The implementation is in CGCleanup.cpp and other
// implementation details that are not widely needed are in CGCleanup.h.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CLANG_LIB_CODEGEN_EHSCOPESTACK_H
#define LLVM_CLANG_LIB_CODEGEN_EHSCOPESTACK_H

#include "clang/Basic/LLVM.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Value.h"

namespace clang {
namespace CodeGen {

class CodeGenFunction;

/// A branch fixup. These are required when emitting a goto to a
/// label which hasn't been emitted yet. The goto is optimistically
/// emitted as a branch to the basic block for the label, and (if it
/// occurs in a scope with non-trivial cleanups) a fixup is added to
/// the innermost cleanup. When a (normal) cleanup is popped, any
/// unresolved fixups in that scope are threaded through the cleanup.
struct BranchFixup {
  /// The block containing the terminator which needs to be modified
  /// into a switch if this fixup is resolved into the current scope.
  /// If null, LatestBranch points directly to the destination.
  llvm::BasicBlock *OptimisticBranchBlock;

  /// The ultimate destination of the branch.
  ///
  /// This can be set to null to indicate that this fixup was
  /// successfully resolved.
  llvm::BasicBlock *Destination;

  /// The destination index value.
  unsigned DestinationIndex;

  /// The initial branch of the fixup.
  llvm::BranchInst *InitialBranch;
};

template <class T> struct InvariantValue {
  typedef T type;
  typedef T saved_type;
  static bool needsSaving(type value) { return false; }
  static saved_type save(CodeGenFunction &CGF, type value) { return value; }
  static type restore(CodeGenFunction &CGF, saved_type value) { return value; }
};

/// A metaprogramming class for ensuring that a value will dominate an
/// arbitrary position in a function.
template <class T> struct DominatingValue : InvariantValue<T> {};

template <class T, bool mightBeInstruction =
            std::is_base_of<llvm::Value, T>::value &&
            !std::is_base_of<llvm::Constant, T>::value &&
            !std::is_base_of<llvm::BasicBlock, T>::value>
struct DominatingPointer;
template <class T> struct DominatingPointer<T,false> : InvariantValue<T*> {};
// template <class T> struct DominatingPointer<T,true> at end of file

template <class T> struct DominatingValue<T*> : DominatingPointer<T> {};
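
// A minimal sketch of what a specialization looks like (illustrative only;
// "MyType" and "MySavedType" are hypothetical). Code that must reference a
// value from a point the original computation might not dominate saves it
// through this interface and rebuilds it later:
//
//   template <> struct DominatingValue<MyType> {
//     typedef MyType type;
//     typedef MySavedType saved_type;
//     static bool needsSaving(type value);
//     static saved_type save(CodeGenFunction &CGF, type value);
//     static type restore(CodeGenFunction &CGF, saved_type value);
//   };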

enum CleanupKind : unsigned {
  /// Denotes a cleanup that should run when a scope is exited using exceptional
  /// control flow (a throw statement leading to stack unwinding).
  EHCleanup = 0x1,

  /// Denotes a cleanup that should run when a scope is exited using normal
  /// control flow (falling off the end of the scope, return, goto, ...).
  NormalCleanup = 0x2,

  NormalAndEHCleanup = EHCleanup | NormalCleanup,

  InactiveCleanup = 0x4,
  InactiveEHCleanup = EHCleanup | InactiveCleanup,
  InactiveNormalCleanup = NormalCleanup | InactiveCleanup,
  InactiveNormalAndEHCleanup = NormalAndEHCleanup | InactiveCleanup,

  LifetimeMarker = 0x8,
  NormalEHLifetimeMarker = LifetimeMarker | NormalAndEHCleanup,
};

/// A stack of scopes which respond to exceptions, including cleanups
/// and catch blocks.
class EHScopeStack {
public:
  /* Should switch to alignof(uint64_t) instead of 8, when EHCleanupScope can */
  enum { ScopeStackAlignment = 8 };

  /// A saved depth on the scope stack. This is necessary because
  /// pushing scopes onto the stack invalidates iterators.
  class stable_iterator {
    friend class EHScopeStack;

    /// Offset from StartOfData to EndOfBuffer.
    ptrdiff_t Size;

    stable_iterator(ptrdiff_t Size) : Size(Size) {}

  public:
    static stable_iterator invalid() { return stable_iterator(-1); }
    stable_iterator() : Size(-1) {}

    bool isValid() const { return Size >= 0; }

    /// Returns true if this scope encloses I.
    /// Returns false if I is invalid.
    /// This scope must be valid.
    bool encloses(stable_iterator I) const { return Size <= I.Size; }

    /// Returns true if this scope strictly encloses I: that is,
    /// if it encloses I and is not I.
    /// Returns false if I is invalid.
    /// This scope must be valid.
    bool strictlyEncloses(stable_iterator I) const { return Size < I.Size; }

    friend bool operator==(stable_iterator A, stable_iterator B) {
      return A.Size == B.Size;
    }
    friend bool operator!=(stable_iterator A, stable_iterator B) {
      return A.Size != B.Size;
    }
  };

  /// Information for lazily generating a cleanup. Subclasses must be
  /// POD-like: cleanups will not be destructed, and they will be
  /// allocated on the cleanup stack and freely copied and moved
  /// around.
  ///
  /// Cleanup implementations should generally be declared in an
  /// anonymous namespace.
  class Cleanup {
    // Anchor the construction vtable.
    virtual void anchor();

  protected:
    ~Cleanup() = default;

  public:
    Cleanup(const Cleanup &) = default;
    Cleanup(Cleanup &&) {}
    Cleanup() = default;

    /// Generation flags.
    class Flags {
      enum {
        F_IsForEH = 0x1,
        F_IsNormalCleanupKind = 0x2,
        F_IsEHCleanupKind = 0x4,
        F_HasExitSwitch = 0x8,
      };
      unsigned flags;

    public:
      Flags() : flags(0) {}

      /// isForEH - true if the current emission is for an EH cleanup.
      bool isForEHCleanup() const { return flags & F_IsForEH; }
      bool isForNormalCleanup() const { return !isForEHCleanup(); }
      void setIsForEHCleanup() { flags |= F_IsForEH; }

      bool isNormalCleanupKind() const { return flags & F_IsNormalCleanupKind; }
      void setIsNormalCleanupKind() { flags |= F_IsNormalCleanupKind; }

      /// isEHCleanupKind - true if the cleanup was pushed as an EH
      /// cleanup.
      bool isEHCleanupKind() const { return flags & F_IsEHCleanupKind; }
      void setIsEHCleanupKind() { flags |= F_IsEHCleanupKind; }

      bool hasExitSwitch() const { return flags & F_HasExitSwitch; }
      void setHasExitSwitch() { flags |= F_HasExitSwitch; }
    };

    /// Emit the cleanup. For normal cleanups, this is run in the
    /// same EH context as when the cleanup was pushed, i.e. the
    /// immediately-enclosing context of the cleanup scope. For
    /// EH cleanups, this is run in a terminate context.
    ///
    // \param flags cleanup kind.
    virtual void Emit(CodeGenFunction &CGF, Flags flags) = 0;
  };
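
  // A minimal sketch of a concrete cleanup (illustrative only; the struct name
  // and what Emit would do are hypothetical). A cleanup captures everything it
  // needs as POD-like members, is declared in an anonymous namespace in a .cpp
  // file, and does all of its work in Emit:
  //
  //   namespace {
  //   struct MyReleaseCleanup final : EHScopeStack::Cleanup {
  //     llvm::Value *Ptr;
  //     MyReleaseCleanup(llvm::Value *Ptr) : Ptr(Ptr) {}
  //     void Emit(CodeGenFunction &CGF, Flags flags) override {
  //       // emit IR that releases/destroys *Ptr, using CGF
  //     }
  //   };
  //   } // end anonymous namespace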

  /// ConditionalCleanup stores the saved form of its parameters,
  /// then restores them and performs the cleanup.
  template <class T, class... As>
  class ConditionalCleanup final : public Cleanup {
    typedef std::tuple<typename DominatingValue<As>::saved_type...> SavedTuple;
    SavedTuple Saved;

    template <std::size_t... Is>
    T restore(CodeGenFunction &CGF, std::index_sequence<Is...>) {
      // It's important that the restores are emitted in order. The braced init
      // list guarantees that.
      return T{DominatingValue<As>::restore(CGF, std::get<Is>(Saved))...};
    }

    void Emit(CodeGenFunction &CGF, Flags flags) override {
      restore(CGF, std::index_sequence_for<As...>()).Emit(CGF, flags);
    }

  public:
    ConditionalCleanup(typename DominatingValue<As>::saved_type... A)
        : Saved(A...) {}

    ConditionalCleanup(SavedTuple Tuple) : Saved(std::move(Tuple)) {}
  };
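
  // How this is typically driven (a sketch; the cleanup type "MyCleanup" and
  // argument type "Arg" are hypothetical): when a cleanup's argument is
  // produced in conditionally executed code, the caller saves it through
  // DominatingValue first and pushes the ConditionalCleanup wrapper instead
  // of the cleanup itself:
  //
  //   DominatingValue<Arg>::saved_type Saved = DominatingValue<Arg>::save(CGF, A);
  //   EHStack.pushCleanup<ConditionalCleanup<MyCleanup, Arg>>(Kind, Saved);
  //
  // Emit() later rebuilds the argument from dominating storage, constructs a
  // MyCleanup from it, and forwards the flags to MyCleanup::Emit.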

private:
  // The implementation for this class is in CGException.h and
  // CGException.cpp; the definition is here because it's used as a
  // member of CodeGenFunction.

  /// The start of the scope-stack buffer, i.e. the allocated pointer
  /// for the buffer. All of these pointers are either simultaneously
  /// null or simultaneously valid.
  char *StartOfBuffer;

  /// The end of the buffer.
  char *EndOfBuffer;

  /// The first valid entry in the buffer.
  char *StartOfData;

  /// The innermost normal cleanup on the stack.
  stable_iterator InnermostNormalCleanup;

  /// The innermost EH scope on the stack.
  stable_iterator InnermostEHScope;

  /// The current set of branch fixups. A branch fixup is a jump to
  /// an as-yet unemitted label, i.e. a label for which we don't yet
  /// know the EH stack depth. Whenever we pop a cleanup, we have
  /// to thread all the current branch fixups through it.
  ///
  /// Fixups are recorded as the Use of the respective branch or
  /// switch statement. The use points to the final destination.
  /// When popping out of a cleanup, these uses are threaded through
  /// the cleanup and adjusted to point to the new cleanup.
  ///
  /// Note that branches are allowed to jump into protected scopes
  /// in certain situations; e.g. the following code is legal:
  ///     struct A { ~A(); }; // trivial ctor, non-trivial dtor
  ///     goto foo;
  ///     A a;
  ///    foo:
  ///     bar();
  SmallVector<BranchFixup, 8> BranchFixups;

  char *allocate(size_t Size);
  void deallocate(size_t Size);

  void *pushCleanup(CleanupKind K, size_t DataSize);

public:
  EHScopeStack() : StartOfBuffer(nullptr), EndOfBuffer(nullptr),
                   StartOfData(nullptr), InnermostNormalCleanup(stable_end()),
                   InnermostEHScope(stable_end()) {}
  ~EHScopeStack() { delete[] StartOfBuffer; }

  /// Push a lazily-created cleanup on the stack.
  template <class T, class... As> void pushCleanup(CleanupKind Kind, As... A) {
    static_assert(alignof(T) <= ScopeStackAlignment,
                  "Cleanup's alignment is too large.");
    void *Buffer = pushCleanup(Kind, sizeof(T));
    Cleanup *Obj = new (Buffer) T(A...);
    (void) Obj;
  }
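
  // Typical use from IR generation (a sketch; MyReleaseCleanup is the
  // hypothetical cleanup sketched above, and EHStack is assumed to be
  // CodeGenFunction's EHScopeStack member): the cleanup is constructed in
  // place on the stack, with its constructor arguments passed by value.
  //
  //   CGF.EHStack.pushCleanup<MyReleaseCleanup>(NormalAndEHCleanup, Addr);
  //
  // The arguments are copied into the stack's buffer, so they must be
  // POD-like, just like the cleanup object itself.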

  /// Push a lazily-created cleanup on the stack. Tuple version.
  template <class T, class... As>
  void pushCleanupTuple(CleanupKind Kind, std::tuple<As...> A) {
    static_assert(alignof(T) <= ScopeStackAlignment,
                  "Cleanup's alignment is too large.");
    void *Buffer = pushCleanup(Kind, sizeof(T));
    Cleanup *Obj = new (Buffer) T(std::move(A));
    (void) Obj;
  }

  // Feel free to add more variants of the following:

  /// Push a cleanup with non-constant storage requirements on the
  /// stack. The cleanup type must provide an additional static method:
  ///   static size_t getExtraSize(size_t);
  /// The argument to this method will be the value N, which will also
  /// be passed as the first argument to the constructor.
  ///
  /// The data stored in the extra storage must obey the same
  /// restrictions as normal cleanup member data.
  ///
  /// The pointer returned from this method is valid until the cleanup
  /// stack is modified.
  template <class T, class... As>
  T *pushCleanupWithExtra(CleanupKind Kind, size_t N, As... A) {
    static_assert(alignof(T) <= ScopeStackAlignment,
                  "Cleanup's alignment is too large.");
    void *Buffer = pushCleanup(Kind, sizeof(T) + T::getExtraSize(N));
    return new (Buffer) T(N, A...);
  }
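
  // Sketch of a cleanup with trailing variable-size storage (illustrative;
  // the type is hypothetical). getExtraSize(N) reports how many extra bytes
  // to allocate directly after the object, and the same N is passed as the
  // first constructor argument:
  //
  //   struct MyArrayCleanup final : EHScopeStack::Cleanup {
  //     size_t NumElts;
  //     MyArrayCleanup(size_t NumElts, llvm::Value *Base) : NumElts(NumElts) {
  //       // would typically copy per-element data into the extra storage
  //     }
  //     static size_t getExtraSize(size_t NumElts) {
  //       return NumElts * sizeof(llvm::Value *);
  //     }
  //     void Emit(CodeGenFunction &CGF, Flags flags) override { /* ... */ }
  //   };
  //
  //   MyArrayCleanup *C =
  //       EHStack.pushCleanupWithExtra<MyArrayCleanup>(NormalCleanup, N, Base);
  //   // C and its extra storage stay valid until the stack is next modified.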

  void pushCopyOfCleanup(CleanupKind Kind, const void *Cleanup, size_t Size) {
    void *Buffer = pushCleanup(Kind, Size);
    std::memcpy(Buffer, Cleanup, Size);
  }

  /// Pops a cleanup scope off the stack. This is private to CGCleanup.cpp.
  void popCleanup();

  /// Push a set of catch handlers on the stack. The catch is
  /// uninitialized and will need to have the given number of handlers
  /// set on it.
  class EHCatchScope *pushCatch(unsigned NumHandlers);

  /// Pops a catch scope off the stack. This is private to CGException.cpp.
  void popCatch();

  /// Push an exceptions filter on the stack.
  class EHFilterScope *pushFilter(unsigned NumFilters);

  /// Pops an exceptions filter off the stack.
  void popFilter();

  /// Push a terminate handler on the stack.
  void pushTerminate();

  /// Pops a terminate handler off the stack.
  void popTerminate();

  // Returns true iff the current scope is either empty or contains only
  // lifetime markers, i.e. no real cleanup code
  bool containsOnlyLifetimeMarkers(stable_iterator Old) const;

  /// Determines whether the exception-scopes stack is empty.
  bool empty() const { return StartOfData == EndOfBuffer; }

  bool requiresLandingPad() const;

  /// Determines whether there are any normal cleanups on the stack.
  bool hasNormalCleanups() const {
    return InnermostNormalCleanup != stable_end();
  }

  /// Returns the innermost normal cleanup on the stack, or
  /// stable_end() if there are no normal cleanups.
  stable_iterator getInnermostNormalCleanup() const {
    return InnermostNormalCleanup;
  }
  stable_iterator getInnermostActiveNormalCleanup() const;

  stable_iterator getInnermostEHScope() const {
    return InnermostEHScope;
  }

  /// An unstable reference to a scope-stack depth. Invalidated by
  /// pushes but not pops.
  class iterator;

  /// Returns an iterator pointing to the innermost EH scope.
  iterator begin() const;

  /// Returns an iterator pointing to the outermost EH scope.
  iterator end() const;

  /// Create a stable reference to the top of the EH stack. The
  /// returned reference is valid until that scope is popped off the
  /// stack.
  stable_iterator stable_begin() const {
    return stable_iterator(EndOfBuffer - StartOfData);
  }

  /// Create a stable reference to the bottom of the EH stack.
  static stable_iterator stable_end() {
    return stable_iterator(0);
  }
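
  // Typical pattern (a sketch): record the current depth before emitting a
  // scope, push cleanups while emitting its body, and pop back down to the
  // recorded depth on exit. The stable_iterator survives intervening pushes,
  // which would invalidate an ordinary iterator:
  //
  //   EHScopeStack::stable_iterator Depth = EHStack.stable_begin();
  //   // ... push cleanups while emitting the scope's body ...
  //   // ... on exit, pop cleanups until stable_begin() == Depth ...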

  /// Translates an iterator into a stable_iterator.
  stable_iterator stabilize(iterator it) const;

  /// Turn a stable reference to a scope depth into an unstable pointer
394 /// to the EH stack.
|
|
395 iterator find(stable_iterator save) const;
|
|
396
|
|
397 /// Add a branch fixup to the current cleanup scope.
|
|
398 BranchFixup &addBranchFixup() {
|
|
399 assert(hasNormalCleanups() && "adding fixup in scope without cleanups");
|
|
400 BranchFixups.push_back(BranchFixup());
|
|
401 return BranchFixups.back();
|
|
402 }
|
|
403
|
|
404 unsigned getNumBranchFixups() const { return BranchFixups.size(); }
|
|
405 BranchFixup &getBranchFixup(unsigned I) {
|
|
406 assert(I < getNumBranchFixups());
|
|
407 return BranchFixups[I];
|
|
408 }
|
|
409
|
|
410 /// Pops lazily-removed fixups from the end of the list. This
|
|
411 /// should only be called by procedures which have just popped a
|
|
412 /// cleanup or resolved one or more fixups.
|
|
413 void popNullFixups();
|
|
414
|
|
415 /// Clears the branch-fixups list. This should only be called by
|
|
416 /// ResolveAllBranchFixups.
|
|
417 void clearFixups() { BranchFixups.clear(); }
|
|
418 };
|
|
419
|
|
420 } // namespace CodeGen
|
|
421 } // namespace clang
|
|
422
|
|
423 #endif
|