//===- PipelineDataTransfer.cpp --- Pass for pipelining data movement ---*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements a pass to pipeline data transfers.
//
//===----------------------------------------------------------------------===//

#include "mlir/Transforms/Passes.h"

#include "mlir/Analysis/AffineAnalysis.h"
#include "mlir/Analysis/LoopAnalysis.h"
#include "mlir/Analysis/Utils.h"
#include "mlir/Dialect/AffineOps/AffineOps.h"
#include "mlir/Dialect/StandardOps/Ops.h"
#include "mlir/IR/Builders.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Transforms/LoopUtils.h"
#include "mlir/Transforms/Utils.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/Support/Debug.h"

#define DEBUG_TYPE "affine-pipeline-data-transfer"

using namespace mlir;

namespace {

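/// Pipelines non-blocking DMA transfers within 'affine.for' ops: the buffers
/// in the faster memory space and the DMA tag memrefs are double buffered, and
/// the loop body is then software pipelined so that each iteration's transfer
/// overlaps the previous iteration's compute.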
struct PipelineDataTransfer : public FunctionPass<PipelineDataTransfer> {
  void runOnFunction() override;
  void runOnAffineForOp(AffineForOp forOp);

  std::vector<AffineForOp> forOps;
};

} // end anonymous namespace

/// Creates a pass to pipeline explicit movement of data across levels of the
/// memory hierarchy.
std::unique_ptr<OpPassBase<FuncOp>> mlir::createPipelineDataTransferPass() {
  return std::make_unique<PipelineDataTransfer>();
}

// Returns the position of the tag memref operand given a DMA operation.
// Temporary utility: will be replaced when DmaStart/DmaFinish abstract op's are
// added. TODO(b/117228571)
static unsigned getTagMemRefPos(Operation &dmaInst) {
  assert(isa<AffineDmaStartOp>(dmaInst) || isa<AffineDmaWaitOp>(dmaInst));
  if (auto dmaStartOp = dyn_cast<AffineDmaStartOp>(dmaInst)) {
    return dmaStartOp.getTagMemRefOperandIndex();
  }
  // First operand for a dma finish operation.
  return 0;
}

/// Doubles the buffer of the supplied memref on the specified 'affine.for'
/// operation by adding a leading dimension of size two to the memref.
/// Replaces all uses of the old memref by the new one while indexing the newly
/// added dimension by the loop IV of the specified 'affine.for' operation
/// modulo 2. Returns false if such a replacement cannot be performed.
static bool doubleBuffer(Value oldMemRef, AffineForOp forOp) {
  auto *forBody = forOp.getBody();
  OpBuilder bInner(forBody, forBody->begin());

  // Doubles the shape with a leading dimension extent of 2.
  auto doubleShape = [&](MemRefType oldMemRefType) -> MemRefType {
    // Add the leading dimension in the shape for the double buffer.
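    // E.g., a memref<256x32xf32> buffer becomes memref<2x256x32xf32>, with the
    // two copies selected by the new leading index (0 or 1).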
    ArrayRef<int64_t> oldShape = oldMemRefType.getShape();
    SmallVector<int64_t, 4> newShape(1 + oldMemRefType.getRank());
    newShape[0] = 2;
    std::copy(oldShape.begin(), oldShape.end(), newShape.begin() + 1);
    return MemRefType::Builder(oldMemRefType)
        .setShape(newShape)
        .setAffineMaps({});
  };

  auto oldMemRefType = oldMemRef.getType().cast<MemRefType>();
  auto newMemRefType = doubleShape(oldMemRefType);

  // The double buffer is allocated right before 'forInst'.
  auto *forInst = forOp.getOperation();
  OpBuilder bOuter(forInst);
  // Put together alloc operands for any dynamic dimensions of the memref.
  SmallVector<Value, 4> allocOperands;
  unsigned dynamicDimCount = 0;
  for (auto dimSize : oldMemRefType.getShape()) {
    if (dimSize == -1)
      allocOperands.push_back(bOuter.create<DimOp>(forInst->getLoc(), oldMemRef,
                                                   dynamicDimCount++));
  }

  // Create and place the alloc right before the 'affine.for' operation.
  Value newMemRef =
      bOuter.create<AllocOp>(forInst->getLoc(), newMemRefType, allocOperands);

  // Create 'iv mod 2' value to index the leading dimension.
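  // Since the IV advances by 'step' each iteration, (iv floordiv step) grows
  // by one per iteration, so taking it modulo 2 alternates between the two
  // buffer copies on successive iterations.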
  auto d0 = bInner.getAffineDimExpr(0);
  int64_t step = forOp.getStep();
  auto modTwoMap = AffineMap::get(/*dimCount=*/1, /*symbolCount=*/0,
                                  {d0.floorDiv(step) % 2});
  auto ivModTwoOp = bInner.create<AffineApplyOp>(forOp.getLoc(), modTwoMap,
                                                 forOp.getInductionVar());

  // replaceAllMemRefUsesWith will succeed unless the forOp body has
  // non-dereferencing uses of the memref (dealloc's are fine though).
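  // The domInstFilter below limits the rewrite to ops dominated by the first
  // op of the loop body, i.e., to uses inside this loop; uses outside it (such
  // as a dealloc after the loop) keep referring to the old memref.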
  if (failed(replaceAllMemRefUsesWith(
          oldMemRef, newMemRef,
          /*extraIndices=*/{ivModTwoOp},
          /*indexRemap=*/AffineMap(),
          /*extraOperands=*/{},
          /*symbolOperands=*/{},
          /*domInstFilter=*/&*forOp.getBody()->begin()))) {
    LLVM_DEBUG(
        forOp.emitError("memref replacement for double buffering failed"));
    ivModTwoOp.erase();
    return false;
  }
  // Insert the dealloc op right after the for loop.
  bOuter.setInsertionPointAfter(forInst);
  bOuter.create<DeallocOp>(forInst->getLoc(), newMemRef);

  return true;
}

/// Pipelines data transfers within each 'affine.for' op of the function,
/// processing innermost loops first.
void PipelineDataTransfer::runOnFunction() {
  // Do a post order walk so that inner loop DMAs are processed first. This is
  // necessary since 'affine.for' operations nested within would otherwise
  // become invalid (erased) when the outer loop is pipelined (the pipelined one
  // gets deleted and replaced by a prologue, a new steady-state loop and an
  // epilogue).
  forOps.clear();
  getFunction().walk([&](AffineForOp forOp) { forOps.push_back(forOp); });
  for (auto forOp : forOps)
    runOnAffineForOp(forOp);
}

// Check if tags of the dma start op and dma wait op match.
static bool checkTagMatch(AffineDmaStartOp startOp, AffineDmaWaitOp waitOp) {
  if (startOp.getTagMemRef() != waitOp.getTagMemRef())
    return false;
  auto startIndices = startOp.getTagIndices();
  auto waitIndices = waitOp.getTagIndices();
  // Both of these have the same number of indices since they correspond to the
  // same tag memref.
  for (auto it = startIndices.begin(), wIt = waitIndices.begin(),
            e = startIndices.end();
       it != e; ++it, ++wIt) {
    // Keep it simple for now, just checking if indices match.
    // TODO(mlir-team): this would in general need to check if there is no
    // intervening write writing to the same tag location, i.e., memory last
    // write/data flow analysis. This is however sufficient/powerful enough for
    // now since the DMA generation pass or the input for it will always have
    // start/wait with matching tags (same SSA operand indices).
    if (*it != *wIt)
      return false;
  }
  return true;
}

// Identify matching DMA start/finish operations to overlap computation with.
static void findMatchingStartFinishInsts(
    AffineForOp forOp,
    SmallVectorImpl<std::pair<Operation *, Operation *>> &startWaitPairs) {

  // Collect outgoing DMA operations - needed to check for dependences below.
  SmallVector<AffineDmaStartOp, 4> outgoingDmaOps;
  for (auto &op : *forOp.getBody()) {
    auto dmaStartOp = dyn_cast<AffineDmaStartOp>(op);
    if (dmaStartOp && dmaStartOp.isSrcMemorySpaceFaster())
      outgoingDmaOps.push_back(dmaStartOp);
  }

  SmallVector<Operation *, 4> dmaStartInsts, dmaFinishInsts;
  for (auto &op : *forOp.getBody()) {
    // Collect DMA finish operations.
    if (isa<AffineDmaWaitOp>(op)) {
      dmaFinishInsts.push_back(&op);
      continue;
    }
    auto dmaStartOp = dyn_cast<AffineDmaStartOp>(op);
    if (!dmaStartOp)
      continue;

    // Only DMAs incoming into higher memory spaces are pipelined for now.
    // TODO(bondhugula): handle outgoing DMA pipelining.
    if (!dmaStartOp.isDestMemorySpaceFaster())
      continue;

    // Check for dependence with outgoing DMAs. Doing this conservatively.
    // TODO(andydavis,bondhugula): use the dependence analysis to check for
    // dependences between an incoming and outgoing DMA in the same iteration.
    auto it = outgoingDmaOps.begin();
    for (; it != outgoingDmaOps.end(); ++it) {
      if (it->getDstMemRef() == dmaStartOp.getSrcMemRef())
        break;
    }
    if (it != outgoingDmaOps.end())
      continue;

    // We only double buffer if the buffer is not live out of loop.
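    // Double buffering changes the buffer's type (an extra leading dimension
    // indexed by the IV), so uses outside this loop could not be rewritten
    // consistently.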
    auto memref = dmaStartOp.getOperand(dmaStartOp.getFasterMemPos());
    bool escapingUses = false;
    for (auto *user : memref.getUsers()) {
      // We can double buffer regardless of dealloc's outside the loop.
      if (isa<DeallocOp>(user))
        continue;
      if (!forOp.getBody()->findAncestorOpInBlock(*user)) {
        LLVM_DEBUG(llvm::dbgs()
                   << "can't pipeline: buffer is live out of loop\n";);
        escapingUses = true;
        break;
      }
    }
    if (!escapingUses)
      dmaStartInsts.push_back(&op);
  }

  // For each start operation, we look for a matching finish operation.
  for (auto *dmaStartInst : dmaStartInsts) {
    for (auto *dmaFinishInst : dmaFinishInsts) {
      if (checkTagMatch(cast<AffineDmaStartOp>(dmaStartInst),
                        cast<AffineDmaWaitOp>(dmaFinishInst))) {
        startWaitPairs.push_back({dmaStartInst, dmaFinishInst});
        break;
      }
    }
  }
}

/// Overlap DMA transfers with computation in this loop. If successful,
/// 'forOp' is deleted, and a prologue, a new pipelined loop, and epilogue are
/// inserted right before where it was.
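///
/// The steps: (1) double buffer the fast-memory buffers and the DMA tag
/// memrefs, (2) assign shift 0 to the DMA start ops and their index
/// computations and shift 1 to everything else, and (3) skew the loop body by
/// these shifts.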
void PipelineDataTransfer::runOnAffineForOp(AffineForOp forOp) {
  auto mayBeConstTripCount = getConstantTripCount(forOp);
  if (!mayBeConstTripCount.hasValue()) {
    LLVM_DEBUG(
        forOp.emitRemark("won't pipeline due to unknown trip count loop"));
    return;
  }

  SmallVector<std::pair<Operation *, Operation *>, 4> startWaitPairs;
  findMatchingStartFinishInsts(forOp, startWaitPairs);

  if (startWaitPairs.empty()) {
    LLVM_DEBUG(forOp.emitRemark("No dma start/finish pairs\n"));
    return;
  }

  // Double the buffers for the higher memory space memref's.
  // Identify memref's to replace by scanning through all DMA start
  // operations. A DMA start operation has two memref's - the one from the
  // higher level of memory hierarchy is the one to double buffer.
  // TODO(bondhugula): check whether double-buffering is even necessary.
  // TODO(bondhugula): make this work with different layouts: assuming here that
  // the dimension we are adding here for the double buffering is the outermost
  // dimension.
  for (auto &pair : startWaitPairs) {
    auto *dmaStartInst = pair.first;
    Value oldMemRef = dmaStartInst->getOperand(
        cast<AffineDmaStartOp>(dmaStartInst).getFasterMemPos());
    if (!doubleBuffer(oldMemRef, forOp)) {
      // Normally, double buffering should not fail because we already checked
      // that there are no uses outside.
      LLVM_DEBUG(llvm::dbgs()
                 << "double buffering failed for " << *dmaStartInst << "\n";);
      // IR still valid and semantically correct.
      return;
    }
    // If the old memref has no more uses, remove its 'dead' alloc if it was
    // alloc'ed. (note: DMA buffers are rarely function live-in; but a 'dim'
    // operation could have been used on it if it was dynamically shaped in
    // order to create the double buffer above.)
    // '-canonicalize' does this in a more general way, but we do the simple,
    // common case here anyway so that the output / test cases look clearer.
    if (auto *allocInst = oldMemRef.getDefiningOp()) {
      if (oldMemRef.use_empty()) {
        allocInst->erase();
      } else if (oldMemRef.hasOneUse()) {
        if (auto dealloc = dyn_cast<DeallocOp>(*oldMemRef.user_begin())) {
          dealloc.erase();
          allocInst->erase();
        }
      }
    }
  }

  // Double the buffers for tag memrefs.
  for (auto &pair : startWaitPairs) {
    auto *dmaFinishInst = pair.second;
    Value oldTagMemRef =
        dmaFinishInst->getOperand(getTagMemRefPos(*dmaFinishInst));
    if (!doubleBuffer(oldTagMemRef, forOp)) {
      LLVM_DEBUG(llvm::dbgs() << "tag double buffering failed\n";);
      return;
    }
    // If the old tag has no uses or a single dealloc use, remove it.
    // (canonicalization handles more complex cases).
    if (auto *tagAllocInst = oldTagMemRef.getDefiningOp()) {
      if (oldTagMemRef.use_empty()) {
        tagAllocInst->erase();
      } else if (oldTagMemRef.hasOneUse()) {
        if (auto dealloc = dyn_cast<DeallocOp>(*oldTagMemRef.user_begin())) {
          dealloc.erase();
          tagAllocInst->erase();
        }
      }
    }
  }

  // Double buffering would have invalidated all the old DMA start/wait insts.
  startWaitPairs.clear();
  findMatchingStartFinishInsts(forOp, startWaitPairs);

  // Map each operation to the shift to apply to it; entries are also created
  // for the affine.apply ops that compute the DMA start operands.
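  // Giving the DMA start ops (and their index computations) shift 0 and
  // everything else shift 1 makes the skewed loop issue one iteration's
  // transfer while executing the wait and compute of the previous one.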
  DenseMap<Operation *, unsigned> instShiftMap;
  for (auto &pair : startWaitPairs) {
    auto *dmaStartInst = pair.first;
    assert(isa<AffineDmaStartOp>(dmaStartInst));
    instShiftMap[dmaStartInst] = 0;
    // Set shifts for DMA start op's affine operand computation slices to 0.
    SmallVector<AffineApplyOp, 4> sliceOps;
    mlir::createAffineComputationSlice(dmaStartInst, &sliceOps);
    if (!sliceOps.empty()) {
      for (auto sliceOp : sliceOps) {
        instShiftMap[sliceOp.getOperation()] = 0;
      }
    } else {
      // If a slice wasn't created, the reachable affine.apply op's from its
      // operands are the ones that go with it.
      SmallVector<Operation *, 4> affineApplyInsts;
      SmallVector<Value, 4> operands(dmaStartInst->getOperands());
      getReachableAffineApplyOps(operands, affineApplyInsts);
      for (auto *op : affineApplyInsts) {
        instShiftMap[op] = 0;
      }
    }
  }
  // Everything else (including compute ops and dma finish) is shifted by one.
  for (auto &op : *forOp.getBody()) {
    if (instShiftMap.find(&op) == instShiftMap.end()) {
      instShiftMap[&op] = 1;
    }
  }

  // Get shifts stored in map.
  std::vector<uint64_t> shifts(forOp.getBody()->getOperations().size());
  unsigned s = 0;
  for (auto &op : *forOp.getBody()) {
    assert(instShiftMap.find(&op) != instShiftMap.end());
    shifts[s++] = instShiftMap[&op];

    // Tagging operations with shifts for debugging purposes.
    LLVM_DEBUG({
      OpBuilder b(&op);
      op.setAttr("shift", b.getI64IntegerAttr(shifts[s - 1]));
    });
  }

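  // The skew below restructures the loop into the prologue / pipelined loop /
  // epilogue described in the function comment; first verify that shifting by
  // these amounts preserves all dependences.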
  if (!isInstwiseShiftValid(forOp, shifts)) {
    // Violates dependences.
    LLVM_DEBUG(llvm::dbgs() << "Shifts invalid - unexpected\n";);
    return;
  }

  if (failed(instBodySkew(forOp, shifts))) {
    LLVM_DEBUG(llvm::dbgs() << "op body skewing failed - unexpected\n";);
    return;
  }
}

static PassRegistration<PipelineDataTransfer> pass(
    "affine-pipeline-data-transfer",
    "Pipeline non-blocking data transfers between explicitly managed levels of "
    "the memory hierarchy");