// RUN: mlir-opt -convert-linalg-on-tensors-to-buffers -buffer-placement -split-input-file %s | FileCheck %s -dump-input-on-failure

#map0 = affine_map<(d0) -> (d0)>

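// Checks that a generic op with two tensor results is rewritten to write into
// two freshly allocated buffers, which are copied into the output buffer
// arguments introduced for the results and then deallocated.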
// CHECK-LABEL: func @multiple_results_generic_op
func @multiple_results_generic_op(%arg0: tensor<4xf32>) -> (tensor<4xf32>, tensor<4xf32>) {
  %0, %1 = linalg.generic {args_in = 1 : i64, args_out = 2 : i64, indexing_maps = [#map0, #map0, #map0], iterator_types = ["parallel"]} %arg0 {
  ^bb0(%gen_arg1: f32):
    %tmp1 = exp %gen_arg1 : f32
    linalg.yield %tmp1, %tmp1 : f32, f32
  }: tensor<4xf32> -> (tensor<4xf32>, tensor<4xf32>)
  return %0, %1 : tensor<4xf32>, tensor<4xf32>
}
// CHECK: (%[[NEW_ARG0:.*]]: [[TYPE:.*]], %[[ARG1_RESULT:.*]]: [[TYPE]], %[[ARG2_RESULT:.*]]: [[TYPE]])
// CHECK: %[[FIRST_ALLOC:.*]] = alloc() : [[TYPE]]
// CHECK: %[[SECOND_ALLOC:.*]] = alloc() : [[TYPE]]
// CHECK: linalg.generic
// CHECK-SAME: %[[NEW_ARG0]], %[[FIRST_ALLOC]], %[[SECOND_ALLOC]]
// CHECK-NEXT: ^{{[a-z0-9_]*}}
// CHECK-SAME: %{{.*}}: f32, %{{.*}}: f32, %{{.*}}: f32
// CHECK-NEXT: %{{.*}} = exp
// CHECK-NEXT: linalg.yield
// CHECK-NEXT: [[TYPE]], [[TYPE]], [[TYPE]]
// CHECK: linalg.copy(%[[FIRST_ALLOC]], %[[ARG1_RESULT]])
// CHECK: dealloc %[[FIRST_ALLOC]]
// CHECK: linalg.copy(%[[SECOND_ALLOC]], %[[ARG2_RESULT]])
// CHECK: dealloc %[[SECOND_ALLOC]]
// CHECK: return

// -----

#map0 = affine_map<(d0) -> (d0)>

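// Checks that chained generic ops each write into their own allocation, and
// that the intermediate buffer is deallocated after its last use, before the
// final result is copied into the output buffer argument.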
// CHECK-LABEL: func @chained_operations
func @chained_operations(%arg0: tensor<4xf32>) -> tensor<4xf32> {
  %0 = linalg.generic {args_in = 1 : i64, args_out = 1 : i64, indexing_maps = [#map0, #map0], iterator_types = ["parallel"]} %arg0 {
  ^bb0(%gen_arg1: f32):
    %tmp1 = exp %gen_arg1 : f32
    linalg.yield %tmp1 : f32
  }: tensor<4xf32> -> tensor<4xf32>
  %1 = linalg.generic {args_in = 1 : i64, args_out = 1 : i64, indexing_maps = [#map0, #map0], iterator_types = ["parallel"]} %0 {
  ^bb0(%gen_arg2: f32):
    %tmp2 = exp %gen_arg2 : f32
    linalg.yield %tmp2 : f32
  }: tensor<4xf32> -> tensor<4xf32>
  return %1 : tensor<4xf32>
}
// CHECK: (%[[NEW_ARG0:.*]]: [[TYPE:.*]], %[[ARG1_RESULT:.*]]: [[TYPE]])
// CHECK: %[[FIRST_ALLOC:.*]] = alloc() : [[TYPE]]
// CHECK: linalg.generic
// CHECK-SAME: %[[NEW_ARG0]], %[[FIRST_ALLOC]]
// CHECK: ^{{[a-z0-9_]*}}
// CHECK-SAME: %{{.*}}: f32, %{{.*}}: f32
// CHECK: [[TYPE]], [[TYPE]]
// CHECK: %[[SECOND_ALLOC:.*]] = alloc() : [[TYPE]]
// CHECK: linalg.generic
// CHECK-SAME: %[[FIRST_ALLOC]], %[[SECOND_ALLOC]]
// CHECK: ^{{[a-z0-9_]*}}
// CHECK-SAME: %{{.*}}: f32, %{{.*}}: f32
// CHECK: [[TYPE]], [[TYPE]]
// CHECK: dealloc %[[FIRST_ALLOC]]
// CHECK: linalg.copy(%[[SECOND_ALLOC]], %[[ARG1_RESULT]])
// CHECK: dealloc %[[SECOND_ALLOC]]
// CHECK: return

// -----

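// Checks that a function without any linalg ops keeps its scalar signature and
// body unchanged.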
// CHECK-LABEL: func @no_linalg_op
func @no_linalg_op(%arg0: f32) -> (f32, f32) {
  %0 = mulf %arg0, %arg0 : f32
  return %0, %0 : f32, f32
}
// CHECK: (%[[NEW_ARG0:.*]]: [[TYPE:.*]]) -> ([[TYPE]], [[TYPE]])
// CHECK: %[[RESULT:.*]] = mulf %[[NEW_ARG0]], %[[NEW_ARG0]] : [[TYPE]]
// CHECK: return %[[RESULT]], %[[RESULT]] : [[TYPE]], [[TYPE]]