CbC_llvm: comparison of mlir/test/Examples/Toy/Ch7/shape_inference.mlir @ 173:0572611fdcc8 (llvm10, llvm12)
description: reorganization done
author:      Shinji KONO <kono@ie.u-ryukyu.ac.jp>
date:        Mon, 25 May 2020 11:55:54 +0900
parents:     1d019706d866
children:    2e18cbf3894f
--- 172:9fbae9c8bf63
+++ 173:0572611fdcc8
@@ -2,30 +2,30 @@
 
 // Check the result of inlining+shape inference on an input module.
 
 func @multiply_transpose(%arg0: tensor<*xf64>, %arg1: tensor<*xf64>) -> tensor<*xf64>
     attributes { sym_visibility = "private" } {
-  %0 = "toy.transpose"(%arg0) : (tensor<*xf64>) -> tensor<*xf64>
-  %1 = "toy.transpose"(%arg1) : (tensor<*xf64>) -> tensor<*xf64>
-  %2 = "toy.mul"(%0, %1) : (tensor<*xf64>, tensor<*xf64>) -> tensor<*xf64>
-  "toy.return"(%2) : (tensor<*xf64>) -> ()
+  %0 = toy.transpose(%arg0 : tensor<*xf64>) to tensor<*xf64>
+  %1 = toy.transpose(%arg1 : tensor<*xf64>) to tensor<*xf64>
+  %2 = toy.mul %0, %1 : tensor<*xf64>
+  toy.return %2 : tensor<*xf64>
 }
 func @main() {
-  %0 = "toy.constant"() {value = dense<[[1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>} : () -> tensor<2x3xf64>
-  %1 = "toy.reshape"(%0) : (tensor<2x3xf64>) -> tensor<2x3xf64>
-  %2 = "toy.constant"() {value = dense<[1.000000e+00, 2.000000e+00, 3.000000e+00, 4.000000e+00, 5.000000e+00, 6.000000e+00]> : tensor<6xf64>} : () -> tensor<6xf64>
-  %3 = "toy.reshape"(%2) : (tensor<6xf64>) -> tensor<2x3xf64>
-  %4 = "toy.generic_call"(%1, %3) {callee = @multiply_transpose} : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64>
-  %5 = "toy.generic_call"(%3, %1) {callee = @multiply_transpose} : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64>
-  "toy.print"(%5) : (tensor<*xf64>) -> ()
-  "toy.return"() : () -> ()
+  %0 = toy.constant dense<[[1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>
+  %1 = toy.reshape(%0 : tensor<2x3xf64>) to tensor<2x3xf64>
+  %2 = toy.constant dense<[1.000000e+00, 2.000000e+00, 3.000000e+00, 4.000000e+00, 5.000000e+00, 6.000000e+00]> : tensor<6xf64>
+  %3 = toy.reshape(%2 : tensor<6xf64>) to tensor<2x3xf64>
+  %4 = toy.generic_call @multiply_transpose(%1, %3) : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64>
+  %5 = toy.generic_call @multiply_transpose(%3, %1) : (tensor<2x3xf64>, tensor<2x3xf64>) -> tensor<*xf64>
+  toy.print %5 : tensor<*xf64>
+  toy.return
 }
 
 // CHECK-NOT: func @multiply_transpose
 // CHECK-NOT: tensor<*xf64>
 
 // CHECK-LABEL: func @main()
-// CHECK: [[VAL_0:%.*]] = "toy.constant"() {value = dense<{{\[\[}}1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>} : () -> tensor<2x3xf64>
-// CHECK: [[VAL_1:%.*]] = "toy.transpose"([[VAL_0]]) : (tensor<2x3xf64>) -> tensor<3x2xf64>
-// CHECK: [[VAL_2:%.*]] = "toy.mul"([[VAL_1]], [[VAL_1]]) : (tensor<3x2xf64>, tensor<3x2xf64>) -> tensor<3x2xf64>
-// CHECK: "toy.print"([[VAL_2]]) : (tensor<3x2xf64>) -> ()
-// CHECK: "toy.return"() : () -> ()
+// CHECK: [[VAL_0:%.*]] = toy.constant dense<{{\[\[}}1.000000e+00, 2.000000e+00, 3.000000e+00], [4.000000e+00, 5.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>
+// CHECK: [[VAL_1:%.*]] = toy.transpose([[VAL_0]] : tensor<2x3xf64>) to tensor<3x2xf64>
+// CHECK: [[VAL_2:%.*]] = toy.mul [[VAL_1]], [[VAL_1]] : tensor<3x2xf64>
+// CHECK: toy.print [[VAL_2]] : tensor<3x2xf64>
+// CHECK: toy.return
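Context for reading the comparison (an illustration only, not part of the changeset): both revisions describe the same operations. The old side writes every op in MLIR's generic operation syntax, while the new side uses the Toy dialect's custom assembly format, so the two spellings of the first transpose in @multiply_transpose denote the same op:

    %0 = "toy.transpose"(%arg0) : (tensor<*xf64>) -> tensor<*xf64>   // generic form (old revision)
    %0 = toy.transpose(%arg0 : tensor<*xf64>) to tensor<*xf64>       // custom assembly format (new revision)

The CHECK lines change the same way, since FileCheck matches the test's expected output against whatever textual form the tool emits.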