Comparison: test/CodeGen/AArch64/arm64-fast-isel-conversion.ll @ 95:afa8332a0e37
LLVM 3.8
author | Kaito Tokumori <e105711@ie.u-ryukyu.ac.jp> |
date | Tue, 13 Oct 2015 17:48:58 +0900 |
parents | 60c9769439b8 |
children | |
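The right-hand column of the comparison below reflects two mechanical updates that came with the move to the LLVM 3.7/3.8-era IR and tools: the `load` instruction now names the loaded type explicitly as its first operand instead of deriving it from the pointer operand, and the `-fast-isel-abort` llc option takes a numeric level, hence `-fast-isel-abort=1` in the updated RUN line. A minimal sketch of the load syntax change (the function and value names here are illustrative, not taken from the test):

    ; Old form (pre-3.7): the loaded type is implied by the pointer operand.
    ;   %v = load i32* %p, align 4
    ; New form (3.7 and later): the loaded type is spelled out first.
    define i32 @load_example(i32* %p) {
    entry:
      %v = load i32, i32* %p, align 4   ; illustrative only
      ret i32 %v
    }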
84:f3e34b893a5f | 95:afa8332a0e37 |
---|---|
1 ; RUN: llc -O0 -fast-isel-abort -verify-machineinstrs -mtriple=arm64-apple-darwin -mcpu=cyclone < %s | FileCheck %s | 1 ; RUN: llc -O0 -fast-isel-abort=1 -verify-machineinstrs -mtriple=arm64-apple-darwin -mcpu=cyclone < %s | FileCheck %s |
2 | 2 |
3 ;; Test various conversions. | 3 ;; Test various conversions. |
4 define zeroext i32 @trunc_(i8 zeroext %a, i16 zeroext %b, i32 %c, i64 %d) nounwind ssp { | 4 define zeroext i32 @trunc_(i8 zeroext %a, i16 zeroext %b, i32 %c, i64 %d) nounwind ssp { |
5 entry: | 5 entry: |
6 ; CHECK-LABEL: trunc_ | 6 ; CHECK-LABEL: trunc_ |
25 %d.addr = alloca i64, align 8 | 25 %d.addr = alloca i64, align 8 |
26 store i8 %a, i8* %a.addr, align 1 | 26 store i8 %a, i8* %a.addr, align 1 |
27 store i16 %b, i16* %b.addr, align 2 | 27 store i16 %b, i16* %b.addr, align 2 |
28 store i32 %c, i32* %c.addr, align 4 | 28 store i32 %c, i32* %c.addr, align 4 |
29 store i64 %d, i64* %d.addr, align 8 | 29 store i64 %d, i64* %d.addr, align 8 |
30 %tmp = load i64* %d.addr, align 8 | 30 %tmp = load i64, i64* %d.addr, align 8 |
31 %conv = trunc i64 %tmp to i32 | 31 %conv = trunc i64 %tmp to i32 |
32 store i32 %conv, i32* %c.addr, align 4 | 32 store i32 %conv, i32* %c.addr, align 4 |
33 %tmp1 = load i32* %c.addr, align 4 | 33 %tmp1 = load i32, i32* %c.addr, align 4 |
34 %conv2 = trunc i32 %tmp1 to i16 | 34 %conv2 = trunc i32 %tmp1 to i16 |
35 store i16 %conv2, i16* %b.addr, align 2 | 35 store i16 %conv2, i16* %b.addr, align 2 |
36 %tmp3 = load i16* %b.addr, align 2 | 36 %tmp3 = load i16, i16* %b.addr, align 2 |
37 %conv4 = trunc i16 %tmp3 to i8 | 37 %conv4 = trunc i16 %tmp3 to i8 |
38 store i8 %conv4, i8* %a.addr, align 1 | 38 store i8 %conv4, i8* %a.addr, align 1 |
39 %tmp5 = load i8* %a.addr, align 1 | 39 %tmp5 = load i8, i8* %a.addr, align 1 |
40 %conv6 = zext i8 %tmp5 to i32 | 40 %conv6 = zext i8 %tmp5 to i32 |
41 ret i32 %conv6 | 41 ret i32 %conv6 |
42 } | 42 } |
43 | 43 |
44 define i64 @zext_(i8 zeroext %a, i16 zeroext %b, i32 %c, i64 %d) nounwind ssp { | 44 define i64 @zext_(i8 zeroext %a, i16 zeroext %b, i32 %c, i64 %d) nounwind ssp { |
64 %d.addr = alloca i64, align 8 | 64 %d.addr = alloca i64, align 8 |
65 store i8 %a, i8* %a.addr, align 1 | 65 store i8 %a, i8* %a.addr, align 1 |
66 store i16 %b, i16* %b.addr, align 2 | 66 store i16 %b, i16* %b.addr, align 2 |
67 store i32 %c, i32* %c.addr, align 4 | 67 store i32 %c, i32* %c.addr, align 4 |
68 store i64 %d, i64* %d.addr, align 8 | 68 store i64 %d, i64* %d.addr, align 8 |
69 %tmp = load i8* %a.addr, align 1 | 69 %tmp = load i8, i8* %a.addr, align 1 |
70 %conv = zext i8 %tmp to i16 | 70 %conv = zext i8 %tmp to i16 |
71 store i16 %conv, i16* %b.addr, align 2 | 71 store i16 %conv, i16* %b.addr, align 2 |
72 %tmp1 = load i16* %b.addr, align 2 | 72 %tmp1 = load i16, i16* %b.addr, align 2 |
73 %conv2 = zext i16 %tmp1 to i32 | 73 %conv2 = zext i16 %tmp1 to i32 |
74 store i32 %conv2, i32* %c.addr, align 4 | 74 store i32 %conv2, i32* %c.addr, align 4 |
75 %tmp3 = load i32* %c.addr, align 4 | 75 %tmp3 = load i32, i32* %c.addr, align 4 |
76 %conv4 = zext i32 %tmp3 to i64 | 76 %conv4 = zext i32 %tmp3 to i64 |
77 store i64 %conv4, i64* %d.addr, align 8 | 77 store i64 %conv4, i64* %d.addr, align 8 |
78 %tmp5 = load i64* %d.addr, align 8 | 78 %tmp5 = load i64, i64* %d.addr, align 8 |
79 ret i64 %tmp5 | 79 ret i64 %tmp5 |
80 } | 80 } |
81 | 81 |
82 define i32 @zext_i1_i32(i1 zeroext %a) nounwind ssp { | 82 define i32 @zext_i1_i32(i1 zeroext %a) nounwind ssp { |
83 entry: | 83 entry: |
119 %d.addr = alloca i64, align 8 | 119 %d.addr = alloca i64, align 8 |
120 store i8 %a, i8* %a.addr, align 1 | 120 store i8 %a, i8* %a.addr, align 1 |
121 store i16 %b, i16* %b.addr, align 2 | 121 store i16 %b, i16* %b.addr, align 2 |
122 store i32 %c, i32* %c.addr, align 4 | 122 store i32 %c, i32* %c.addr, align 4 |
123 store i64 %d, i64* %d.addr, align 8 | 123 store i64 %d, i64* %d.addr, align 8 |
124 %tmp = load i8* %a.addr, align 1 | 124 %tmp = load i8, i8* %a.addr, align 1 |
125 %conv = sext i8 %tmp to i16 | 125 %conv = sext i8 %tmp to i16 |
126 store i16 %conv, i16* %b.addr, align 2 | 126 store i16 %conv, i16* %b.addr, align 2 |
127 %tmp1 = load i16* %b.addr, align 2 | 127 %tmp1 = load i16, i16* %b.addr, align 2 |
128 %conv2 = sext i16 %tmp1 to i32 | 128 %conv2 = sext i16 %tmp1 to i32 |
129 store i32 %conv2, i32* %c.addr, align 4 | 129 store i32 %conv2, i32* %c.addr, align 4 |
130 %tmp3 = load i32* %c.addr, align 4 | 130 %tmp3 = load i32, i32* %c.addr, align 4 |
131 %conv4 = sext i32 %tmp3 to i64 | 131 %conv4 = sext i32 %tmp3 to i64 |
132 store i64 %conv4, i64* %d.addr, align 8 | 132 store i64 %conv4, i64* %d.addr, align 8 |
133 %tmp5 = load i64* %d.addr, align 8 | 133 %tmp5 = load i64, i64* %d.addr, align 8 |
134 ret i64 %tmp5 | 134 ret i64 %tmp5 |
135 } | 135 } |
136 | 136 |
137 ; Test sext i8 to i64 | 137 ; Test sext i8 to i64 |
138 | 138 |
407 ; CHECK: and [[REG3:w[0-9]+]], w[[REG2]], #0xff | 407 ; CHECK: and [[REG3:w[0-9]+]], w[[REG2]], #0xff |
408 ; CHECK: strb [[REG3]], [sp, #15] | 408 ; CHECK: strb [[REG3]], [sp, #15] |
409 ; CHECK: add sp, sp, #16 | 409 ; CHECK: add sp, sp, #16 |
410 %a = alloca i8, align 1 | 410 %a = alloca i8, align 1 |
411 %b = alloca i64, align 8 | 411 %b = alloca i64, align 8 |
412 %c = load i64* %b, align 8 | 412 %c = load i64, i64* %b, align 8 |
413 %d = trunc i64 %c to i8 | 413 %d = trunc i64 %c to i8 |
414 store i8 %d, i8* %a, align 1 | 414 store i8 %d, i8* %a, align 1 |
415 ret void | 415 ret void |
416 } | 416 } |
417 | 417 |
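For context, the `; RUN:` and `; CHECK:` lines in the file above are lit and FileCheck directives: lit substitutes the test file for `%s`, runs the llc command, and pipes the assembly output to FileCheck, which matches it against the CHECK patterns in the same file. A minimal standalone test in the same updated style might look roughly like this (the function name and checks are illustrative assumptions, not part of this file):

    ; RUN: llc -O0 -fast-isel-abort=1 -verify-machineinstrs -mtriple=arm64-apple-darwin -mcpu=cyclone < %s | FileCheck %s
    ; Illustrative test, not from the original file.
    define i32 @trunc_example(i64 %x) nounwind {
    entry:
    ; CHECK-LABEL: trunc_example
    ; CHECK: ret
      %conv = trunc i64 %x to i32
      ret i32 %conv
    }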