comparison: llvm/test/CodeGen/AMDGPU/verify-gfx90a-aligned-vgprs.mir @ 221:79ff65ed7e25
LLVM12 Original
author:   Shinji KONO <kono@ie.u-ryukyu.ac.jp>
date:     Tue, 15 Jun 2021 19:15:29 +0900
parents:
children: c4bab56944e8
comparing 220:42394fc6a535 with 221:79ff65ed7e25
# RUN: not --crash llc -march=amdgcn -mcpu=gfx90a -run-pass=machineverifier -o /dev/null %s 2>&1 | FileCheck %s

# Implicit uses are OK.
---
name: implicit_use
body: |
  bb.0:
    $vgpr1_vgpr2 = IMPLICIT_DEF
    S_NOP 0, implicit $vgpr1_vgpr2
    %0:vreg_64 = IMPLICIT_DEF
    S_NOP 0, implicit %0

    %1:sreg_64_xexec = IMPLICIT_DEF
    %2:sreg_64_xexec = SI_CALL %1, 0, csr_amdgpu_highregs, implicit $vgpr1_vgpr2

    ; noreg is OK
    DS_WRITE_B64_gfx9 $noreg, $noreg, 0, 0, implicit $exec
...

# The unaligned registers are allowed to exist, just not on any tuple instructions.

---
name: copy_like_generic
body: |
  bb.0:
    $vgpr1_vgpr2 = IMPLICIT_DEF
    $vgpr3_vgpr4 = COPY $vgpr1_vgpr2
    %0:vreg_64 = IMPLICIT_DEF
    %1:vreg_64 = COPY %0
...

---
name: mov_32_unaligned_super
body: |
  bb.0:
    undef %0.sub1:vreg_64 = V_MOV_B32_e32 0, implicit $exec
    %1:vgpr_32 = V_MOV_B32_e32 undef %2.sub1:vreg_64, implicit $exec
...

# Well-aligned subregister indexes are OK
---
name: aligned_sub_reg
body: |
  bb.0:
    %0:vreg_64_align2 = IMPLICIT_DEF
    %1:vreg_128_align2 = IMPLICIT_DEF
    GLOBAL_STORE_DWORDX2 %0, %1.sub0_sub1, 0, 0, implicit $exec
    GLOBAL_STORE_DWORDX2 %0, %1.sub2_sub3, 0, 0, implicit $exec
...

---
name: unaligned_registers
body: |
  bb.0:
    liveins: $vgpr0_vgpr1, $vgpr3_vgpr4_vgpr5_vgpr6
    %0:vreg_64_align2 = IMPLICIT_DEF
    %1:vreg_64 = IMPLICIT_DEF
    %2:vreg_96 = IMPLICIT_DEF
    %3:vreg_128 = IMPLICIT_DEF
    %4:areg_64 = IMPLICIT_DEF
    %5:vreg_128_align2 = IMPLICIT_DEF

    ; Check virtual register uses
    ; CHECK: *** Bad machine code: Subtarget requires even aligned vector registers ***
    ; CHECK: *** Bad machine code: Subtarget requires even aligned vector registers ***
    ; CHECK: *** Bad machine code: Subtarget requires even aligned vector registers ***
    GLOBAL_STORE_DWORDX2 %0, %1, 0, 0, implicit $exec
    GLOBAL_STORE_DWORDX3 %0, %2, 0, 0, implicit $exec
    GLOBAL_STORE_DWORDX4 %0, %3, 0, 0, implicit $exec

    ; Check virtual registers with subregisters
    ; CHECK: *** Bad machine code: Subtarget requires even aligned vector registers ***
    ; CHECK: *** Bad machine code: Subtarget requires even aligned vector registers ***
    ; CHECK: *** Bad machine code: Subtarget requires even aligned vector registers ***
    ; CHECK: *** Bad machine code: Subtarget requires even aligned vector registers ***
    GLOBAL_STORE_DWORDX2 %0, %3.sub0_sub1, 0, 0, implicit $exec
    GLOBAL_STORE_DWORDX2 %0, %3.sub2_sub3, 0, 0, implicit $exec
    GLOBAL_STORE_DWORDX2 %0, %3.sub1_sub2, 0, 0, implicit $exec
    GLOBAL_STORE_DWORDX2 %0, %5.sub1_sub2, 0, 0, implicit $exec

    ; Check physical register uses
    ; CHECK: *** Bad machine code: Subtarget requires even aligned vector registers ***
    ; CHECK: *** Bad machine code: Subtarget requires even aligned vector registers ***
    ; CHECK: *** Bad machine code: Subtarget requires even aligned vector registers ***
    GLOBAL_STORE_DWORDX2 $vgpr0_vgpr1, $vgpr3_vgpr4, 0, 0, implicit $exec
    GLOBAL_STORE_DWORDX3 $vgpr0_vgpr1, $vgpr3_vgpr4_vgpr5, 0, 0, implicit $exec
    GLOBAL_STORE_DWORDX4 $vgpr0_vgpr1, $vgpr3_vgpr4_vgpr5_vgpr6, 0, 0, implicit $exec

    ; Check virtual register defs
    ; CHECK: *** Bad machine code: Subtarget requires even aligned vector registers ***
    ; CHECK: *** Bad machine code: Subtarget requires even aligned vector registers ***
    ; CHECK: *** Bad machine code: Subtarget requires even aligned vector registers ***
    %6:vreg_64 = GLOBAL_LOAD_DWORDX2 %0, 0, 0, implicit $exec
    %7:vreg_96 = GLOBAL_LOAD_DWORDX3 %0, 0, 0, implicit $exec
    %8:vreg_128 = GLOBAL_LOAD_DWORDX4 %0, 0, 0, implicit $exec

    ; CHECK: *** Bad machine code: Subtarget requires even aligned vector registers ***
    ; CHECK: *** Bad machine code: Subtarget requires even aligned vector registers ***
    ; CHECK: *** Bad machine code: Subtarget requires even aligned vector registers ***
    $vgpr1_vgpr2 = GLOBAL_LOAD_DWORDX2 %0, 0, 0, implicit $exec
    $vgpr1_vgpr2_vgpr3 = GLOBAL_LOAD_DWORDX3 %0, 0, 0, implicit $exec
    $vgpr1_vgpr2_vgpr3_vgpr4 = GLOBAL_LOAD_DWORDX4 %0, 0, 0, implicit $exec

    ; Check AGPRs
    ; CHECK: *** Bad machine code: Subtarget requires even aligned vector registers ***
    ; CHECK: *** Bad machine code: Subtarget requires even aligned vector registers ***
    %9:vgpr_32 = IMPLICIT_DEF
    %10:areg_64 = IMPLICIT_DEF
    %11:areg_128_align2 = IMPLICIT_DEF
    DS_WRITE_B64_gfx9 %9, %10, 0, 0, implicit $exec
    DS_WRITE_B64_gfx9 %9, %11.sub1_sub2, 0, 0, implicit $exec
...

# FIXME: Inline asm is not verified
# ; Check inline asm
# ; XCHECK: *** Bad machine code: Subtarget requires even aligned vector registers ***
# ; XCHECK: *** Bad machine code: Subtarget requires even aligned vector registers ***
# ; XCHECK: *** Bad machine code: Subtarget requires even aligned vector registers ***
# INLINEASM &"; use $0 ", 1 /* sideeffect attdialect */, 9 /* reguse */, $vgpr1_vgpr2
# INLINEASM &"; use $0 ", 1 /* sideeffect attdialect */, 9 /* reguse */, %4
# INLINEASM &"; use $0 ", 1 /* sideeffect attdialect */, 9 /* reguse */, %5.sub1_sub2
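
For reference, the physical-register cases above are rejected because the value tuple starts at an odd VGPR ($vgpr3_vgpr4). A minimal sketch of a counterpart the gfx90a verifier accepts is below; the function name and the choice of $vgpr2_vgpr3 are illustrative and not taken from the test itself.

---
name: aligned_physreg_ok
body: |
  bb.0:
    ; Both tuples start at an even-numbered VGPR, so no verifier error is expected.
    GLOBAL_STORE_DWORDX2 $vgpr0_vgpr1, $vgpr2_vgpr3, 0, 0, implicit $exec
...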