CbC_llvm (Mercurial repository): test/CodeGen/PowerPC/ctrloop-s000.ll @ changeset 0:95c75e76d11b (LLVM 3.4)
author    Kaito Tokumori <e105711@ie.u-ryukyu.ac.jp>
date      Thu, 12 Dec 2013 13:56:28 +0900
parents
children  afa8332a0e37
; ModuleID = 'tsc_s000.c'
target datalayout = "E-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v128:128:128-n32:64"
target triple = "powerpc64-unknown-linux-gnu"
; RUN: llc < %s -march=ppc64 | FileCheck %s

@Y = common global [16000 x double] zeroinitializer, align 32
@X = common global [16000 x double] zeroinitializer, align 32
@Z = common global [16000 x double] zeroinitializer, align 32
@U = common global [16000 x double] zeroinitializer, align 32
@V = common global [16000 x double] zeroinitializer, align 32
@aa = common global [256 x [256 x double]] zeroinitializer, align 32
@bb = common global [256 x [256 x double]] zeroinitializer, align 32
@cc = common global [256 x [256 x double]] zeroinitializer, align 32
@array = common global [65536 x double] zeroinitializer, align 32
@x = common global [16000 x double] zeroinitializer, align 32
@temp = common global double 0.000000e+00, align 8
@temp_int = common global i32 0, align 4
@a = common global [16000 x double] zeroinitializer, align 32
@b = common global [16000 x double] zeroinitializer, align 32
@c = common global [16000 x double] zeroinitializer, align 32
@d = common global [16000 x double] zeroinitializer, align 32
@e = common global [16000 x double] zeroinitializer, align 32
@tt = common global [256 x [256 x double]] zeroinitializer, align 32
@indx = common global [16000 x i32] zeroinitializer, align 32
@xx = common global double* null, align 8
@yy = common global double* null, align 8

define i32 @s000() nounwind {
entry:
  br label %for.cond1.preheader

for.cond1.preheader:                              ; preds = %for.end, %entry
  %nl.010 = phi i32 [ 0, %entry ], [ %inc7, %for.end ]
  br label %for.body3

for.body3:                                        ; preds = %for.body3, %for.cond1.preheader
  %indvars.iv = phi i64 [ 0, %for.cond1.preheader ], [ %indvars.iv.next.15, %for.body3 ]
  %arrayidx = getelementptr inbounds [16000 x double]* @Y, i64 0, i64 %indvars.iv
  %0 = load double* %arrayidx, align 32
  %add = fadd double %0, 1.000000e+00
  %arrayidx5 = getelementptr inbounds [16000 x double]* @X, i64 0, i64 %indvars.iv
  store double %add, double* %arrayidx5, align 32
  %indvars.iv.next11 = or i64 %indvars.iv, 1
  %arrayidx.1 = getelementptr inbounds [16000 x double]* @Y, i64 0, i64 %indvars.iv.next11
  %1 = load double* %arrayidx.1, align 8
  %add.1 = fadd double %1, 1.000000e+00
  %arrayidx5.1 = getelementptr inbounds [16000 x double]* @X, i64 0, i64 %indvars.iv.next11
  store double %add.1, double* %arrayidx5.1, align 8
  %indvars.iv.next.112 = or i64 %indvars.iv, 2
  %arrayidx.2 = getelementptr inbounds [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.112
  %2 = load double* %arrayidx.2, align 16
  %add.2 = fadd double %2, 1.000000e+00
  %arrayidx5.2 = getelementptr inbounds [16000 x double]* @X, i64 0, i64 %indvars.iv.next.112
  store double %add.2, double* %arrayidx5.2, align 16
  %indvars.iv.next.213 = or i64 %indvars.iv, 3
  %arrayidx.3 = getelementptr inbounds [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.213
  %3 = load double* %arrayidx.3, align 8
  %add.3 = fadd double %3, 1.000000e+00
  %arrayidx5.3 = getelementptr inbounds [16000 x double]* @X, i64 0, i64 %indvars.iv.next.213
  store double %add.3, double* %arrayidx5.3, align 8
  %indvars.iv.next.314 = or i64 %indvars.iv, 4
  %arrayidx.4 = getelementptr inbounds [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.314
  %4 = load double* %arrayidx.4, align 32
  %add.4 = fadd double %4, 1.000000e+00
  %arrayidx5.4 = getelementptr inbounds [16000 x double]* @X, i64 0, i64 %indvars.iv.next.314
  store double %add.4, double* %arrayidx5.4, align 32
  %indvars.iv.next.415 = or i64 %indvars.iv, 5
  %arrayidx.5 = getelementptr inbounds [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.415
  %5 = load double* %arrayidx.5, align 8
  %add.5 = fadd double %5, 1.000000e+00
  %arrayidx5.5 = getelementptr inbounds [16000 x double]* @X, i64 0, i64 %indvars.iv.next.415
  store double %add.5, double* %arrayidx5.5, align 8
  %indvars.iv.next.516 = or i64 %indvars.iv, 6
  %arrayidx.6 = getelementptr inbounds [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.516
  %6 = load double* %arrayidx.6, align 16
  %add.6 = fadd double %6, 1.000000e+00
  %arrayidx5.6 = getelementptr inbounds [16000 x double]* @X, i64 0, i64 %indvars.iv.next.516
  store double %add.6, double* %arrayidx5.6, align 16
  %indvars.iv.next.617 = or i64 %indvars.iv, 7
  %arrayidx.7 = getelementptr inbounds [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.617
  %7 = load double* %arrayidx.7, align 8
  %add.7 = fadd double %7, 1.000000e+00
  %arrayidx5.7 = getelementptr inbounds [16000 x double]* @X, i64 0, i64 %indvars.iv.next.617
  store double %add.7, double* %arrayidx5.7, align 8
  %indvars.iv.next.718 = or i64 %indvars.iv, 8
  %arrayidx.8 = getelementptr inbounds [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.718
  %8 = load double* %arrayidx.8, align 32
  %add.8 = fadd double %8, 1.000000e+00
  %arrayidx5.8 = getelementptr inbounds [16000 x double]* @X, i64 0, i64 %indvars.iv.next.718
  store double %add.8, double* %arrayidx5.8, align 32
  %indvars.iv.next.819 = or i64 %indvars.iv, 9
  %arrayidx.9 = getelementptr inbounds [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.819
  %9 = load double* %arrayidx.9, align 8
  %add.9 = fadd double %9, 1.000000e+00
  %arrayidx5.9 = getelementptr inbounds [16000 x double]* @X, i64 0, i64 %indvars.iv.next.819
  store double %add.9, double* %arrayidx5.9, align 8
  %indvars.iv.next.920 = or i64 %indvars.iv, 10
  %arrayidx.10 = getelementptr inbounds [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.920
  %10 = load double* %arrayidx.10, align 16
  %add.10 = fadd double %10, 1.000000e+00
  %arrayidx5.10 = getelementptr inbounds [16000 x double]* @X, i64 0, i64 %indvars.iv.next.920
  store double %add.10, double* %arrayidx5.10, align 16
  %indvars.iv.next.1021 = or i64 %indvars.iv, 11
  %arrayidx.11 = getelementptr inbounds [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.1021
  %11 = load double* %arrayidx.11, align 8
  %add.11 = fadd double %11, 1.000000e+00
  %arrayidx5.11 = getelementptr inbounds [16000 x double]* @X, i64 0, i64 %indvars.iv.next.1021
  store double %add.11, double* %arrayidx5.11, align 8
  %indvars.iv.next.1122 = or i64 %indvars.iv, 12
  %arrayidx.12 = getelementptr inbounds [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.1122
  %12 = load double* %arrayidx.12, align 32
  %add.12 = fadd double %12, 1.000000e+00
  %arrayidx5.12 = getelementptr inbounds [16000 x double]* @X, i64 0, i64 %indvars.iv.next.1122
  store double %add.12, double* %arrayidx5.12, align 32
  %indvars.iv.next.1223 = or i64 %indvars.iv, 13
  %arrayidx.13 = getelementptr inbounds [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.1223
  %13 = load double* %arrayidx.13, align 8
  %add.13 = fadd double %13, 1.000000e+00
  %arrayidx5.13 = getelementptr inbounds [16000 x double]* @X, i64 0, i64 %indvars.iv.next.1223
  store double %add.13, double* %arrayidx5.13, align 8
  %indvars.iv.next.1324 = or i64 %indvars.iv, 14
  %arrayidx.14 = getelementptr inbounds [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.1324
  %14 = load double* %arrayidx.14, align 16
  %add.14 = fadd double %14, 1.000000e+00
  %arrayidx5.14 = getelementptr inbounds [16000 x double]* @X, i64 0, i64 %indvars.iv.next.1324
  store double %add.14, double* %arrayidx5.14, align 16
  %indvars.iv.next.1425 = or i64 %indvars.iv, 15
  %arrayidx.15 = getelementptr inbounds [16000 x double]* @Y, i64 0, i64 %indvars.iv.next.1425
  %15 = load double* %arrayidx.15, align 8
  %add.15 = fadd double %15, 1.000000e+00
  %arrayidx5.15 = getelementptr inbounds [16000 x double]* @X, i64 0, i64 %indvars.iv.next.1425
  store double %add.15, double* %arrayidx5.15, align 8
  %indvars.iv.next.15 = add i64 %indvars.iv, 16
  %lftr.wideiv.15 = trunc i64 %indvars.iv.next.15 to i32
  %exitcond.15 = icmp eq i32 %lftr.wideiv.15, 16000
  br i1 %exitcond.15, label %for.end, label %for.body3

for.end:                                          ; preds = %for.body3
  %call = tail call i32 @dummy(double* getelementptr inbounds ([16000 x double]* @X, i64 0, i64 0), double* getelementptr inbounds ([16000 x double]* @Y, i64 0, i64 0), double* getelementptr inbounds ([16000 x double]* @Z, i64 0, i64 0), double* getelementptr inbounds ([16000 x double]* @U, i64 0, i64 0), double* getelementptr inbounds ([16000 x double]* @V, i64 0, i64 0), [256 x double]* getelementptr inbounds ([256 x [256 x double]]* @aa, i64 0, i64 0), [256 x double]* getelementptr inbounds ([256 x [256 x double]]* @bb, i64 0, i64 0), [256 x double]* getelementptr inbounds ([256 x [256 x double]]* @cc, i64 0, i64 0), double 0.000000e+00) nounwind
  %inc7 = add nsw i32 %nl.010, 1
  %exitcond = icmp eq i32 %inc7, 400000
  br i1 %exitcond, label %for.end8, label %for.cond1.preheader

for.end8:                                         ; preds = %for.end
  ret i32 0

; CHECK: @s000
; CHECK: mtctr
; CHECK: bdnz
}

declare i32 @dummy(double*, double*, double*, double*, double*, [256 x double]*, [256 x double]*, [256 x double]*, double)
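
The CHECK lines in @s000 verify that llc lowers the inner loop to a PowerPC counter-based loop: the trip count is placed in the CTR register with mtctr and the loop's backward branch becomes bdnz. For reference, the module name 'tsc_s000.c' and the loop body (X[i] = Y[i] + 1.0 over 16000 elements, repeated 400000 times around a call to dummy) match the TSVC s000 kernel. The following is only a minimal C sketch of that presumed source under those assumptions, not the actual benchmark harness; in particular, the simplified dummy() stand-in is an assumption (the real external dummy() takes nine arguments, as the declare line above shows).

    /* Minimal sketch, assuming the TSVC s000 kernel; trip counts taken from the IR above. */
    #include <stdio.h>

    #define LEN 16000
    static double X[LEN], Y[LEN];

    /* Hypothetical stand-in for the test's external dummy(); it only exists to
       keep the stores to X observable so the loop is not optimized away. */
    static int dummy(double *x, double *y) { return (int)(x[0] + y[0]); }

    int s000(void)
    {
        for (int nl = 0; nl < 400000; nl++) {     /* outer repetition loop (%nl.010) */
            for (int i = 0; i < LEN; i++)         /* inner loop, unrolled 16x in the IR */
                X[i] = Y[i] + 1.0;
            dummy(X, Y);
        }
        return 0;
    }

    int main(void)
    {
        s000();
        printf("%f\n", X[0]);                     /* expect 1.000000 */
        return 0;
    }

When such a counted inner loop is compiled for ppc64, the CTR loop pass can replace the compare-and-branch at the bottom of for.body3 with a single bdnz, which is what the FileCheck patterns assert.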