#!/usr/bin/env python
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
# ==------------------------------------------------------------------------==#

import os
import glob
import re
import subprocess
import json
import datetime
import argparse
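# urllib moved between Python 2 and Python 3; try the Python 3 locations
# first and fall back to the Python 2 modules.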
try:
    from urllib.parse import urlencode
    from urllib.request import urlopen, Request
except ImportError:
    from urllib import urlencode
    from urllib2 import urlopen, Request


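# Example invocation (the script name, revision, and paths are illustrative):
#   python benchmark.py --machine my-machine --revision a1b2c3 \
#       --runs 10 /path/to/benchmark/directory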
parser = argparse.ArgumentParser()
parser.add_argument('benchmark_directory')
parser.add_argument('--runs', type=int, default=10)
parser.add_argument('--wrapper', default='')
parser.add_argument('--machine', required=True)
parser.add_argument('--revision', required=True)
parser.add_argument('--threads', action='store_true')
parser.add_argument('--url', help='The LNT server URL to send the results to',
                    default='http://localhost:8000/db_default/v4/link/submitRun')
args = parser.parse_args()

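# A benchmark is a directory containing one or more linker response files.
# A plain response.txt is the default variant; every response-<variant>.txt
# defines an additional variant of the same benchmark.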
class Bench:
    def __init__(self, directory, variant):
        self.directory = directory
        self.variant = variant

    def __str__(self):
        if not self.variant:
            return self.directory
        return '%s-%s' % (self.directory, self.variant)

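# Find every benchmark (and variant) one level below the current directory.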
def getBenchmarks():
    ret = []
    for i in glob.glob('*/response*.txt'):
        m = re.match(r'response-(.*)\.txt', os.path.basename(i))
        variant = m.groups()[0] if m else None
        ret.append(Bench(os.path.dirname(i), variant))
    return ret

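# perf prints numbers with thousands separators; strip them before parsing.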
def parsePerfNum(num):
    num = num.replace(b',', b'')
    try:
        return int(num)
    except ValueError:
        return float(num)

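# A counter line looks like "1,234,567  instructions  # 1.0 insn per cycle".
# Keep the counter name and value, dropping the '#' comment.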
def parsePerfLine(line):
    ret = {}
    line = line.split(b'#')[0].strip()
    if len(line) != 0:
        p = line.split()
        ret[p[1].strip().decode('ascii')] = parsePerfNum(p[0])
    return ret

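# Parse the complete 'perf stat' output: the elapsed-time line plus every
# counter line (the lines carrying a '#' comment).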
def parsePerf(output):
    ret = {}
    lines = [x.strip() for x in output.split(b'\n')]

    seconds = [x for x in lines if b'seconds time elapsed' in x][0]
    seconds = seconds.strip().split()[0].strip()
    ret['seconds-elapsed'] = parsePerfNum(seconds)

    measurement_lines = [x for x in lines if b'#' in x]
    for l in measurement_lines:
        ret.update(parsePerfLine(l))
    return ret

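# Run a command, capturing stdout and stderr; echo the output on failure so
# the error is not lost.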
def run(cmd):
    try:
        return subprocess.check_output(cmd, stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        print(e.output)
        raise e

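# Merge one run's measurements into acc, which maps each counter name to the
# list of values seen across runs.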
def combinePerfRun(acc, d):
    for k, v in d.items():
        a = acc.get(k, [])
        a.append(v)
        acc[k] = a

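# Measure cmd under 'perf stat' args.runs times, optionally prefixed by the
# comma-separated --wrapper command, and return the combined measurements.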
def perf(cmd):
    # Discard the first run to warm up any system cache.
    run(cmd)

    ret = {}
    wrapper_args = [x for x in args.wrapper.split(',') if x]
    for i in range(args.runs):
        # Remove the output of the previous link before measuring the next one.
        os.unlink('t')
        out = run(wrapper_args + ['perf', 'stat'] + cmd)
        r = parsePerf(out)
        combinePerfRun(ret, r)
    os.unlink('t')
    return ret

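# Benchmark a single link: enter the benchmark directory, link with the
# matching response file, and label the measurements with the benchmark name.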
def runBench(bench):
    thread_arg = [] if args.threads else ['--no-threads']
    os.chdir(bench.directory)
    suffix = '-%s' % bench.variant if bench.variant else ''
    response = 'response' + suffix + '.txt'
    ret = perf(['../ld.lld', '@' + response, '-o', 't'] + thread_arg)
    ret['name'] = str(bench)
    os.chdir('..')
    return ret

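# Run all benchmarks and wrap the results in the JSON document that LNT's
# submitRun endpoint expects (format_version 2).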
def buildLntJson(benchmarks):
    start = datetime.datetime.utcnow().isoformat()
    tests = [runBench(b) for b in benchmarks]
    end = datetime.datetime.utcnow().isoformat()
    ret = {
        'format_version': 2,
        'machine': {'name': args.machine},
        'run': {
            'start_time': start,
            'end_time': end,
            'llvm_project_revision': args.revision
        },
        'tests': tests
    }
    return json.dumps(ret, sort_keys=True, indent=4)

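# POST the JSON report to the LNT server as the 'input_data' form field.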
def submitToServer(data):
    data2 = urlencode({'input_data': data}).encode('ascii')
    urlopen(Request(args.url, data2))

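# Run everything from inside the benchmark directory and submit the report.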
os.chdir(args.benchmark_directory)
data = buildLntJson(getBenchmarks())
submitToServer(data)