def benchmark(self, shell, env, args):
    """Run SunSpider in the given shell and parse per-test timings.

    Returns a list of {'name': str, 'time': str} dicts, with the
    aggregate reported under the name '__total__'.  Raises Exception
    if the dashed results marker never appears in the output.
    """
    # SunSpider takes extra shell arguments as one '--args=...' string.
    if args is not None:
        args = '--args=' + ' '.join(args)
    else:
        args = ''

    output = utils.RunTimedCheckOutput([
        "./sunspider",
        "--shell=" + shell,
        "--runs=" + str(self.runs),
        args
    ], env=env)

    tests = []
    lines = output.splitlines()

    found = False
    for x in lines:
        # Per-test results only appear after a dashed separator line;
        # its exact width differs between SunSpider versions.
        if x == "--------------------------------------------" or \
           x == "-----------------------------------------------":
            found = True
        if x[0:5] == "Total":
            m = re.search(r":\s+(\d+\.\d+)ms", x)
            tests.append({'name': '__total__', 'time': m.group(1)})
            print(m.group(1) + ' - __total__')
        # Per-test lines are indented by four spaces.  (Fix: the old
        # comparison tested a 4-char slice against a shorter literal,
        # which could never match, leaving this branch dead.)
        elif found and x[0:4] == "    ":
            m = re.search(r"    (.+):\s+(\d+\.\d+)ms", x)
            if m is not None:
                tests.append({'name': m.group(1), 'time': m.group(2)})
                print(m.group(2) + ' - ' + m.group(1))

    if not found:
        print(output)
        raise Exception("output marker not found")

    return tests
def benchmark(self, shell, env, args):
    """Run the suite's run.js driver and collect per-test scores.

    Each output line matching '<name>(RunTime): <number>' contributes
    one test entry; the value is divided by 1000 (presumably ms -> s —
    TODO confirm against the driver) and the sum of all entries is
    appended under the name '__total__'.
    """
    full_args = [shell]
    if args:
        full_args.extend(args)
    full_args.append('run.js')

    print(os.getcwd())
    output = utils.RunTimedCheckOutput(full_args, env=env)

    tests = []
    total = 0.0
    # Raw string avoids deprecated escape sequences (\( and \d);
    # compiled once instead of on every output line.
    runtime_re = re.compile(r"(.+)\(RunTime\): (\d+\.\d+)")
    for line in output.splitlines():
        m = runtime_re.search(line)
        if not m:
            continue
        name = m.group(1)
        score = float(m.group(2)) / 1000
        total += score
        tests.append({'name': name, 'time': score})
        print(str(score) + ' - ' + name)

    tests.append({'name': '__total__', 'time': total})
    return tests
def _run(self, submit, native, modes):
    """Run the native (C++) configuration, then the normal JS modes."""
    # Native pass: drive harness.py with the configured C/C++ compilers.
    harness_cmd = [utils.config.PythonName, 'harness.py', '--native']
    harness_cmd.append('--cc="' + native.cc + '"')
    harness_cmd.append('--cxx="' + native.cxx + '"')
    harness_cmd.append('--')
    harness_cmd.extend(native.args)

    output = utils.RunTimedCheckOutput(harness_cmd)
    submit.AddTests(self.parse(output), self.suite, self.version, native.mode)

    # JS pass: defer to the generic benchmark runner.
    super(AsmJS, self)._run(submit, native, modes)
def benchmark(self, shell, env, args):
    """Run the five Tamarin .swf benchmarks and collect NotifyScore values.

    Returns one {'name', 'time'} entry per 'NotifyScore <n>' line seen,
    plus a '__total__' entry holding the average score (only when at
    least one score was collected).
    """
    with utils.chdir("/tmp/"):
        full_args = [shell]
        if args:
            full_args.extend(args)
        full_args.append('build/ts/shell.js')
        # Shell-specific separators so the .swf path reaches the script
        # rather than being eaten by the JS shell itself.
        if "WebKit" in shell:
            full_args.append('--')
        if "v8" in shell:
            full_args.append('--')
            full_args.append('-x')

        tests = []
        totalscore = 0
        # Raw string, compiled once — previously re-parsed on every
        # output line of every benchmark.
        score_re = re.compile(r"NotifyScore (\d+)")
        bench_path = os.path.join(utils.config.BenchmarkPath, self.folder)
        for name in ["crypto", "deltablue", "raytrace", "richards", "splay"]:
            output = utils.RunTimedCheckOutput(
                full_args + [os.path.join(bench_path, name + ".swf")],
                env=env)
            for line in output.splitlines():
                m = score_re.search(line)
                if not m:
                    continue
                score = m.group(1)
                totalscore += int(score)
                tests.append({'name': name, 'time': score})
                print(score + ' - ' + name)

        if len(tests) > 0:
            # NOTE(review): on Python 3 this is true division; under
            # Python 2 it floors — confirm which interpreter runs this.
            tests.append({
                'name': '__total__',
                'time': totalscore / len(tests)
            })
        return tests
def benchmark(self, shell, env, args):
    """Run the suite's run.js driver and parse '<name>: <score>' lines.

    The summary line whose name starts with "Score" is reported under
    the name '__total__'.  Returns a list of {'name', 'time'} dicts
    (scores are kept as strings, matching the harness convention).
    """
    full_args = [shell]
    if args:
        full_args.extend(args)
    full_args.append('run.js')

    print(os.getcwd())
    output = utils.RunTimedCheckOutput(full_args, env=env)

    tests = []
    # Raw string, compiled once instead of per output line.
    result_re = re.compile(r"(.+): (\d+)")
    for line in output.splitlines():
        m = result_re.search(line)
        if not m:
            continue
        name = m.group(1)
        score = m.group(2)
        # Idiomatic startswith replaces the slice comparison.
        if name.startswith("Score"):
            name = "__total__"
        tests.append({'name': name, 'time': score})
        print(score + ' - ' + name)
    return tests
def benchmark(self, shell, env, args):
    """Delegate the run to harness.py and parse its output.

    Robustness fix: the sibling benchmark() implementations in this
    file accept args=None, but concatenating None here raised
    TypeError — treat a falsy args as "no extra arguments".
    """
    extra = list(args) if args else []
    full_args = [utils.config.PythonName, 'harness.py', shell, '--'] + extra
    print(' '.join(full_args))
    output = utils.RunTimedCheckOutput(full_args, env=env)
    return self.parse(output)