def specific_stats(browser_perf, time_size, cl):
  """Return a string with all of the gory details and specifics on the raw
  benchmark numbers and individual benchmark changes.

  Args:
    browser_perf: BrowserPerformanceTestRunner object. Holds browser perf
        stats.
    time_size: CompileTimeSizeTestRunner object.
    cl: CommandLinePerformanceTestRunner object.
  """
  output = "\n\n---------------------------------------------\nThe latest " + \
      "raw numbers (and changes) for those " + \
      "interested:\nBrowser performance:\n"
  for v8_or_frog in V8_AND_FROG:
    for browser in create_graph.get_browsers():
      output += "  %s %s:\n" % (browser, v8_or_frog)
      for benchmark in create_graph.get_benchmarks():
        bench_list = browser_perf.values_dict[browser][v8_or_frog][benchmark]
        output += "    %s %s%s\n" % ((benchmark + ':').ljust(25),
            str(bench_list[1]).rjust(10), get_amount_changed(bench_list))
  output += "\nCompile Size and Time for frog:\n"
  for metric in time_size.values_list:
    metric_list = time_size.values_dict[COMMAND_LINE][FROG][metric]
    output += "  %s %s%s\n" % ((metric + ':').ljust(25),
        str(metric_list[1]).rjust(10), get_amount_changed(metric_list))
  output += "\nCommandline performance:\n"
  for v8_or_frog in V8_AND_FROG:
    output += '  %s:\n' % v8_or_frog
    for benchmark in cl.values_list:
      bench_list = cl.values_dict[COMMAND_LINE][v8_or_frog][benchmark]
      output += "    %s %s%s\n" % ((benchmark + ':').ljust(25),
          str(bench_list[1]).rjust(10), get_amount_changed(bench_list))
  return output
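# NOTE: get_amount_changed() is called above but defined elsewhere in this
# module. The following is a hypothetical sketch of what such a helper could
# look like, assuming each values_dict entry is a two-element
# [previous, latest] list (the same assumption summary_stats makes below when
# it computes list[1] - list[0]); the real implementation may differ.
def get_amount_changed_sketch(value_list):
  """Illustrative only: render the change between the two stored runs.

  Args:
    value_list: two-element list of [previous, latest] values for one metric.
  """
  delta = value_list[1] - value_list[0]
  # Signed delta, padded so it lines up with the ljust/rjust columns above.
  return ('(%+d)' % delta).rjust(10)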
def summary_stats(browser_perf, time_size, cl, correctness):
  """Return the summarized stats report.

  Args:
    browser_perf: BrowserPerformanceTestRunner object. Holds browser perf
        stats.
    time_size: CompileTimeSizeTestRunner object.
    cl: CommandLinePerformanceTestRunner object.
    correctness: BrowserCorrectnessTestRunner object.
  """
  output = "Summary of changes in the last 24 hours: \n\nBrowser " + \
      "performance: (revision %d)\n" % \
      browser_perf.revision_dict[create_graph.get_browsers()[0]][FROG]\
      [browser_perf.values_list[0]][1]
  for browser in create_graph.get_browsers():
    geo_mean_list = browser_perf.values_dict[browser][FROG][FROG_MEAN]
    # TODO(efortuna): Deal with the fact that the latest of all browsers may
    # not be available.
    output += "  %s%s\n" % ((browser + ':').ljust(25),
        str(geo_mean_list[1] - geo_mean_list[0]).rjust(10))
  output += "\nCompile Size and Time: (revision %d)\n" % \
      time_size.revision_dict[COMMAND_LINE][FROG][time_size.values_list[0]][1]
  for metric in time_size.values_list:
    metric_list = time_size.values_dict[COMMAND_LINE][FROG][metric]
    output += "  %s%s\n" % ((metric + ':').ljust(25),
        str(metric_list[1] - metric_list[0]).rjust(10))
  output += "\nPercentage of language tests passing (revision %d)\n" % \
      correctness.revision_dict['chrome'][FROG][correctness.values_list[0]][1]
  for browser in create_graph.get_browsers():
    num_correct = correctness.values_dict[browser][FROG][CORRECTNESS]
    output += "  %s%s%% more passing\n" % ((browser + ':').ljust(25),
        str(num_correct[1] - num_correct[0]).rjust(10))
  output += "\nCommandline performance: (revision %d)\n" % \
      cl.revision_dict[COMMAND_LINE][FROG][cl.values_list[0]][1]
  for benchmark in cl.values_list:
    bench_list = cl.values_dict[COMMAND_LINE][FROG][benchmark]
    output += "  %s%s\n" % ((benchmark + ':').ljust(25),
        str(bench_list[1] - bench_list[0]).rjust(10))
  return output
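# Illustrative usage (not part of the original file): both report builders
# return plain strings, so a full daily report body is just their
# concatenation. The runner objects are assumed to be the already-populated
# test runners described in the docstrings above.
def build_full_report_sketch(browser_perf, time_size, cl, correctness):
  """Hypothetical sketch: assemble the summary plus the detailed numbers."""
  return (summary_stats(browser_perf, time_size, cl, correctness) +
          specific_stats(browser_perf, time_size, cl))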