def _RemoveBenchmarksJson():
  """Deletes BENCHMARKS_JSON_FILE, tolerating it already being absent."""
  try:
    os.remove(BENCHMARKS_JSON_FILE)
  except OSError as e:
    # Only a missing file is acceptable; re-raise anything else
    # (e.g. permission errors) so the problem is visible.
    if e.errno != errno.ENOENT:
      raise


def _ListAvailableBenchmarks(bot_platform):
  """Gets all available benchmarks names as a list.

  Runs `tools/perf/run_benchmark list` with JSON output and reads the
  resulting file to extract benchmark names.

  Args:
    bot_platform: Bot platform name; selects the browser type, and a
        platform containing 'win' causes the script to be invoked via
        'python' (it is not directly executable on Windows).

  Returns:
    A list of benchmark names, or None if no 'steps' entry was found.

  Raises:
    RuntimeError: If the list command exits with a non-zero return code.
  """
  browser_type = _GetBrowserType(bot_platform)
  # Remove any stale output from a previous run so we never read old data.
  _RemoveBenchmarksJson()
  command = []
  if 'win' in bot_platform:
    command.append('python')
  command.append('tools/perf/run_benchmark')
  command.extend([
      'list',
      '--browser',
      browser_type,
      '--json-output',
      BENCHMARKS_JSON_FILE,
  ])
  try:
    output, return_code = bisect_utils.RunProcessAndRetrieveOutput(
        command=command, cwd=SRC_DIR)
    if return_code:
      raise RuntimeError('Something went wrong while listing benchmarks. '
                         'Please review the command line: %s.\nERROR: [%s]' %
                         (' '.join(command), output))
    with open(BENCHMARKS_JSON_FILE) as tests_json:
      tests_data = json.load(tests_json)
      # Look up 'steps' once instead of twice.
      steps = tests_data.get('steps')
      if steps:
        return steps.keys()
  finally:
    # Best-effort cleanup of the temporary JSON file; the helper already
    # ignores a missing file, so no exists() pre-check is needed.
    _RemoveBenchmarksJson()
  return None
def _RunBenchmark(bisect_instance, opts, bot_name, benchmark_name):
  """Runs a Telemetry benchmark.

  Args:
    bisect_instance: Bisect instance (unused in this function; kept for
        interface compatibility with callers).
    opts: Options object (unused in this function; kept for interface
        compatibility with callers).
    bot_name: Name of the bot, used to guess the benchmark command line.
    benchmark_name: Name of the Telemetry benchmark to run.

  Returns:
    True if the benchmark ran successfully (exit code 0), otherwise False.
  """
  bisect_utils.OutputAnnotationStepStart(benchmark_name)
  command_to_run = _GuessTelemetryTestCommand(bot_name, benchmark_name)
  # On Windows, posix=False keeps shlex from treating path backslashes as
  # escape characters.
  args = shlex.split(command_to_run, posix=not bisect_utils.IsWindowsHost())
  output, return_code = bisect_utils.RunProcessAndRetrieveOutput(
      args, SRC_DIR)
  # A value other than 0 indicates that the test couldn't be run, and the
  # captured output should also include an error message.
  if return_code:
    # Spaces added between the concatenated message parts; the original
    # rendered as "...benchmark: %s.Please review the command line:%s".
    print('Error: Something went wrong running the benchmark: %s. '
          'Please review the command line: %s\n\n%s' %
          (benchmark_name, command_to_run, output))
    bisect_utils.OutputAnnotationStepFailure()
  # Always echo the benchmark output and close the annotation step,
  # whether or not the run succeeded.
  print(output)
  bisect_utils.OutputAnnotationStepClosed()
  # return_code is 0 on a successful test run, non-zero otherwise.
  return return_code == 0