def _SetupAndRunPerformanceTest(config, path_to_goma, is_cq_tryjob=False):
  """Attempts to build and run the current revision with and without the
  current patch, with the parameters passed in.

  Args:
    config: The config read from run-perf-test.cfg.
    path_to_goma: Path to goma directory.
    is_cq_tryjob: Whether or not the try job was initiated by the commit queue.

  Returns:
    An exit code: 0 on success, otherwise 1.
  """
  if platform.release() == 'XP':
    print 'Windows XP is not supported for perf try jobs because it lacks '
    print 'goma support. Please refer to crbug.com/330900.'
    return 1
  try:
    with Goma(path_to_goma) as _:
      config['use_goma'] = bool(path_to_goma)
      if config['use_goma']:
        config['goma_dir'] = os.path.abspath(path_to_goma)
      if not is_cq_tryjob:
        _RunPerformanceTest(config)
      else:
        return _RunBenchmarksForCommitQueue(config)
    return 0
  except RuntimeError as e:
    bisect_utils.OutputAnnotationStepFailure()
    bisect_utils.OutputAnnotationStepClosed()
    _OutputFailedResults('Error: %s' % e)
    return 1
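# A minimal usage sketch, assuming the surrounding script parses its options
# elsewhere. The _ExampleMain name, the _LoadConfigFile helper, and the
# GOMA_DIR environment lookup are hypothetical placeholders for illustration,
# not part of this tool's actual entry point.
def _ExampleMain():
  config = _LoadConfigFile('run-perf-test.cfg')  # hypothetical helper
  path_to_goma = os.environ.get('GOMA_DIR', '')  # assumed goma location
  # Returns 0 on success, 1 on failure, suitable for sys.exit().
  return _SetupAndRunPerformanceTest(config, path_to_goma, is_cq_tryjob=False)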
def _OutputOverallResults(results):
  """Creates results step and prints results on buildbot job."""
  test_status = all(results.values())
  bisect_utils.OutputAnnotationStepStart(
      'Results - %s' % ('Passed' if test_status else 'Failed'))
  print
  print 'Results of benchmarks:'
  print
  for benchmark, result in results.iteritems():
    print '%s: %s' % (benchmark, 'Passed' if result else 'Failed')
  if not test_status:
    bisect_utils.OutputAnnotationStepFailure()
  bisect_utils.OutputAnnotationStepClosed()
  # Returns 0 for success and 1 for failure.
  return 0 if test_status else 1
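# Illustrative sketch of the dict shape _OutputOverallResults expects: keys
# are benchmark names, values are the booleans returned by _RunBenchmark.
# The benchmark names below are hypothetical examples, not an assertion of
# what the commit queue actually runs.
def _ExampleOverallResults():
  results = {
      'example.benchmark_a': True,   # hypothetical benchmark name
      'example.benchmark_b': False,  # hypothetical benchmark name
  }
  # A single False value fails the overall step and yields exit code 1.
  return _OutputOverallResults(results)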
def _RunBenchmark(bisect_instance, opts, bot_name, benchmark_name):
  """Runs a Telemetry benchmark."""
  bisect_utils.OutputAnnotationStepStart(benchmark_name)
  command_to_run = _GuessTelemetryTestCommand(bot_name, benchmark_name)
  args = shlex.split(command_to_run, posix=not bisect_utils.IsWindowsHost())
  output, return_code = bisect_utils.RunProcessAndRetrieveOutput(args, SRC_DIR)
  # A value other than 0 indicates that the test couldn't be run, and the
  # output should also include an error message.
  if return_code:
    print ('Error: Something went wrong running the benchmark: %s. '
           'Please review the command line: %s\n\n%s' %
           (benchmark_name, command_to_run, output))
    bisect_utils.OutputAnnotationStepFailure()
  print output
  bisect_utils.OutputAnnotationStepClosed()
  # return_code holds the exit code of the subprocess that ran the test
  # command: 0 on a successful run, non-zero otherwise.
  return return_code == 0
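# A minimal sketch of how per-benchmark results might be aggregated and fed
# to _OutputOverallResults, in the spirit of _RunBenchmarksForCommitQueue
# (defined elsewhere in this script). The benchmark_names parameter and the
# bisect_instance/opts plumbing are assumptions made for illustration.
def _ExampleRunBenchmarks(bisect_instance, opts, bot_name, benchmark_names):
  results = {}
  for benchmark_name in benchmark_names:
    # _RunBenchmark returns True when the subprocess exits with code 0.
    results[benchmark_name] = _RunBenchmark(
        bisect_instance, opts, bot_name, benchmark_name)
  return _OutputOverallResults(results)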