Code Example #1
File: test_runner.py Project: nageshlop/proxy-1
def RunTestsInPlatformMode(args, result_sink_client=None):

  def infra_error(message):
    logging.fatal(message)
    sys.exit(constants.INFRA_EXIT_CODE)

  if args.command not in _SUPPORTED_IN_PLATFORM_MODE:
    infra_error('%s is not yet supported in platform mode' % args.command)

  ### Set up sigterm handler.

  contexts_to_notify_on_sigterm = []
  def unexpected_sigterm(_signum, _frame):
    msg = [
      'Received SIGTERM. Shutting down.',
    ]
    for live_thread in threading.enumerate():
      # pylint: disable=protected-access
      thread_stack = ''.join(traceback.format_stack(
          sys._current_frames()[live_thread.ident]))
      msg.extend([
        'Thread "%s" (ident: %s) is currently running:' % (
            live_thread.name, live_thread.ident),
        thread_stack])

    for context in contexts_to_notify_on_sigterm:
      context.ReceivedSigterm()

    infra_error('\n'.join(msg))

  signal.signal(signal.SIGTERM, unexpected_sigterm)

  ### Set up results handling.
  # TODO(jbudorick): Rewrite results handling.

  # all_raw_results is a list of lists of
  # base_test_result.TestRunResults objects. Each instance of
  # TestRunResults contains all test results produced by a single try,
  # while each list of TestRunResults contains all tries in a single
  # iteration.
  all_raw_results = []

  # all_iteration_results is a list of base_test_result.TestRunResults
  # objects. Each instance of TestRunResults contains the last test
  # result for each test run in that iteration.
  all_iteration_results = []

  global_results_tags = set()

  json_file = tempfile.NamedTemporaryFile(delete=False)
  json_file.close()

  @contextlib.contextmanager
  def json_finalizer():
    try:
      yield
    finally:
      if args.json_results_file and os.path.exists(json_file.name):
        shutil.move(json_file.name, args.json_results_file)
      elif args.isolated_script_test_output and os.path.exists(json_file.name):
        shutil.move(json_file.name, args.isolated_script_test_output)
      else:
        os.remove(json_file.name)

  @contextlib.contextmanager
  def json_writer():
    try:
      yield
    except Exception:
      global_results_tags.add('UNRELIABLE_RESULTS')
      raise
    finally:
      if args.isolated_script_test_output:
        json_results.GenerateJsonTestResultFormatFile(all_raw_results,
                                                      json_file.name,
                                                      indent=2)
      else:
        json_results.GenerateJsonResultsFile(
            all_raw_results,
            json_file.name,
            global_tags=list(global_results_tags),
            indent=2)

  @contextlib.contextmanager
  def upload_logcats_file():
    try:
      yield
    finally:
      if not args.logcat_output_file:
        logging.critical('Cannot upload logcat file: no file specified.')
      elif not os.path.exists(args.logcat_output_file):
        logging.critical("Cannot upload logcat file: file doesn't exist.")
      else:
        with open(args.logcat_output_file) as src:
          dst = logdog_helper.open_text('unified_logcats')
          if dst:
            shutil.copyfileobj(src, dst)
            dst.close()
            logging.critical(
                'Logcat: %s', logdog_helper.get_viewer_url('unified_logcats'))


  logcats_uploader = contextlib_ext.Optional(
      upload_logcats_file(),
      'upload_logcats_file' in args and args.upload_logcats_file)

  ### Set up test objects.

  out_manager = output_manager_factory.CreateOutputManager(args)
  env = environment_factory.CreateEnvironment(
      args, out_manager, infra_error)
  test_instance = test_instance_factory.CreateTestInstance(args, infra_error)
  test_run = test_run_factory.CreateTestRun(env, test_instance, infra_error)

  contexts_to_notify_on_sigterm.append(env)
  contexts_to_notify_on_sigterm.append(test_run)

  ### Run.
  with out_manager, json_finalizer():
    with json_writer(), logcats_uploader, env, test_instance, test_run:

      repetitions = (xrange(args.repeat + 1) if args.repeat >= 0
                     else itertools.count())
      result_counts = collections.defaultdict(
          lambda: collections.defaultdict(int))
      iteration_count = 0
      for _ in repetitions:
        # raw_results will be populated with base_test_result.TestRunResults by
        # test_run.RunTests(). It is immediately added to all_raw_results so
        # that in the event of an exception, all_raw_results will already have
        # the up-to-date results and those can be written to disk.
        raw_results = []
        all_raw_results.append(raw_results)

        test_run.RunTests(raw_results)
        if not raw_results:
          all_raw_results.pop()
          continue

        iteration_results = base_test_result.TestRunResults()
        for r in reversed(raw_results):
          iteration_results.AddTestRunResults(r)
        all_iteration_results.append(iteration_results)

        iteration_count += 1
        for r in iteration_results.GetAll():
          if result_sink_client:
            result_sink_client.Post(r.GetName(), r.GetType(), r.GetLog())

          result_counts[r.GetName()][r.GetType()] += 1
        report_results.LogFull(
            results=iteration_results,
            test_type=test_instance.TestType(),
            test_package=test_run.TestPackage(),
            annotation=getattr(args, 'annotations', None),
            flakiness_server=getattr(args, 'flakiness_dashboard_server',
                                     None))
        if args.break_on_failure and not iteration_results.DidRunPass():
          break

      if iteration_count > 1:
        # display summary results
        # only display results for a test if at least one test did not pass
        all_pass = 0
        tot_tests = 0
        for test_name in result_counts:
          tot_tests += 1
          if any(result_counts[test_name][x] for x in (
              base_test_result.ResultType.FAIL,
              base_test_result.ResultType.CRASH,
              base_test_result.ResultType.TIMEOUT,
              base_test_result.ResultType.UNKNOWN)):
            logging.critical(
                '%s: %s',
                test_name,
                ', '.join('%s %s' % (str(result_counts[test_name][i]), i)
                          for i in base_test_result.ResultType.GetTypes()))
          else:
            all_pass += 1

        logging.critical('%s of %s tests passed in all %s runs',
                         str(all_pass),
                         str(tot_tests),
                         str(iteration_count))

    if (args.local_output or not local_utils.IsOnSwarming()
        ) and not args.isolated_script_test_output:
      with out_manager.ArchivedTempfile(
          'test_results_presentation.html',
          'test_results_presentation',
          output_manager.Datatype.HTML) as results_detail_file:
        result_html_string, _, _ = test_results_presentation.result_details(
            json_path=json_file.name,
            test_name=args.command,
            cs_base_url='http://cs.chromium.org',
            local_output=True)
        results_detail_file.write(result_html_string.encode('utf-8'))
        results_detail_file.flush()
      logging.critical('TEST RESULTS: %s', results_detail_file.Link())

      ui_screenshots = test_results_presentation.ui_screenshot_set(
          json_file.name)
      if ui_screenshots:
        with out_manager.ArchivedTempfile(
            'ui_screenshots.json',
            'ui_capture',
            output_manager.Datatype.JSON) as ui_screenshot_file:
          ui_screenshot_file.write(ui_screenshots)
        logging.critical('UI Screenshots: %s', ui_screenshot_file.Link())

  return (0 if all(r.DidRunPass() for r in all_iteration_results)
          else constants.ERROR_EXIT_CODE)
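
The json_writer() / json_finalizer() pair above relies on the generator-based contextmanager pattern: the finally clause runs whether the test run succeeds or raises, so results always reach disk before the process exits. A minimal, self-contained sketch of that pattern (the names below are illustrative, not part of the Chromium code):

import contextlib
import json
import tempfile


@contextlib.contextmanager
def results_writer(results, path):
  try:
    yield
  except Exception:
    # Mirrors the UNRELIABLE_RESULTS tagging above: mark the output, then
    # let the exception keep propagating.
    results.append({'global_tag': 'UNRELIABLE_RESULTS'})
    raise
  finally:
    # Runs on success and on failure alike.
    with open(path, 'w') as f:
      json.dump(results, f, indent=2)


results = []
out_file = tempfile.NamedTemporaryFile(suffix='.json', delete=False)
out_file.close()
with results_writer(results, out_file.name):
  results.append({'test': 'example_test', 'status': 'PASS'})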
Code Example #2
def RunTestsInPlatformMode(args):
    def infra_error(message):
        logging.fatal(message)
        sys.exit(constants.INFRA_EXIT_CODE)

    if args.command not in _SUPPORTED_IN_PLATFORM_MODE:
        infra_error('%s is not yet supported in platform mode' % args.command)

    ### Set up sigterm handler.

    def unexpected_sigterm(_signum, _frame):
        msg = [
            'Received SIGTERM. Shutting down.',
        ]
        for live_thread in threading.enumerate():
            # pylint: disable=protected-access
            thread_stack = ''.join(
                traceback.format_stack(
                    sys._current_frames()[live_thread.ident]))
            msg.extend([
                'Thread "%s" (ident: %s) is currently running:' %
                (live_thread.name, live_thread.ident), thread_stack
            ])

        infra_error('\n'.join(msg))

    signal.signal(signal.SIGTERM, unexpected_sigterm)

    ### Set up results handling.
    # TODO(jbudorick): Rewrite results handling.

    # all_raw_results is a list of lists of
    # base_test_result.TestRunResults objects. Each instance of
    # TestRunResults contains all test results produced by a single try,
    # while each list of TestRunResults contains all tries in a single
    # iteration.
    all_raw_results = []

    # all_iteration_results is a list of base_test_result.TestRunResults
    # objects. Each instance of TestRunResults contains the last test
    # result for each test run in that iteration.
    all_iteration_results = []

    @contextlib.contextmanager
    def write_json_file():
        try:
            yield
        finally:
            json_results.GenerateJsonResultsFile(all_raw_results,
                                                 args.json_results_file)

    json_writer = contextlib_ext.Optional(write_json_file(),
                                          args.json_results_file)

    @contextlib.contextmanager
    def upload_logcats_file():
        try:
            yield
        finally:
            if not args.logcat_output_file:
                logging.critical('Cannot upload logcats file. '
                                 'File to save logcat is not specified.')
            else:
                with open(args.logcat_output_file) as src:
                    dst = logdog_helper.open_text('unified_logcats')
                    if dst:
                        shutil.copyfileobj(src, dst)
                        dst.close()
                        logging.critical(
                            'Logcat: %s',
                            logdog_helper.get_viewer_url('unified_logcats'))

    logcats_uploader = contextlib_ext.Optional(
        upload_logcats_file(),
        'upload_logcats_file' in args and args.upload_logcats_file)

    ### Set up test objects.

    env = environment_factory.CreateEnvironment(args, infra_error)
    test_instance = test_instance_factory.CreateTestInstance(args, infra_error)
    test_run = test_run_factory.CreateTestRun(args, env, test_instance,
                                              infra_error)

    ### Run.

    with json_writer, logcats_uploader, env, test_instance, test_run:

        repetitions = (xrange(args.repeat + 1)
                       if args.repeat >= 0 else itertools.count())
        result_counts = collections.defaultdict(
            lambda: collections.defaultdict(int))
        iteration_count = 0
        for _ in repetitions:
            raw_results = test_run.RunTests()
            if not raw_results:
                continue

            all_raw_results.append(raw_results)

            iteration_results = base_test_result.TestRunResults()
            for r in reversed(raw_results):
                iteration_results.AddTestRunResults(r)
            all_iteration_results.append(iteration_results)

            iteration_count += 1
            for r in iteration_results.GetAll():
                result_counts[r.GetName()][r.GetType()] += 1
            report_results.LogFull(results=iteration_results,
                                   test_type=test_instance.TestType(),
                                   test_package=test_run.TestPackage(),
                                   annotation=getattr(args, 'annotations',
                                                      None),
                                   flakiness_server=getattr(
                                       args, 'flakiness_dashboard_server',
                                       None))
            if args.break_on_failure and not iteration_results.DidRunPass():
                break

        if iteration_count > 1:
            # display summary results
            # only display results for a test if at least one test did not pass
            all_pass = 0
            tot_tests = 0
            for test_name in result_counts:
                tot_tests += 1
                if any(result_counts[test_name][x]
                       for x in (base_test_result.ResultType.FAIL,
                                 base_test_result.ResultType.CRASH,
                                 base_test_result.ResultType.TIMEOUT,
                                 base_test_result.ResultType.UNKNOWN)):
                    logging.critical(
                        '%s: %s', test_name, ', '.join(
                            '%s %s' % (str(result_counts[test_name][i]), i)
                            for i in base_test_result.ResultType.GetTypes()))
                else:
                    all_pass += 1

            logging.critical('%s of %s tests passed in all %s runs',
                             str(all_pass), str(tot_tests),
                             str(iteration_count))

    if args.command == 'perf' and (args.steps or args.single_step):
        return 0

    return (0 if all(
        r.DidRunPass()
        for r in all_iteration_results) else constants.ERROR_EXIT_CODE)
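
contextlib_ext.Optional, used twice above, wraps a context manager so it is only entered when a condition is truthy (write JSON only when --json-results-file was passed, upload logcats only when requested). contextlib_ext is a Chromium helper; a rough stand-in, written here only to illustrate the idea, might look like:

import contextlib


@contextlib.contextmanager
def optional(cm, condition):
    """Enter cm only when condition is truthy; otherwise yield None."""
    if condition:
        with cm as entered:
            yield entered
    else:
        yield None


@contextlib.contextmanager
def noisy(name):
    print('enter %s' % name)
    try:
        yield name
    finally:
        print('exit %s' % name)


with optional(noisy('logcat upload'), condition=False):
    pass  # prints nothing: the wrapped manager is never entered
with optional(noisy('logcat upload'), condition=True):
    pass  # prints 'enter logcat upload' / 'exit logcat upload'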
Code Example #3
def _RunInstrumentationTests(args, devices):
    """Subcommand of RunTestsCommands which runs instrumentation tests."""
    logging.info('_RunInstrumentationTests(%s, %s)' %
                 (str(args), str(devices)))

    instrumentation_options = ProcessInstrumentationOptions(args)

    if len(devices) > 1 and args.wait_for_debugger:
        logging.warning(
            'Debugger can not be sharded, using first available device')
        devices = devices[:1]

    results = base_test_result.TestRunResults()
    exit_code = 0

    if args.run_java_tests:
        runner_factory, tests = instrumentation_setup.Setup(
            instrumentation_options, devices)

        test_results, exit_code = test_dispatcher.RunTests(
            tests,
            runner_factory,
            devices,
            shard=True,
            test_timeout=None,
            num_retries=args.num_retries)

        results.AddTestRunResults(test_results)

    if args.run_python_tests:
        runner_factory, tests = host_driven_setup.InstrumentationSetup(
            args.host_driven_root, args.official_build,
            instrumentation_options)

        if tests:
            test_results, test_exit_code = test_dispatcher.RunTests(
                tests,
                runner_factory,
                devices,
                shard=True,
                test_timeout=None,
                num_retries=args.num_retries)

            results.AddTestRunResults(test_results)

            # Only allow exit code escalation
            if test_exit_code and exit_code != constants.ERROR_EXIT_CODE:
                exit_code = test_exit_code

    if args.device_flags:
        args.device_flags = os.path.join(constants.DIR_SOURCE_ROOT,
                                         args.device_flags)

    report_results.LogFull(results=results,
                           test_type='Instrumentation',
                           test_package=os.path.basename(args.test_apk),
                           annotation=args.annotations,
                           flakiness_server=args.flakiness_dashboard_server)

    if args.json_results_file:
        json_results.GenerateJsonResultsFile(results, args.json_results_file)

    return exit_code
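
The "Only allow exit code escalation" branch above keeps the worst result seen so far: a later, milder exit code never overwrites constants.ERROR_EXIT_CODE. A self-contained sketch of that rule (the numeric values are placeholders, not the real constants.* values):

ERROR_EXIT_CODE = 1      # placeholder value
WARNING_EXIT_CODE = 2    # placeholder value


def escalate(exit_code, test_exit_code):
    # Same shape as the branch above: only replace exit_code when the new
    # code is non-zero and the current one is not already an error.
    if test_exit_code and exit_code != ERROR_EXIT_CODE:
        return test_exit_code
    return exit_code


assert escalate(0, WARNING_EXIT_CODE) == WARNING_EXIT_CODE
assert escalate(ERROR_EXIT_CODE, WARNING_EXIT_CODE) == ERROR_EXIT_CODE
assert escalate(WARNING_EXIT_CODE, 0) == WARNING_EXIT_CODE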
Code Example #4
def _RunInstrumentationTests(args, devices):
    """Subcommand of RunTestsCommands which runs instrumentation tests."""
    logging.info('_RunInstrumentationTests(%s, %s)', str(args), str(devices))

    instrumentation_options = ProcessInstrumentationOptions(args)

    if len(devices) > 1 and args.wait_for_debugger:
        logging.warning(
            'Debugger can not be sharded, using first available device')
        devices = devices[:1]

    results = base_test_result.TestRunResults()
    exit_code = 0

    if args.run_java_tests:
        java_runner_factory, java_tests = instrumentation_setup.Setup(
            instrumentation_options, devices)
    else:
        java_runner_factory = None
        java_tests = None

    if args.run_python_tests:
        py_runner_factory, py_tests = host_driven_setup.InstrumentationSetup(
            args.host_driven_root, args.official_build,
            instrumentation_options)
    else:
        py_runner_factory = None
        py_tests = None

    results = []
    repetitions = (xrange(args.repeat + 1)
                   if args.repeat >= 0 else itertools.count())
    for _ in repetitions:
        iteration_results = base_test_result.TestRunResults()
        if java_tests:
            test_results, test_exit_code = test_dispatcher.RunTests(
                java_tests,
                java_runner_factory,
                devices,
                shard=True,
                test_timeout=None,
                num_retries=args.num_retries)
            iteration_results.AddTestRunResults(test_results)

            # Only allow exit code escalation
            if test_exit_code and exit_code != constants.ERROR_EXIT_CODE:
                exit_code = test_exit_code

        if py_tests:
            test_results, test_exit_code = test_dispatcher.RunTests(
                py_tests,
                py_runner_factory,
                devices,
                shard=True,
                test_timeout=None,
                num_retries=args.num_retries)
            iteration_results.AddTestRunResults(test_results)

            # Only allow exit code escalation
            if test_exit_code and exit_code != constants.ERROR_EXIT_CODE:
                exit_code = test_exit_code

        results.append(iteration_results)
        report_results.LogFull(
            results=iteration_results,
            test_type='Instrumentation',
            test_package=os.path.basename(args.test_apk),
            annotation=args.annotations,
            flakiness_server=args.flakiness_dashboard_server)

    if args.json_results_file:
        json_results.GenerateJsonResultsFile(results, args.json_results_file)

    return exit_code
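
The repetitions expression above treats a negative --repeat as "run until stopped" and a non-negative one as "run repeat + 1 times". A standalone sketch of that idiom (using Python 3's range where the code above uses the Python 2 xrange):

import itertools


def make_repetitions(repeat):
    # repeat >= 0: a bounded run of repeat + 1 iterations.
    # repeat < 0: an unbounded iterator, stopped only by break_on_failure
    #             or an external signal.
    return range(repeat + 1) if repeat >= 0 else itertools.count()


assert list(make_repetitions(2)) == [0, 1, 2]
endless = make_repetitions(-1)
assert next(endless) == 0 and next(endless) == 1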
Code Example #5
def _RunInstrumentationTests(args, devices):
    """Subcommand of RunTestsCommands which runs instrumentation tests."""
    logging.info('_RunInstrumentationTests(%s, %s)', str(args), str(devices))

    instrumentation_options = ProcessInstrumentationOptions(args)

    if len(devices) > 1 and args.wait_for_debugger:
        logging.warning(
            'Debugger can not be sharded, using first available device')
        devices = devices[:1]

    results = base_test_result.TestRunResults()
    exit_code = 0

    if args.run_java_tests:
        java_runner_factory, java_tests = instrumentation_setup.Setup(
            instrumentation_options, devices)
    else:
        java_runner_factory = None
        java_tests = None

    if args.run_python_tests:
        py_runner_factory, py_tests = host_driven_setup.InstrumentationSetup(
            args.host_driven_root, args.official_build,
            instrumentation_options)
    else:
        py_runner_factory = None
        py_tests = None

    results = []
    repetitions = (xrange(args.repeat + 1)
                   if args.repeat >= 0 else itertools.count())

    code_counts = {
        constants.INFRA_EXIT_CODE: 0,
        constants.ERROR_EXIT_CODE: 0,
        constants.WARNING_EXIT_CODE: 0,
        0: 0
    }

    def _escalate_code(old, new):
        for x in (constants.INFRA_EXIT_CODE, constants.ERROR_EXIT_CODE,
                  constants.WARNING_EXIT_CODE):
            if x in (old, new):
                return x
        return 0

    for _ in repetitions:
        iteration_results = base_test_result.TestRunResults()
        if java_tests:
            test_results, test_exit_code = test_dispatcher.RunTests(
                java_tests,
                java_runner_factory,
                devices,
                shard=True,
                test_timeout=None,
                num_retries=args.num_retries)
            iteration_results.AddTestRunResults(test_results)

            code_counts[test_exit_code] += 1
            exit_code = _escalate_code(exit_code, test_exit_code)

        if py_tests:
            test_results, test_exit_code = test_dispatcher.RunTests(
                py_tests,
                py_runner_factory,
                devices,
                shard=True,
                test_timeout=None,
                num_retries=args.num_retries)
            iteration_results.AddTestRunResults(test_results)

            code_counts[test_exit_code] += 1
            exit_code = _escalate_code(exit_code, test_exit_code)

        results.append(iteration_results)
        report_results.LogFull(
            results=iteration_results,
            test_type='Instrumentation',
            test_package=os.path.basename(args.test_apk),
            annotation=args.annotations,
            flakiness_server=args.flakiness_dashboard_server)

        if args.break_on_failure and exit_code in (constants.ERROR_EXIT_CODE,
                                                   constants.INFRA_EXIT_CODE):
            break

    logging.critical(
        'Instr tests: %s success, %s infra, %s errors, %s warnings',
        str(code_counts[0]), str(code_counts[constants.INFRA_EXIT_CODE]),
        str(code_counts[constants.ERROR_EXIT_CODE]),
        str(code_counts[constants.WARNING_EXIT_CODE]))

    if args.json_results_file:
        json_results.GenerateJsonResultsFile(results, args.json_results_file)

    return exit_code
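
_escalate_code above replaces the simpler rule from example #3 with an explicit severity order, so INFRA_EXIT_CODE is never downgraded to a warning by a later run. A standalone check of that ordering (placeholder values stand in for the real constants.*):

INFRA_EXIT_CODE = 87     # placeholder value
ERROR_EXIT_CODE = 1      # placeholder value
WARNING_EXIT_CODE = 88   # placeholder value


def escalate_code(old, new):
    # Walk the severity order and return the first code either side matches.
    for code in (INFRA_EXIT_CODE, ERROR_EXIT_CODE, WARNING_EXIT_CODE):
        if code in (old, new):
            return code
    return 0


assert escalate_code(0, WARNING_EXIT_CODE) == WARNING_EXIT_CODE
assert escalate_code(WARNING_EXIT_CODE, ERROR_EXIT_CODE) == ERROR_EXIT_CODE
assert escalate_code(INFRA_EXIT_CODE, WARNING_EXIT_CODE) == INFRA_EXIT_CODE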
Code Example #6
def RunTestsInPlatformMode(args):
    def infra_error(message):
        logging.fatal(message)
        sys.exit(constants.INFRA_EXIT_CODE)

    if args.command not in _SUPPORTED_IN_PLATFORM_MODE:
        infra_error('%s is not yet supported in platform mode' % args.command)

    with environment_factory.CreateEnvironment(args, infra_error) as env:
        with test_instance_factory.CreateTestInstance(args,
                                                      infra_error) as test:
            with test_run_factory.CreateTestRun(args, env, test,
                                                infra_error) as test_run:
                results = []
                repetitions = (xrange(args.repeat + 1)
                               if args.repeat >= 0 else itertools.count())
                result_counts = collections.defaultdict(
                    lambda: collections.defaultdict(int))
                iteration_count = 0
                for _ in repetitions:
                    iteration_results = test_run.RunTests()
                    if iteration_results is not None:
                        iteration_count += 1
                        results.append(iteration_results)
                        for r in iteration_results.GetAll():
                            result_counts[r.GetName()][r.GetType()] += 1
                        report_results.LogFull(
                            results=iteration_results,
                            test_type=test.TestType(),
                            test_package=test_run.TestPackage(),
                            annotation=getattr(args, 'annotations', None),
                            flakiness_server=getattr(
                                args, 'flakiness_dashboard_server', None))
                            if (args.break_on_failure and
                                    not iteration_results.DidRunPass()):
                            break

                if iteration_count > 1:
                    # display summary results
                    # only display results for a test if at least one test did not pass
                    all_pass = 0
                    tot_tests = 0
                    for test_name in result_counts:
                        tot_tests += 1
                        if any(result_counts[test_name][x]
                               for x in (base_test_result.ResultType.FAIL,
                                         base_test_result.ResultType.CRASH,
                                         base_test_result.ResultType.TIMEOUT,
                                         base_test_result.ResultType.UNKNOWN)):
                            logging.critical(
                                '%s: %s', test_name,
                                ', '.join(
                                    '%s %s' % (str(result_counts[test_name][i]), i)
                                    for i in base_test_result.ResultType.GetTypes()))
                        else:
                            all_pass += 1

                    logging.critical('%s of %s tests passed in all %s runs',
                                     str(all_pass), str(tot_tests),
                                     str(iteration_count))

                if args.json_results_file:
                    json_results.GenerateJsonResultsFile(
                        results, args.json_results_file)

    return (0 if all(r.DidRunPass()
                     for r in results) else constants.ERROR_EXIT_CODE)
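
This revision nests three with statements for the environment, test instance, and test run; later revisions (examples #2 and #7) flatten them into a single with. Both forms enter the managers left to right and exit them in reverse order, as this small self-contained sketch shows:

class Tracked(object):
    def __init__(self, name):
        self.name = name

    def __enter__(self):
        print('enter %s' % self.name)
        return self

    def __exit__(self, *exc_info):
        print('exit %s' % self.name)


# Equivalent to nesting: 'env' is entered first and exited last.
with Tracked('env'), Tracked('test_instance'), Tracked('test_run'):
    print('running tests')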
Code Example #7
def RunTestsInPlatformMode(args):
    def infra_error(message):
        logging.fatal(message)
        sys.exit(constants.INFRA_EXIT_CODE)

    if args.command not in _SUPPORTED_IN_PLATFORM_MODE:
        infra_error('%s is not yet supported in platform mode' % args.command)

    ### Set up sigterm handler.

    def unexpected_sigterm(_signum, _frame):
        infra_error('Received SIGTERM. Shutting down.')

    sigterm_handler = signal_handler.SignalHandler(signal.SIGTERM,
                                                   unexpected_sigterm)

    ### Set up results handling.
    # TODO(jbudorick): Rewrite results handling.

    # all_raw_results is a list of lists of
    # base_test_result.TestRunResults objects. Each instance of
    # TestRunResults contains all test results produced by a single try,
    # while each list of TestRunResults contains all tries in a single
    # iteration.
    all_raw_results = []

    # all_iteration_results is a list of base_test_result.TestRunResults
    # objects. Each instance of TestRunResults contains the last test
    # result for each test run in that iteration.
    all_iteration_results = []

    @contextlib.contextmanager
    def noop():
        yield

    json_writer = noop()
    if args.json_results_file:

        @contextlib.contextmanager
        def write_json_file():
            try:
                yield
            finally:
                json_results.GenerateJsonResultsFile(all_raw_results,
                                                     args.json_results_file)

        json_writer = write_json_file()

    ### Set up test objects.

    env = environment_factory.CreateEnvironment(args, infra_error)
    test_instance = test_instance_factory.CreateTestInstance(args, infra_error)
    test_run = test_run_factory.CreateTestRun(args, env, test_instance,
                                              infra_error)

    ### Run.

    with sigterm_handler, json_writer, env, test_instance, test_run:

        repetitions = (xrange(args.repeat + 1)
                       if args.repeat >= 0 else itertools.count())
        result_counts = collections.defaultdict(
            lambda: collections.defaultdict(int))
        iteration_count = 0
        for _ in repetitions:
            raw_results = test_run.RunTests()
            if not raw_results:
                continue

            all_raw_results.append(raw_results)

            iteration_results = base_test_result.TestRunResults()
            for r in reversed(raw_results):
                iteration_results.AddTestRunResults(r)
            all_iteration_results.append(iteration_results)

            iteration_count += 1
            for r in iteration_results.GetAll():
                result_counts[r.GetName()][r.GetType()] += 1
            report_results.LogFull(results=iteration_results,
                                   test_type=test_instance.TestType(),
                                   test_package=test_run.TestPackage(),
                                   annotation=getattr(args, 'annotations',
                                                      None),
                                   flakiness_server=getattr(
                                       args, 'flakiness_dashboard_server',
                                       None))
            if args.break_on_failure and not iteration_results.DidRunPass():
                break

        if iteration_count > 1:
            # display summary results
            # only display results for a test if at least one test did not pass
            all_pass = 0
            tot_tests = 0
            for test_name in result_counts:
                tot_tests += 1
                if any(result_counts[test_name][x]
                       for x in (base_test_result.ResultType.FAIL,
                                 base_test_result.ResultType.CRASH,
                                 base_test_result.ResultType.TIMEOUT,
                                 base_test_result.ResultType.UNKNOWN)):
                    logging.critical(
                        '%s: %s', test_name, ', '.join(
                            '%s %s' % (str(result_counts[test_name][i]), i)
                            for i in base_test_result.ResultType.GetTypes()))
                else:
                    all_pass += 1

            logging.critical('%s of %s tests passed in all %s runs',
                             str(all_pass), str(tot_tests),
                             str(iteration_count))

    if args.command == 'perf' and (args.steps or args.single_step):
        return 0

    return (0 if all(
        r.DidRunPass()
        for r in all_iteration_results) else constants.ERROR_EXIT_CODE)
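
signal_handler.SignalHandler replaces the bare signal.signal() call from example #1 with a context manager, so the previous SIGTERM handler is restored once the run finishes. That helper appears to come from Chromium's devil library; a minimal stand-in, written only to show the shape of the pattern, could be:

import contextlib
import signal


@contextlib.contextmanager
def handle_signal(signum, handler):
    previous = signal.signal(signum, handler)
    try:
        yield
    finally:
        # Restore whatever handler was installed before the with block.
        signal.signal(signum, previous)


def _on_sigterm(_signum, _frame):
    raise SystemExit('Received SIGTERM. Shutting down.')


with handle_signal(signal.SIGTERM, _on_sigterm):
    pass  # a SIGTERM delivered here would raise SystemExit
# The original SIGTERM handler is back in place at this point.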