Example #1
def RunTestsInPlatformMode(args, parser):

    if args.command not in _SUPPORTED_IN_PLATFORM_MODE:
        parser.error('%s is not yet supported in platform mode' % args.command)

    with environment_factory.CreateEnvironment(args, parser.error) as env:
        with test_instance_factory.CreateTestInstance(args,
                                                      parser.error) as test:
            with test_run_factory.CreateTestRun(args, env, test,
                                                parser.error) as test_run:
                results = test_run.RunTests()

                if args.environment == 'remote_device' and args.trigger:
                    return 0  # Not returning results, only triggering.

                report_results.LogFull(
                    results=results,
                    test_type=test.TestType(),
                    test_package=test_run.TestPackage(),
                    annotation=getattr(args, 'annotations', None),
                    flakiness_server=getattr(args,
                                             'flakiness_dashboard_server',
                                             None))

                if args.json_results_file:
                    json_results.GenerateJsonResultsFile(
                        results, args.json_results_file)

    return 0 if results.DidRunPass() else constants.ERROR_EXIT_CODE
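
The example above reports factory errors through parser.error, which prints a usage message and exits, and converts the aggregated results into a process exit code. A minimal, hypothetical sketch of wiring such a function to an argparse parser, assuming the surrounding test_runner module provides RunTestsInPlatformMode and its factories; the flags shown here are illustrative placeholders, not the script's real option set:

import argparse
import sys

def _build_parser():
  # Hypothetical flags; the real test_runner.py builds a much larger parser
  # with per-test-type argument groups.
  parser = argparse.ArgumentParser(description='Run Android tests.')
  parser.add_argument('command', help='Test type to run, e.g. gtest.')
  parser.add_argument('--environment', default='local')
  parser.add_argument('--json-results-file', dest='json_results_file')
  return parser

def main():
  parser = _build_parser()
  args = parser.parse_args()
  # RunTestsInPlatformMode returns 0 on success or an error exit code,
  # so the value can be handed straight to sys.exit().
  return RunTestsInPlatformMode(args, parser)

if __name__ == '__main__':
  sys.exit(main())
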
Example #2
def _RunGTests(args, devices):
    """Subcommand of RunTestsCommands which runs gtests."""
    exit_code = 0
    for suite_name in args.suite_name:
        # TODO(jbudorick): Either deprecate multi-suite or move its handling down
        # into the gtest code.
        gtest_options = gtest_test_options.GTestOptions(
            args.tool, args.test_filter, args.run_disabled,
            args.test_arguments, args.timeout, args.isolate_file_path,
            suite_name, args.app_data_files, args.app_data_file_dir,
            args.delete_stale_data)
        runner_factory, tests = gtest_setup.Setup(gtest_options, devices)

        results, test_exit_code = test_dispatcher.RunTests(
            tests,
            runner_factory,
            devices,
            shard=True,
            test_timeout=None,
            num_retries=args.num_retries)

        if test_exit_code and exit_code != constants.ERROR_EXIT_CODE:
            exit_code = test_exit_code

        report_results.LogFull(
            results=results,
            test_type='Unit test',
            test_package=suite_name,
            flakiness_server=args.flakiness_dashboard_server)

        if args.json_results_file:
            json_results.GenerateJsonResultsFile(results,
                                                 args.json_results_file)

    return exit_code
Example #3
def RunTestsInPlatformMode(args, parser):
    def infra_error(message):
        parser.exit(status=constants.INFRA_EXIT_CODE, message=message)

    if args.command not in _SUPPORTED_IN_PLATFORM_MODE:
        infra_error('%s is not yet supported in platform mode' % args.command)

    with environment_factory.CreateEnvironment(args, infra_error) as env:
        with test_instance_factory.CreateTestInstance(args,
                                                      infra_error) as test:
            with test_run_factory.CreateTestRun(args, env, test,
                                                infra_error) as test_run:
                results = []
                repetitions = (xrange(args.repeat + 1)
                               if args.repeat >= 0 else itertools.count())
                for _ in repetitions:
                    iteration_results = test_run.RunTests()

                    if iteration_results is not None:
                        results.append(iteration_results)
                        report_results.LogFull(
                            results=iteration_results,
                            test_type=test.TestType(),
                            test_package=test_run.TestPackage(),
                            annotation=getattr(args, 'annotations', None),
                            flakiness_server=getattr(
                                args, 'flakiness_dashboard_server', None))

                if args.json_results_file:
                    json_results.GenerateJsonResultsFile(
                        results, args.json_results_file)

    return (0 if all(r.DidRunPass()
                     for r in results) else constants.ERROR_EXIT_CODE)
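
The repetition idiom in the example above runs the suite args.repeat + 1 times, or indefinitely when --repeat is negative, by switching between a bounded range and itertools.count(). A standalone sketch of the same pattern, using Python 3's range in place of the Python 2 xrange seen in the snippet:

import itertools

def iteration_counter(repeat):
  # repeat >= 0: run the tests repeat + 1 times in total.
  # repeat < 0: yield forever; the caller is expected to break out itself,
  # e.g. on a failing iteration.
  return range(repeat + 1) if repeat >= 0 else itertools.count()

# '--repeat 2' yields three iterations in total.
assert len(list(iteration_counter(2))) == 3
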
Example #4
def write_json_file():
  try:
    yield
  except Exception:
    global_results_tags.add('UNRELIABLE_RESULTS')
    raise
  finally:
    json_results.GenerateJsonResultsFile(
        all_raw_results, args.json_results_file,
        global_tags=list(global_results_tags))
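
Excerpts like write_json_file above and the json_writer variants below are nested generator functions that the original script wraps as context managers, so the JSON results file gets written even when the test run raises. A minimal sketch of that pattern with placeholder data; the @contextlib.contextmanager decorator, the path argument, and the with block are assumptions about how the yield/finally structure is used, since the enclosing code is not part of the excerpt:

import contextlib
import json

all_raw_results = []          # placeholder for accumulated result objects
global_results_tags = set()   # gains 'UNRELIABLE_RESULTS' on failure

@contextlib.contextmanager
def write_json_file(path):
  try:
    yield
  except Exception:
    global_results_tags.add('UNRELIABLE_RESULTS')
    raise
  finally:
    # The finally clause runs on success and on failure alike.
    with open(path, 'w') as f:
      json.dump({'results': all_raw_results,
                 'global_tags': sorted(global_results_tags)}, f)

# Usage: everything inside the block is covered by the finally clause.
with write_json_file('results.json'):
  all_raw_results.append({'test': 'Foo#bar', 'status': 'PASS'})
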
Example #5
def json_writer():
  try:
    yield
  except Exception:
    global_results_tags.add('UNRELIABLE_RESULTS')
    raise
  finally:
    json_results.GenerateJsonResultsFile(
        all_raw_results, json_file.name,
        global_tags=list(global_results_tags),
        indent=2)
Example #6
def _RunInstrumentationTests(args, devices):
  """Subcommand of RunTestsCommands which runs instrumentation tests."""
  logging.info('_RunInstrumentationTests(%s, %s)' % (str(args), str(devices)))

  instrumentation_options = ProcessInstrumentationOptions(args)

  if len(devices) > 1 and args.wait_for_debugger:
    logging.warning('Debugger can not be sharded, using first available device')
    devices = devices[:1]

  results = base_test_result.TestRunResults()
  exit_code = 0

  if args.run_java_tests:
    runner_factory, tests = instrumentation_setup.Setup(
        instrumentation_options, devices)

    test_results, exit_code = test_dispatcher.RunTests(
        tests, runner_factory, devices, shard=True, test_timeout=None,
        num_retries=args.num_retries)

    results.AddTestRunResults(test_results)

  if args.run_python_tests:
    runner_factory, tests = host_driven_setup.InstrumentationSetup(
        args.host_driven_root, args.official_build,
        instrumentation_options)

    if tests:
      test_results, test_exit_code = test_dispatcher.RunTests(
          tests, runner_factory, devices, shard=True, test_timeout=None,
          num_retries=args.num_retries)

      results.AddTestRunResults(test_results)

      # Only allow exit code escalation
      if test_exit_code and exit_code != constants.ERROR_EXIT_CODE:
        exit_code = test_exit_code

  if args.device_flags:
    args.device_flags = os.path.join(constants.DIR_SOURCE_ROOT,
                                     args.device_flags)

  report_results.LogFull(
      results=results,
      test_type='Instrumentation',
      test_package=os.path.basename(args.test_apk),
      annotation=args.annotations,
      flakiness_server=args.flakiness_dashboard_server)

  if args.json_results_file:
    json_results.GenerateJsonResultsFile(results, args.json_results_file)

  return exit_code
Example #7
def _RunJUnitTests(args):
    """Subcommand of RunTestsCommand which runs junit tests."""
    runner_factory, tests = junit_setup.Setup(args)
    results, exit_code = junit_dispatcher.RunTests(tests, runner_factory)

    report_results.LogFull(results=results,
                           test_type='JUnit',
                           test_package=args.test_suite)

    if args.json_results_file:
        json_results.GenerateJsonResultsFile(results, args.json_results_file)

    return exit_code
Example #8
def json_writer():
  try:
    yield
  except Exception:
    global_results_tags.add('UNRELIABLE_RESULTS')
    raise
  finally:
    if args.isolated_script_test_output:
      json_results.GenerateJsonTestResultFormatFile(all_raw_results,
                                                    json_file.name,
                                                    indent=2)
    else:
      json_results.GenerateJsonResultsFile(
          all_raw_results,
          json_file.name,
          global_tags=list(global_results_tags),
          indent=2)
Example #9
def _RunLinkerTests(args, devices):
  """Subcommand of RunTestsCommands which runs linker tests."""
  runner_factory, tests = linker_setup.Setup(args, devices)

  results, exit_code = test_dispatcher.RunTests(
      tests, runner_factory, devices, shard=True, test_timeout=60,
      num_retries=args.num_retries)

  report_results.LogFull(
      results=results,
      test_type='Linker test',
      test_package='ChromiumLinkerTest')

  if args.json_results_file:
    json_results.GenerateJsonResultsFile([results], args.json_results_file)

  return exit_code
Example #10
    def json_writer():
        try:
            yield
        except Exception:
            global_results_tags.add('UNRELIABLE_RESULTS')
            raise
        finally:
            if args.isolated_script_test_output:
                interrupted = 'UNRELIABLE_RESULTS' in global_results_tags
                json_results.GenerateJsonTestResultFormatFile(all_raw_results,
                                                              interrupted,
                                                              json_file.name,
                                                              indent=2)
            else:
                json_results.GenerateJsonResultsFile(
                    all_raw_results,
                    json_file.name,
                    global_tags=list(global_results_tags),
                    indent=2)

            test_class_to_file_name_dict = {}
            # Test Location is only supported for instrumentation tests as it
            # requires the size-info file.
            if test_instance.TestType() == 'instrumentation':
                test_class_to_file_name_dict = _CreateClassToFileNameDict(
                    args.test_apk)

            if result_sink_client:
                for run in all_raw_results:
                    for results in run:
                        for r in results.GetAll():
                            # Matches chrome.page_info.PageInfoViewTest#testChromePage
                            match = re.search(r'^(.+\..+)#', r.GetName())
                            test_file_name = test_class_to_file_name_dict.get(
                                match.group(1)) if match else None
                            # Some tests emit non-UTF-8 characters, which
                            # break uploads, so decode and re-encode the log.
                            result_sink_client.Post(
                                r.GetName(),
                                r.GetType(),
                                r.GetDuration(),
                                r.GetLog().decode('utf-8',
                                                  'replace').encode('utf-8'),
                                test_file_name,
                                failure_reason=r.GetFailureReason())
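
The re.search(r'^(.+\..+)#', ...) call in the example above extracts the package-qualified class name from an instrumentation test name so it can be looked up in the class-to-file map. A quick illustration of what the capture group yields; the sample test names are made up for the example:

import re

def _class_name(test_name):
  # Group 1 is everything before the '#', provided it contains a dot,
  # i.e. the package-qualified test class.
  match = re.search(r'^(.+\..+)#', test_name)
  return match.group(1) if match else None

assert (_class_name('chrome.page_info.PageInfoViewTest#testChromePage')
        == 'chrome.page_info.PageInfoViewTest')
assert _class_name('NoPackageTest#testSomething') is None
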
Example #11
def _RunPerfTests(args):
    """Subcommand of RunTestsCommands which runs perf tests."""
    perf_options = ProcessPerfTestOptions(args)

    # Just save a simple json with a list of test names.
    if perf_options.output_json_list:
        return perf_test_runner.OutputJsonList(perf_options.steps,
                                               perf_options.output_json_list)

    if perf_options.output_chartjson_data:
        return perf_test_runner.OutputChartjson(
            perf_options.print_step, perf_options.output_chartjson_data)

    # Just print the results from a single previously executed step.
    if perf_options.print_step:
        return perf_test_runner.PrintTestOutput(perf_options.print_step)

    runner_factory, tests, devices = perf_setup.Setup(perf_options)

    # shard=False means that each device will get the full list of tests
    # and then each one will decide its own affinity.
    # shard=True means each device will pop the next test available from a
    # queue, which increases throughput but has no affinity.
    results, _ = test_dispatcher.RunTests(tests,
                                          runner_factory,
                                          devices,
                                          shard=False,
                                          test_timeout=None,
                                          num_retries=args.num_retries)

    report_results.LogFull(results=results,
                           test_type='Perf',
                           test_package='Perf')

    if args.json_results_file:
        json_results.GenerateJsonResultsFile(results, args.json_results_file)

    if perf_options.single_step:
        return perf_test_runner.PrintTestOutput('single_step')

    perf_test_runner.PrintSummary(tests)

    # Always return 0 on the sharding stage. Individual test exit codes
    # will be returned on the print_step stage.
    return 0
Example #12
def _RunMonkeyTests(args, devices):
  """Subcommand of RunTestsCommands which runs monkey tests."""
  monkey_options = ProcessMonkeyTestOptions(args)

  runner_factory, tests = monkey_setup.Setup(monkey_options)

  results, exit_code = test_dispatcher.RunTests(
      tests, runner_factory, devices, shard=False, test_timeout=None,
      num_retries=args.num_retries)

  report_results.LogFull(
      results=results,
      test_type='Monkey',
      test_package='Monkey')

  if args.json_results_file:
    json_results.GenerateJsonResultsFile([results], args.json_results_file)

  return exit_code
Example #13
def _RunUIAutomatorTests(args, devices):
  """Subcommand of RunTestsCommands which runs uiautomator tests."""
  uiautomator_options = ProcessUIAutomatorOptions(args)

  runner_factory, tests = uiautomator_setup.Setup(uiautomator_options)

  results, exit_code = test_dispatcher.RunTests(
      tests, runner_factory, devices, shard=True, test_timeout=None,
      num_retries=args.num_retries)

  report_results.LogFull(
      results=results,
      test_type='UIAutomator',
      test_package=os.path.basename(args.test_jar),
      annotation=args.annotations,
      flakiness_server=args.flakiness_dashboard_server)

  if args.json_results_file:
    json_results.GenerateJsonResultsFile(results, args.json_results_file)

  return exit_code
Example #14
    def json_writer():
        try:
            yield
        except Exception:
            global_results_tags.add('UNRELIABLE_RESULTS')
            raise
        finally:
            if args.isolated_script_test_output:
                interrupted = 'UNRELIABLE_RESULTS' in global_results_tags
                json_results.GenerateJsonTestResultFormatFile(all_raw_results,
                                                              interrupted,
                                                              json_file.name,
                                                              indent=2)
            else:
                json_results.GenerateJsonResultsFile(
                    all_raw_results,
                    json_file.name,
                    global_tags=list(global_results_tags),
                    indent=2)

            test_class_to_file_name_dict = {}
            # Test Location is only supported for instrumentation tests as it
            # requires the size-info file.
            if test_instance.TestType() == 'instrumentation':
                test_class_to_file_name_dict = _CreateClassToFileNameDict(
                    args.test_apk)

            if result_sink_client:
                for run in all_raw_results:
                    for results in run:
                        for r in results.GetAll():
                            # Matches chrome.page_info.PageInfoViewTest#testChromePage
                            match = re.search(r'^(.+\..+)#', r.GetName())
                            test_file_name = test_class_to_file_name_dict.get(
                                match.group(1)) if match else None
                            _SinkTestResult(r, test_file_name,
                                            result_sink_client)
Example #15
def RunTestsInPlatformMode(args, parser):

    if args.command not in _SUPPORTED_IN_PLATFORM_MODE:
        parser.error('%s is not yet supported in platform mode' % args.command)

    with environment_factory.CreateEnvironment(args, parser.error) as env:
        with test_instance_factory.CreateTestInstance(args,
                                                      parser.error) as test:
            with test_run_factory.CreateTestRun(args, env, test,
                                                parser.error) as test_run:
                results = test_run.RunTests()

                report_results.LogFull(
                    results=results,
                    test_type=test.TestType(),
                    test_package=test_run.TestPackage(),
                    annotation=args.annotations,
                    flakiness_server=args.flakiness_dashboard_server)

                if args.json_results_file:
                    json_results.GenerateJsonResultsFile(
                        results, args.json_results_file)

    return results
Example #16
def _RunInstrumentationTests(args, devices):
    """Subcommand of RunTestsCommands which runs instrumentation tests."""
    logging.info('_RunInstrumentationTests(%s, %s)', str(args), str(devices))

    instrumentation_options = ProcessInstrumentationOptions(args)

    if len(devices) > 1 and args.wait_for_debugger:
        logging.warning(
            'Debugger can not be sharded, using first available device')
        devices = devices[:1]

    results = base_test_result.TestRunResults()
    exit_code = 0

    if args.run_java_tests:
        java_runner_factory, java_tests = instrumentation_setup.Setup(
            instrumentation_options, devices)
    else:
        java_runner_factory = None
        java_tests = None

    if args.run_python_tests:
        py_runner_factory, py_tests = host_driven_setup.InstrumentationSetup(
            args.host_driven_root, args.official_build,
            instrumentation_options)
    else:
        py_runner_factory = None
        py_tests = None

    results = []
    repetitions = (xrange(args.repeat + 1)
                   if args.repeat >= 0 else itertools.count())
    for _ in repetitions:
        iteration_results = base_test_result.TestRunResults()
        if java_tests:
            test_results, test_exit_code = test_dispatcher.RunTests(
                java_tests,
                java_runner_factory,
                devices,
                shard=True,
                test_timeout=None,
                num_retries=args.num_retries)
            iteration_results.AddTestRunResults(test_results)

            # Only allow exit code escalation
            if test_exit_code and exit_code != constants.ERROR_EXIT_CODE:
                exit_code = test_exit_code

        if py_tests:
            test_results, test_exit_code = test_dispatcher.RunTests(
                py_tests,
                py_runner_factory,
                devices,
                shard=True,
                test_timeout=None,
                num_retries=args.num_retries)
            iteration_results.AddTestRunResults(test_results)

            # Only allow exit code escalation
            if test_exit_code and exit_code != constants.ERROR_EXIT_CODE:
                exit_code = test_exit_code

        results.append(iteration_results)
        report_results.LogFull(
            results=iteration_results,
            test_type='Instrumentation',
            test_package=os.path.basename(args.test_apk),
            annotation=args.annotations,
            flakiness_server=args.flakiness_dashboard_server)

    if args.json_results_file:
        json_results.GenerateJsonResultsFile(results, args.json_results_file)

    return exit_code
Example #17
def _RunInstrumentationTests(args, devices):
  """Subcommand of RunTestsCommands which runs instrumentation tests."""
  logging.info('_RunInstrumentationTests(%s, %s)', str(args), str(devices))

  instrumentation_options = ProcessInstrumentationOptions(args)

  if len(devices) > 1 and args.wait_for_debugger:
    logging.warning('Debugger can not be sharded, using first available device')
    devices = devices[:1]

  results = base_test_result.TestRunResults()
  exit_code = 0

  if args.run_java_tests:
    java_runner_factory, java_tests = instrumentation_setup.Setup(
        instrumentation_options, devices)
  else:
    java_runner_factory = None
    java_tests = None

  if args.run_python_tests:
    py_runner_factory, py_tests = host_driven_setup.InstrumentationSetup(
        args.host_driven_root, args.official_build,
        instrumentation_options)
  else:
    py_runner_factory = None
    py_tests = None

  results = []
  repetitions = (xrange(args.repeat + 1) if args.repeat >= 0
                 else itertools.count())

  code_counts = {constants.INFRA_EXIT_CODE: 0,
                 constants.ERROR_EXIT_CODE: 0,
                 constants.WARNING_EXIT_CODE: 0,
                 0: 0}

  def _escalate_code(old, new):
    for x in (constants.INFRA_EXIT_CODE,
              constants.ERROR_EXIT_CODE,
              constants.WARNING_EXIT_CODE):
      if x in (old, new):
        return x
    return 0

  for _ in repetitions:
    iteration_results = base_test_result.TestRunResults()
    if java_tests:
      test_results, test_exit_code = test_dispatcher.RunTests(
          java_tests, java_runner_factory, devices, shard=True,
          test_timeout=None, num_retries=args.num_retries)
      iteration_results.AddTestRunResults(test_results)

      code_counts[test_exit_code] += 1
      exit_code = _escalate_code(exit_code, test_exit_code)

    if py_tests:
      test_results, test_exit_code = test_dispatcher.RunTests(
          py_tests, py_runner_factory, devices, shard=True, test_timeout=None,
          num_retries=args.num_retries)
      iteration_results.AddTestRunResults(test_results)

      code_counts[test_exit_code] += 1
      exit_code = _escalate_code(exit_code, test_exit_code)

    results.append(iteration_results)
    report_results.LogFull(
        results=iteration_results,
        test_type='Instrumentation',
        test_package=os.path.basename(args.test_apk),
        annotation=args.annotations,
        flakiness_server=args.flakiness_dashboard_server)


    if args.break_on_failure and exit_code in (constants.ERROR_EXIT_CODE,
                                               constants.INFRA_EXIT_CODE):
      break

  logging.critical('Instr tests: %s success, %s infra, %s errors, %s warnings',
                   str(code_counts[0]),
                   str(code_counts[constants.INFRA_EXIT_CODE]),
                   str(code_counts[constants.ERROR_EXIT_CODE]),
                   str(code_counts[constants.WARNING_EXIT_CODE]))

  if args.json_results_file:
    json_results.GenerateJsonResultsFile(results, args.json_results_file)

  return exit_code
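
The _escalate_code helper in the example above keeps the most severe exit code seen so far: infra failures outrank errors, errors outrank warnings, and warnings outrank success. A small standalone illustration of that precedence; the numeric values below are stand-ins chosen for the example, not taken from constants:

# Hypothetical stand-ins for constants.INFRA_EXIT_CODE, ERROR_EXIT_CODE and
# WARNING_EXIT_CODE; only their relative precedence matters here.
INFRA_EXIT_CODE = 87
ERROR_EXIT_CODE = 1
WARNING_EXIT_CODE = 88

def escalate_code(old, new):
  # Return whichever of the two codes is more severe.
  for code in (INFRA_EXIT_CODE, ERROR_EXIT_CODE, WARNING_EXIT_CODE):
    if code in (old, new):
      return code
  return 0

assert escalate_code(0, WARNING_EXIT_CODE) == WARNING_EXIT_CODE
assert escalate_code(WARNING_EXIT_CODE, ERROR_EXIT_CODE) == ERROR_EXIT_CODE
# Once an error has been recorded, a later success does not clear it.
assert escalate_code(ERROR_EXIT_CODE, 0) == ERROR_EXIT_CODE
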
Example #18
def write_json_file():
    try:
        yield
    finally:
        json_results.GenerateJsonResultsFile(all_raw_results,
                                             args.json_results_file)
Example #19
def RunTestsInPlatformMode(args):

  def infra_error(message):
    logging.fatal(message)
    sys.exit(constants.INFRA_EXIT_CODE)

  if args.command not in _SUPPORTED_IN_PLATFORM_MODE:
    infra_error('%s is not yet supported in platform mode' % args.command)

  with environment_factory.CreateEnvironment(args, infra_error) as env:
    with test_instance_factory.CreateTestInstance(args, infra_error) as test:
      with test_run_factory.CreateTestRun(
          args, env, test, infra_error) as test_run:
        results = []
        repetitions = (xrange(args.repeat + 1) if args.repeat >= 0
                       else itertools.count())
        result_counts = collections.defaultdict(
            lambda: collections.defaultdict(int))
        iteration_count = 0
        for _ in repetitions:
          iteration_results = test_run.RunTests()
          if iteration_results is not None:
            iteration_count += 1
            results.append(iteration_results)
            for r in iteration_results.GetAll():
              result_counts[r.GetName()][r.GetType()] += 1
            report_results.LogFull(
                results=iteration_results,
                test_type=test.TestType(),
                test_package=test_run.TestPackage(),
                annotation=getattr(args, 'annotations', None),
                flakiness_server=getattr(args, 'flakiness_dashboard_server',
                                         None))
            if args.break_on_failure and not iteration_results.DidRunPass():
              break

        if iteration_count > 1:
          # Display summary results; only show per-test counts for tests
          # that did not pass in at least one run.
          all_pass = 0
          tot_tests = 0
          for test_name in result_counts:
            tot_tests += 1
            if any(result_counts[test_name][x] for x in (
                base_test_result.ResultType.FAIL,
                base_test_result.ResultType.CRASH,
                base_test_result.ResultType.TIMEOUT,
                base_test_result.ResultType.UNKNOWN)):
              logging.critical(
                  '%s: %s',
                  test_name,
                  ', '.join('%s %s' % (str(result_counts[test_name][i]), i)
                            for i in base_test_result.ResultType.GetTypes()))
            else:
              all_pass += 1

          logging.critical('%s of %s tests passed in all %s runs',
                           str(all_pass),
                           str(tot_tests),
                           str(iteration_count))

        if args.json_results_file:
          json_results.GenerateJsonResultsFile(
              results, args.json_results_file)

  return (0 if all(r.DidRunPass() for r in results)
          else constants.ERROR_EXIT_CODE)
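
The per-test summary in the last example is driven by a nested defaultdict that counts how often each test finished with each result type across repetitions. A short sketch of the data structure it builds; the test names and result-type strings are illustrative:

import collections

# test name -> result type -> number of occurrences across iterations.
result_counts = collections.defaultdict(lambda: collections.defaultdict(int))

for name, result_type in [('FooTest#testA', 'PASS'),
                          ('FooTest#testA', 'FAIL'),
                          ('FooTest#testB', 'PASS')]:
  result_counts[name][result_type] += 1

assert result_counts['FooTest#testA']['FAIL'] == 1
assert result_counts['FooTest#testB']['PASS'] == 1
# Unseen combinations default to zero instead of raising KeyError.
assert result_counts['FooTest#testB']['CRASH'] == 0
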