Example #1
def _RunPerfTests(options, args, error_func, devices):
    """Subcommand of RunTestsCommands which runs perf tests."""
    perf_options = ProcessPerfTestOptions(options, args, error_func)
    # Just print the results from a single previously executed step.
    if perf_options.print_step:
        return perf_test_runner.PrintTestOutput(perf_options.print_step)

    runner_factory, tests = perf_setup.Setup(perf_options)

    results, _ = test_dispatcher.RunTests(tests,
                                          runner_factory,
                                          devices,
                                          shard=True,
                                          test_timeout=None,
                                          num_retries=options.num_retries)

    report_results.LogFull(results=results,
                           test_type='Perf',
                           test_package='Perf')

    if perf_options.single_step:
        return perf_test_runner.PrintTestOutput('single_step')

    perf_test_runner.PrintSummary(tests)

    # Always return 0 from the sharding stage. Individual tests' exit codes
    # are returned by the print_step stage.
    return 0
Example #2
def DispatchPythonTests(options):
  """Dispatches the Monkey tests, sharding it if there multiple devices."""
  logger = logging.getLogger()
  logger.setLevel(logging.DEBUG)
  attached_devices = android_commands.GetAttachedDevices()
  if not attached_devices:
    raise Exception('You have no devices attached or visible!')

  # Actually run the tests.
  logging.debug('Running monkey tests.')
  # TODO(frankf): This is a stop-gap solution. Come up with a
  # general way for running tests on every device.
  available_tests = []
  for k in range(len(attached_devices)):
    new_method = 'testMonkey%d' % k
    setattr(MonkeyTest, new_method, MonkeyTest.testMonkey)
    available_tests.append(MonkeyTest(new_method))
  options.ensure_value('shard_retries', 1)
  sharder = python_test_sharder.PythonTestSharder(
      attached_devices, available_tests, options)
  results = sharder.RunShardedTests()
  report_results.LogFull(
      results=results,
      test_type='Monkey',
      test_package='Monkey',
      build_type=options.build_type)
  report_results.PrintAnnotation(results)
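Note on Example #2: the loop clones a single test body onto MonkeyTest with setattr so the sharder sees one runnable test per attached device. A minimal, self-contained sketch of that pattern (hypothetical class and device names, plain unittest in place of the Chromium sharder):

import unittest

class DeviceSmokeTest(unittest.TestCase):
  """Hypothetical stand-in for MonkeyTest: one real test body, cloned per device."""

  def _run_smoke(self):
    # The real test body would drive a specific device here.
    self.assertTrue(True)

# Attach one named alias of the test body per (hypothetical) attached device,
# mirroring the setattr loop above.
attached_devices = ['emulator-5554', 'emulator-5556']
for k, _serial in enumerate(attached_devices):
  setattr(DeviceSmokeTest, 'testSmoke%d' % k, DeviceSmokeTest._run_smoke)

if __name__ == '__main__':
  # unittest now discovers testSmoke0 and testSmoke1 as separate tests,
  # one per device, just as the sharder received one MonkeyTest per device.
  unittest.main()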
Example #3
def _RunGTests(options, devices):
    """Subcommand of RunTestsCommands which runs gtests."""
    ProcessGTestOptions(options)

    exit_code = 0
    for suite_name in options.suite_name:
        # TODO(gkanwar): Move this into ProcessGTestOptions once we require -s for
        # the gtest command.
        gtest_options = gtest_test_options.GTestOptions(
            options.tool, options.cleanup_test_files, options.push_deps,
            options.test_filter, options.run_disabled, options.test_arguments,
            options.timeout, suite_name)
        runner_factory, tests = gtest_setup.Setup(gtest_options, devices)

        results, test_exit_code = test_dispatcher.RunTests(
            tests,
            runner_factory,
            devices,
            shard=True,
            test_timeout=None,
            num_retries=options.num_retries)

        if test_exit_code and exit_code != constants.ERROR_EXIT_CODE:
            exit_code = test_exit_code

        report_results.LogFull(
            results=results,
            test_type='Unit test',
            test_package=suite_name,
            flakiness_server=options.flakiness_dashboard_server)

    if os.path.isdir(constants.ISOLATE_DEPS_DIR):
        shutil.rmtree(constants.ISOLATE_DEPS_DIR)

    return exit_code
Example #4
def _RunInstrumentationTests(options, error_func, devices):
    """Subcommand of RunTestsCommands which runs instrumentation tests."""
    instrumentation_options = ProcessInstrumentationOptions(
        options, error_func)

    if len(devices) > 1 and options.wait_for_debugger:
        logging.warning(
            'Debugger can not be sharded, using first available device')
        devices = devices[:1]

    results = base_test_result.TestRunResults()
    exit_code = 0

    if options.run_java_tests:
        runner_factory, tests = instrumentation_setup.Setup(
            instrumentation_options, devices)

        test_results, exit_code = test_dispatcher.RunTests(
            tests,
            runner_factory,
            devices,
            shard=True,
            test_timeout=None,
            num_retries=options.num_retries)

        results.AddTestRunResults(test_results)

    if options.run_python_tests:
        runner_factory, tests = host_driven_setup.InstrumentationSetup(
            options.host_driven_root, options.official_build,
            instrumentation_options)

        if tests:
            test_results, test_exit_code = test_dispatcher.RunTests(
                tests,
                runner_factory,
                devices,
                shard=True,
                test_timeout=None,
                num_retries=options.num_retries)

            results.AddTestRunResults(test_results)

            # Only allow exit code escalation
            if test_exit_code and exit_code != constants.ERROR_EXIT_CODE:
                exit_code = test_exit_code

    if options.device_flags:
        options.device_flags = os.path.join(constants.DIR_SOURCE_ROOT,
                                            options.device_flags)

    report_results.LogFull(results=results,
                           test_type='Instrumentation',
                           test_package=os.path.basename(options.test_apk),
                           annotation=options.annotations,
                           flakiness_server=options.flakiness_dashboard_server)

    return exit_code
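Note on Example #4: the `# Only allow exit code escalation` branch folds per-stage exit codes so a hard error is never downgraded by a later, milder failure. A small sketch of that rule in isolation (ERROR_EXIT_CODE below is a hypothetical stand-in for constants.ERROR_EXIT_CODE):

# Hypothetical stand-in for constants.ERROR_EXIT_CODE.
ERROR_EXIT_CODE = 1


def EscalateExitCode(current, new):
    """Mirrors the escalation rule above: a non-zero code from a later stage
    replaces the current one, except that ERROR_EXIT_CODE, once set, sticks."""
    if new and current != ERROR_EXIT_CODE:
        return new
    return current


# Walking through the stages of a hypothetical run:
assert EscalateExitCode(0, 0) == 0                              # all stages passed
assert EscalateExitCode(0, 2) == 2                              # later stage failed
assert EscalateExitCode(ERROR_EXIT_CODE, 2) == ERROR_EXIT_CODE  # hard error sticks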
Example #5
def Dispatch(options):
    attached_devices = []
    if options.test_device:
        attached_devices = [options.test_device]
    else:
        attached_devices = android_commands.GetAttachedDevices()

    if not attached_devices:
        logging.critical('A device must be attached and online.')
        return 1

    # Reset the test port allocation. It's important to do it before starting
    # to dispatch any tests.
    if not ports.ResetTestServerPortAllocation():
        raise Exception('Failed to reset test server port.')

    test_suite_dir = os.path.join(cmd_helper.OutDirectory.get(),
                                  options.build_type)
    options.test_suite = os.path.join(
        test_suite_dir, 'apks', constants.BROWSERTEST_SUITE_NAME + '.apk')

    # Constructs a new TestRunner with the current options.
    def RunnerFactory(device, shard_index):
        return test_runner.TestRunner(device, options.test_suite,
                                      options.test_arguments, options.timeout,
                                      options.cleanup_test_files, options.tool,
                                      options.build_type, options.webkit,
                                      constants.BROWSERTEST_TEST_PACKAGE_NAME,
                                      constants.BROWSERTEST_TEST_ACTIVITY_NAME,
                                      constants.BROWSERTEST_COMMAND_LINE_FILE)

    # Get tests and split them up based on the number of devices.
    if options.gtest_filter:
        all_tests = [t for t in options.gtest_filter.split(':') if t]
    else:
        all_enabled = gtest_dispatch.GetAllEnabledTests(
            RunnerFactory, attached_devices)
        all_tests = _FilterTests(all_enabled)

    # Run tests.
    # TODO(nileshagrawal): remove this abnormally long setup timeout once fewer
    # files are pushed to the devices for content_browsertests: crbug.com/138275
    setup_timeout = 20 * 60  # 20 minutes
    test_results = shard.ShardAndRunTests(RunnerFactory,
                                          attached_devices,
                                          all_tests,
                                          options.build_type,
                                          setup_timeout=setup_timeout,
                                          test_timeout=None,
                                          num_retries=options.num_retries)
    report_results.LogFull(results=test_results,
                           test_type='Unit test',
                           test_package=constants.BROWSERTEST_SUITE_NAME,
                           build_type=options.build_type,
                           flakiness_server=options.flakiness_dashboard_server)
    report_results.PrintAnnotation(test_results)
Example #6
def _RunInstrumentationTests(options, error_func):
    """Subcommand of RunTestsCommands which runs instrumentation tests."""
    instrumentation_options = ProcessInstrumentationOptions(
        options, error_func)

    results = base_test_result.TestRunResults()
    exit_code = 0

    if options.run_java_tests:
        runner_factory, tests = instrumentation_setup.Setup(
            instrumentation_options)

        test_results, exit_code = test_dispatcher.RunTests(
            tests,
            runner_factory,
            options.wait_for_debugger,
            options.test_device,
            shard=True,
            build_type=options.build_type,
            test_timeout=None,
            num_retries=options.num_retries)

        results.AddTestRunResults(test_results)

    if options.run_python_tests:
        runner_factory, tests = host_driven_setup.InstrumentationSetup(
            options.python_test_root, options.official_build,
            instrumentation_options)

        if tests:
            test_results, test_exit_code = test_dispatcher.RunTests(
                tests,
                runner_factory,
                False,
                options.test_device,
                shard=True,
                build_type=options.build_type,
                test_timeout=None,
                num_retries=options.num_retries)

            results.AddTestRunResults(test_results)

            # Only allow exit code escalation
            if test_exit_code and exit_code != constants.ERROR_EXIT_CODE:
                exit_code = test_exit_code

    report_results.LogFull(results=results,
                           test_type='Instrumentation',
                           test_package=os.path.basename(options.test_apk),
                           annotation=options.annotations,
                           build_type=options.build_type,
                           flakiness_server=options.flakiness_dashboard_server)

    return exit_code
Example #7
def _RunLinkerTests(options, devices):
  """Subcommand of RunTestsCommands which runs linker tests."""
  runner_factory, tests = linker_setup.Setup(options, devices)

  results, exit_code = test_dispatcher.RunTests(
      tests, runner_factory, devices, shard=True, test_timeout=60,
      num_retries=options.num_retries)

  report_results.LogFull(
      results=results,
      test_type='Linker test',
      test_package='ChromiumLinkerTest')

  return exit_code
Example #8
def _RunMonkeyTests(options, error_func, devices):
  """Subcommand of RunTestsCommands which runs monkey tests."""
  monkey_options = ProcessMonkeyTestOptions(options, error_func)

  runner_factory, tests = monkey_setup.Setup(monkey_options)

  results, exit_code = test_dispatcher.RunTests(
      tests, runner_factory, devices, shard=False, test_timeout=None,
      num_retries=options.num_retries)

  report_results.LogFull(
      results=results,
      test_type='Monkey',
      test_package='Monkey')

  return exit_code
Example #9
def _RunUIAutomatorTests(options, error_func, devices):
  """Subcommand of RunTestsCommands which runs uiautomator tests."""
  uiautomator_options = ProcessUIAutomatorOptions(options, error_func)

  runner_factory, tests = uiautomator_setup.Setup(uiautomator_options)

  results, exit_code = test_dispatcher.RunTests(
      tests, runner_factory, devices, shard=True, test_timeout=None,
      num_retries=options.num_retries)

  report_results.LogFull(
      results=results,
      test_type='UIAutomator',
      test_package=os.path.basename(options.test_jar),
      annotation=options.annotations,
      flakiness_server=options.flakiness_dashboard_server)

  return exit_code
Example #10
def _RunUIAutomatorTests(options, error_func):
    """Subcommand of RunTestsCommands which runs uiautomator tests."""
    uiautomator_options = ProcessUIAutomatorOptions(options, error_func)

    results = base_test_result.TestRunResults()
    exit_code = 0

    if options.run_java_tests:
        runner_factory, tests = uiautomator_setup.Setup(uiautomator_options)

        test_results, exit_code = test_dispatcher.RunTests(
            tests,
            runner_factory,
            False,
            options.test_device,
            shard=True,
            build_type=options.build_type,
            test_timeout=None,
            num_retries=options.num_retries)

        results.AddTestRunResults(test_results)

    if options.run_python_tests:
        test_results, test_exit_code = (
            python_dispatch.DispatchPythonTests(options))

        results.AddTestRunResults(test_results)

        # Only allow exit code escalation
        if test_exit_code and exit_code != constants.ERROR_EXIT_CODE:
            exit_code = test_exit_code

    report_results.LogFull(results=results,
                           test_type='UIAutomator',
                           test_package=os.path.basename(options.test_jar),
                           annotation=options.annotations,
                           build_type=options.build_type,
                           flakiness_server=options.flakiness_dashboard_server)

    return exit_code
Example #11
def RunTestsInPlatformMode(command, options, option_parser):

    if command not in _SUPPORTED_IN_PLATFORM_MODE:
        option_parser.error('%s is not yet supported in platform mode' %
                            command)

    with environment_factory.CreateEnvironment(command, options,
                                               option_parser.error) as env:
        with test_instance_factory.CreateTestInstance(
                command, options, option_parser.error) as test:
            with test_run_factory.CreateTestRun(
                    options, env, test, option_parser.error) as test_run:
                results = test_run.RunTests()

                report_results.LogFull(
                    results=results,
                    test_type=test.TestType(),
                    test_package=test_run.TestPackage(),
                    annotation=options.annotations,
                    flakiness_server=options.flakiness_dashboard_server)

    return results
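Note on Example #11: the three nested with-blocks guarantee that environment, test instance, and test run are torn down in reverse order of setup, even if RunTests raises. A toy sketch of that nesting with hypothetical contextlib-based factories (not the real environment_factory / test_run_factory APIs):

import contextlib


@contextlib.contextmanager
def CreateEnvironment(name):
    # Hypothetical environment factory: set up, yield, always tear down.
    print('set up environment %s' % name)
    try:
        yield name
    finally:
        print('tear down environment %s' % name)


@contextlib.contextmanager
def CreateTestRun(env):
    # Hypothetical test-run factory bound to an environment.
    print('set up test run on %s' % env)
    try:
        yield 'run-on-%s' % env
    finally:
        print('tear down test run on %s' % env)


# Teardown happens innermost-first even if the body raises, which is the
# property the platform-mode runner relies on.
with CreateEnvironment('local') as env:
    with CreateTestRun(env) as test_run:
        print('running tests: %s' % test_run)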
Example #12
def _RunPerfTests(options, args, error_func):
    """Subcommand of RunTestsCommands which runs perf tests."""
    perf_options = ProcessPerfTestOptions(options, args, error_func)

    # Just save a simple json with a list of test names.
    if perf_options.output_json_list:
        return perf_test_runner.OutputJsonList(perf_options.steps,
                                               perf_options.output_json_list)

    # Just print the results from a single previously executed step.
    if perf_options.print_step:
        return perf_test_runner.PrintTestOutput(perf_options.print_step)

    runner_factory, tests, devices = perf_setup.Setup(perf_options)

    # shard=False means that each device will get the full list of tests
    # and then each one will decide its own affinity.
    # shard=True means each device will pop the next available test from a
    # queue, which increases throughput but has no affinity.
    results, _ = test_dispatcher.RunTests(tests,
                                          runner_factory,
                                          devices,
                                          shard=False,
                                          test_timeout=None,
                                          num_retries=options.num_retries)

    report_results.LogFull(results=results,
                           test_type='Perf',
                           test_package='Perf')

    if perf_options.single_step:
        return perf_test_runner.PrintTestOutput('single_step')

    perf_test_runner.PrintSummary(tests)

    # Always return 0 from the sharding stage. Individual tests' exit codes
    # are returned by the print_step stage.
    return 0
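Note on Example #12: the shard=False / shard=True comment above describes two ways of handing tests to devices. A toy sketch of the difference (hypothetical test and device names; the real dispatcher persists per-test affinity rather than hashing it):

import collections
import zlib


def DistributeWithAffinity(tests, devices):
    # shard=False: every device sees the full list and keeps only the tests
    # it has affinity for (here a stable hash; the real runner stores affinity).
    return {d: [t for t in tests if zlib.crc32(t.encode()) % len(devices) == i]
            for i, d in enumerate(devices)}


def DistributeFromQueue(tests, devices):
    # shard=True: devices pop the next test from a shared queue as they free
    # up; here the timing is simulated as simple round-robin popping.
    queue = collections.deque(tests)
    assignment = {d: [] for d in devices}
    while queue:
        for d in devices:
            if queue:
                assignment[d].append(queue.popleft())
    return assignment


tests = ['page_cycler', 'startup', 'smoothness', 'memory']
devices = ['device-a', 'device-b']
print(DistributeWithAffinity(tests, devices))  # stable per-test device affinity
print(DistributeFromQueue(tests, devices))     # higher throughput, no affinity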
Example #13
def DispatchUIAutomatorTests(options):
    """Dispatches the UIAutomator tests, sharding if possible.

    Uses the logging module to print the combined final results and
    summary of the Java and Python tests. If the java_only option is set, only
    the Java tests run. If the python_only option is set, only the Python tests
    run. If neither is set, both the Java and Python tests run.

    Args:
      options: command-line options for running the Java and Python tests.

    Returns:
      An integer representing the number of broken tests.
    """
    if not options.keep_test_server_ports:
        # Reset the test port allocation. It's important to do it before starting
        # to dispatch any tests.
        if not ports.ResetTestServerPortAllocation():
            raise Exception('Failed to reset test server port.')

    all_results = base_test_result.TestRunResults()

    if options.run_java_tests:
        all_results.AddTestRunResults(dispatch.Dispatch(options))
    if options.run_python_tests:
        all_results.AddTestRunResults(
            run_python_tests.DispatchPythonTests(options))

    report_results.LogFull(results=all_results,
                           test_type='UIAutomator',
                           test_package=os.path.basename(options.test_jar),
                           annotation=options.annotation,
                           build_type=options.build_type,
                           flakiness_server=options.flakiness_dashboard_server)

    return len(all_results.GetNotPass())
Example #14
def _RunATestSuite(options, suite_name):
    """Run a single test suite.

    Helper for Dispatch() to allow stop/restart of the emulator across
    test bundles.  If using the emulator, we start it on entry and stop
    it on exit.

    Args:
      options: options for running the tests.
      suite_name: name of the test suite being run.

    Returns:
      0 if successful, number of failing tests otherwise.
    """
    step_name = os.path.basename(options.test_suite).replace('-debug.apk', '')
    attached_devices = []
    buildbot_emulators = []

    if options.use_emulator:
        buildbot_emulators = emulator.LaunchEmulators(options.emulator_count,
                                                      options.abi,
                                                      wait_for_boot=True)
        attached_devices = [e.device for e in buildbot_emulators]
    elif options.test_device:
        attached_devices = [options.test_device]
    else:
        attached_devices = android_commands.GetAttachedDevices()

    if not attached_devices:
        logging.critical('A device must be attached and online.')
        return 1

    # Reset the test port allocation. It's important to do it before starting
    # to dispatch any tests.
    if not ports.ResetTestServerPortAllocation():
        raise Exception('Failed to reset test server port.')

    # Constructs a new TestRunner with the current options.
    def RunnerFactory(device, shard_index):
        return test_runner.TestRunner(device, options.test_suite,
                                      options.test_arguments, options.timeout,
                                      options.cleanup_test_files, options.tool,
                                      options.build_type, options.webkit,
                                      constants.GTEST_TEST_PACKAGE_NAME,
                                      constants.GTEST_TEST_ACTIVITY_NAME,
                                      constants.GTEST_COMMAND_LINE_FILE)

    # Get tests and split them up based on the number of devices.
    if options.gtest_filter:
        all_tests = [t for t in options.gtest_filter.split(':') if t]
    else:
        all_tests = GetAllEnabledTests(RunnerFactory, attached_devices)
    num_devices = len(attached_devices)
    tests = [':'.join(all_tests[i::num_devices]) for i in xrange(num_devices)]
    tests = [t for t in tests if t]

    # Run tests.
    test_results = shard.ShardAndRunTests(RunnerFactory,
                                          attached_devices,
                                          tests,
                                          options.build_type,
                                          test_timeout=None,
                                          num_retries=options.num_retries)

    report_results.LogFull(results=test_results,
                           test_type='Unit test',
                           test_package=suite_name,
                           build_type=options.build_type,
                           flakiness_server=options.flakiness_dashboard_server)
    report_results.PrintAnnotation(test_results)

    for buildbot_emulator in buildbot_emulators:
        buildbot_emulator.Shutdown()

    return len(test_results.GetNotPass())
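Note on Example #14: the list comprehension under "# Get tests and split them up based on the number of devices" uses extended slicing to deal tests out round-robin, producing one gtest_filter string per device. A tiny worked example of that split (hypothetical test names; range in place of the Python 2 xrange):

all_tests = ['FooTest.A', 'FooTest.B', 'BarTest.A', 'BarTest.B', 'BazTest.A']
num_devices = 2

# all_tests[i::num_devices] takes every num_devices-th test starting at i,
# so each device receives an interleaved share joined into one filter string.
tests = [':'.join(all_tests[i::num_devices]) for i in range(num_devices)]
print(tests)
# ['FooTest.A:BarTest.A:BazTest.A', 'FooTest.B:BarTest.B']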