Example #1
def DispatchInstrumentationTests(options):
    """Dispatches the Java and Python instrumentation tests, sharding if possible.

  Uses the logging module to print the combined final results and
  summary of the Java and Python tests. If the java_only option is set, only
  the Java tests run; if the python_only option is set, only the Python tests
  run. If neither is set, both Java and Python tests run.

  Args:
    options: command-line options for running the Java and Python tests.

  Returns:
    An integer representing the number of failing tests.
  """
    start_date = int(time.time() * 1000)
    java_results = TestResults()
    python_results = TestResults()

    if options.run_java_tests:
        java_results = run_java_tests.DispatchJavaTests(
            options, [
                apk_info.ApkInfo(options.test_apk_path,
                                 options.test_apk_jar_path)
            ])
    if options.run_python_tests:
        python_results = run_python_tests.DispatchPythonTests(options)

    all_results, summary_string, num_failing = SummarizeResults(
        java_results, python_results, options.annotation)
    return num_failing
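
The pattern above is worth calling out: each suite runs only when its flag is set, the two result sets are merged, and the caller gets back a failure count it can use as an exit code. Below is a minimal, self-contained sketch of that shape; SuiteResults and the runner callables are hypothetical stand-ins, not the real TestResults class or the real dispatchers.

# Minimal sketch of the dispatch-and-count pattern above. SuiteResults and
# the runner callables are hypothetical stand-ins for TestResults and the
# real Java/Python dispatchers.
import time


class SuiteResults(object):
    """Tiny result container: lists of passing and failing test names."""

    def __init__(self, ok=None, failed=None):
        self.ok = ok or []
        self.failed = failed or []

    @classmethod
    def Merge(cls, results_list):
        merged = cls()
        for r in results_list:
            merged.ok += r.ok
            merged.failed += r.failed
        return merged


def Dispatch(run_java_tests, run_python_tests, run_java, run_python):
    """Runs whichever suites are enabled and returns the number of failures."""
    start_ms = int(time.time() * 1000)
    java_results = SuiteResults()
    python_results = SuiteResults()
    if run_java_tests:
        java_results = run_java()
    if run_python_tests:
        python_results = run_python()
    all_results = SuiteResults.Merge([java_results, python_results])
    print('Dispatch took %d ms, %d failing tests' %
          (int(time.time() * 1000) - start_ms, len(all_results.failed)))
    return len(all_results.failed)


print(Dispatch(True, False,
               lambda: SuiteResults(ok=['javaA'], failed=['javaB']),
               lambda: SuiteResults()))  # -> 1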
Example #2
def DispatchInstrumentationTests(options):
    """Dispatches the Java and Python instrumentation tests, sharding if possible.

  Uses the logging module to print the combined final results and
  summary of the Java and Python tests. If the java_only option is set, only
  the Java tests run; if the python_only option is set, only the Python tests
  run. If neither is set, both Java and Python tests run.

  Args:
    options: command-line options for running the Java and Python tests.

  Returns:
    An integer representing the number of failing tests.
  """
    # Reset the test port allocation. It's important to do it before starting
    # to dispatch any tests.
    if not ports.ResetTestServerPortAllocation():
        raise Exception('Failed to reset test server port.')
    start_date = int(time.time() * 1000)
    java_results = TestResults()
    python_results = TestResults()

    if options.run_java_tests:
        java_results = run_java_tests.DispatchJavaTests(
            options, [
                apk_info.ApkInfo(options.test_apk_path,
                                 options.test_apk_jar_path)
            ])
    if options.run_python_tests:
        python_results = run_python_tests.DispatchPythonTests(options)

    all_results, summary_string, num_failing = SummarizeResults(
        java_results, python_results, options.annotation, options.build_type)
    return num_failing
  def RunShardedTests(self):
    """Runs tests in parallel using a pool of workers.

    Returns:
      A TestResults object aggregating the results from all test runs.
    """
    logging.warning('*' * 80)
    logging.warning('Sharding in ' + str(len(self.attached_devices)) +
                    ' devices.')
    logging.warning('Note that the output is not synchronized.')
    logging.warning('Look for the "Final result" banner in the end.')
    logging.warning('*' * 80)
    all_passed = []
    test_results = TestResults()
    tests_to_run = self.tests
    for retry in xrange(self.retries):
      logging.warning('Try %d of %d', retry + 1, self.retries)
      self._SetupSharding(self.tests)
      test_runners = self._MakeTestRunners(self.attached_devices)
      logging.warning('Starting...')
      pool = multiprocessing.Pool(len(self.attached_devices),
                                  SetTestsContainer,
                                  [PythonTestSharder.tests_container])

      # List of TestResults objects from each test execution.
      try:
        results_lists = pool.map(_DefaultRunnable, test_runners)
      except Exception:
        logging.exception('Unable to run tests. Something with the '
                          'PythonTestRunners has gone wrong.')
        raise FatalTestException('PythonTestRunners were unable to run tests.')

      test_results = TestResults.FromTestResults(results_lists)
      # Accumulate passing results.
      all_passed += test_results.ok
      # If we have failed tests, map them to tests to retry.
      failed_tests = test_results.GetAllBroken()
      tests_to_run = self._GetTestsToRetry(self.tests,
                                           failed_tests)

      # Bail out early if we have no more tests. This can happen if all tests
      # pass before we're out of retries, for example.
      if not tests_to_run:
        break

    final_results = TestResults()
    # all_passed has accumulated all passing test results.
    # test_results will have the results from the most recent run, which could
    # include a variety of failure modes (unknown, crashed, failed, etc).
    final_results = test_results
    final_results.ok = all_passed

    return final_results
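
A device-free sketch of the retry shape above: map the current test set across a worker pool, keep whatever passed, and feed only the broken tests into the next attempt. RunOne and its flakiness rule are made up for illustration; this is not the real PythonTestSharder.

# Sketch of the accumulate-passes-and-retry-failures loop above, assuming a
# hypothetical RunOne() in place of the real per-device test runners.
import multiprocessing


def RunOne(args):
    """Pretends to run one test; 'flaky_*' tests only pass on the second try."""
    name, attempt = args
    passed = not name.startswith('flaky_') or attempt >= 1
    return (name, passed)


def RunWithRetries(tests, retries=3, workers=2):
    all_passed = []
    tests_to_run = list(tests)
    for attempt in range(retries):
        pool = multiprocessing.Pool(workers)
        try:
            results = pool.map(RunOne, [(t, attempt) for t in tests_to_run])
        finally:
            pool.close()
            pool.join()
        all_passed += [name for name, ok in results if ok]
        tests_to_run = [name for name, ok in results if not ok]
        if not tests_to_run:
            break
    # Whatever is left in tests_to_run after the final attempt stays broken.
    return all_passed, tests_to_run


if __name__ == '__main__':
    passed, broken = RunWithRetries(['stable_a', 'flaky_b', 'stable_c'])
    print('passed: %s, broken: %s' % (passed, broken))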
    def __init__(self, device, test_suite, gtest_filter, test_arguments,
                 timeout, cleanup_test_files, tool_name, shard_index,
                 dump_debug_info, build_type, in_webkit_checkout):
        BaseTestRunner.__init__(self, device, tool_name, shard_index,
                                build_type)
        self._running_on_emulator = self.device.startswith('emulator')
        self._gtest_filter = gtest_filter
        self._test_arguments = test_arguments
        self.test_results = TestResults()
        if dump_debug_info:
            self.dump_debug_info = debug_info.GTestDebugInfo(
                self.adb, device, os.path.basename(test_suite), gtest_filter)
        else:
            self.dump_debug_info = None
        self.in_webkit_checkout = in_webkit_checkout

        logging.warning('Test suite: ' + test_suite)
        if os.path.splitext(test_suite)[1] == '.apk':
            self.test_package = TestPackageApk(self.adb, device, test_suite,
                                               timeout, cleanup_test_files,
                                               self.tool, self.dump_debug_info)
        else:
            # Put a copy into the android out/target directory, to allow stack trace
            # generation.
            symbols_dir = os.path.join(constants.CHROME_DIR, 'out', build_type,
                                       'lib.target')
            self.test_package = TestPackageExecutable(
                self.adb, device, test_suite, timeout, cleanup_test_files,
                self.tool, self.dump_debug_info, symbols_dir)
    def RunTests(self):
        """Runs tests on a single device.

    Returns:
      A TestResults object.
    """
        try:
            self.test_package.CreateTestRunnerScript(self._gtest_filter,
                                                     self._test_arguments)
            self.test_results = self.test_package.RunTestsAndListResults()
        except errors.DeviceUnresponsiveError as e:
            # If the device is still attached, the failure is unexpected; re-raise.
            if android_commands.IsDeviceAttached(self.device):
                raise e

            # TODO(frankf): We should report these as "skipped" not "failures".
            # Wrap the results
            logging.warning(e)
            failed_tests = []
            for t in self._gtest_filter.split(':'):
                failed_tests += [BaseTestResult(t, '')]
            self.test_results = TestResults.FromRun(
                failed=failed_tests, device_exception=self.device)

        return self.test_results
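
One detail above is easy to miss: when the device drops off the bus, every test named in the gtest filter is converted into a placeholder failure rather than silently vanishing. A small sketch of that conversion, using a hypothetical Result tuple in place of BaseTestResult:

# Sketch: expand a colon-separated gtest filter into placeholder failures,
# assuming a hypothetical Result record in place of BaseTestResult.
import collections

Result = collections.namedtuple('Result', ['name', 'log'])


def FailuresFromFilter(gtest_filter, reason):
    """Marks every test named in the filter as failed with the given reason."""
    return [Result(name, reason) for name in gtest_filter.split(':') if name]


print(FailuresFromFilter('FooTest.Bar:FooTest.Baz', 'device went offline'))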
  def _ProcessResults(self, result, start_ms, duration_ms):
    """Translates a Java test result into a Python result for this test.

    The TestRunner class that we use under the covers will return a test result
    for that specific Java test. However, to make reporting clearer, we have
    this method to abstract that detail and instead report that as a failure of
    this particular test case while still including the Java stack trace.

    Args:
      result: TestResults with a single Java test result
      start_ms: the time the test started
      duration_ms: the length of the test

    Returns:
      A TestResults object containing a result for this Python test.
    """
    test_results = TestResults()

    # If our test is among the broken results, it crashed or failed.
    broken = result.GetAllBroken()
    if broken:
      # Since we have run only one test, take the first and only item.
      single_result = broken[0]

      log = single_result.log
      if not log:
        log = 'No logging information.'

      python_result = SingleTestResult(self.qualified_name, start_ms,
                                       duration_ms,
                                       log)

      # Figure out where the test belonged. There's probably a cleaner way of
      # doing this.
      if single_result in result.crashed:
        test_results.crashed = [python_result]
      elif single_result in result.failed:
        test_results.failed = [python_result]
      elif single_result in result.unknown:
        test_results.unknown = [python_result]

    else:
      python_result = SingleTestResult(self.qualified_name, start_ms,
                                       duration_ms)
      test_results.ok = [python_result]

    return test_results
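
The bucketing above relies on membership checks against the crashed/failed/unknown lists of the Java-side result. The same mapping in isolation, sketched with a plain dict rather than the real TestResults class:

# Sketch: map one broken Java-side result into the matching Python-side
# bucket. The plain dict stands in for a real TestResults object.
def Classify(single_result, java_result):
    """Returns 'crashed', 'failed', or 'unknown' for a broken result."""
    for bucket in ('crashed', 'failed', 'unknown'):
        if single_result in java_result.get(bucket, []):
            return bucket
    return 'unknown'


java_result = {'crashed': [], 'failed': ['testFoo'], 'unknown': []}
print(Classify('testFoo', java_result))  # -> 'failed'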
def DispatchInstrumentationTests(options):
    """Dispatches the Java and Python instrumentation tests, sharding if possible.

  Uses the logging module to print the combined final results and
  summary of the Java and Python tests. If the java_only option is set, only
  the Java tests run; if the python_only option is set, only the Python tests
  run. If neither is set, both Java and Python tests run.

  Args:
    options: command-line options for running the Java and Python tests.

  Returns:
    An integer representing the number of broken tests.
  """
    if not options.keep_test_server_ports:
        # Reset the test port allocation. It's important to do it before starting
        # to dispatch any tests.
        if not ports.ResetTestServerPortAllocation():
            raise Exception('Failed to reset test server port.')

    start_date = int(time.time() * 1000)
    java_results = TestResults()
    python_results = TestResults()

    if options.run_java_tests:
        java_results = run_java_tests.DispatchJavaTests(
            options, [
                apk_info.ApkInfo(options.test_apk_path,
                                 options.test_apk_jar_path)
            ])
    if options.run_python_tests:
        python_results = run_python_tests.DispatchPythonTests(options)

    all_results = TestResults.FromTestResults([java_results, python_results])

    all_results.LogFull(test_type='Instrumentation',
                        test_package=options.test_apk,
                        annotation=options.annotation,
                        build_type=options.build_type,
                        flakiness_server=options.flakiness_dashboard_server)

    return len(all_results.GetAllBroken())
def DispatchPythonTests(options):
    """Dispatches the Python tests. If there are multiple devices, use sharding.

  Args:
    options: command line options.

  Returns:
    A TestResults object holding the results of the Python tests.
  """

    attached_devices = android_commands.GetAttachedDevices()
    if not attached_devices:
        raise FatalTestException('You have no devices attached or visible!')
    if options.device:
        attached_devices = [options.device]

    test_collection = TestInfoCollection()
    all_tests = _GetAllTests(options.python_test_root, options.official_build)
    test_collection.AddTests(all_tests)
    test_names = [t.qualified_name for t in all_tests]
    logging.debug('All available tests: ' + str(test_names))

    available_tests = test_collection.GetAvailableTests(
        options.annotation, options.test_filter)

    if not available_tests:
        logging.warning('No Python tests to run with current args.')
        return TestResults()

    available_tests *= options.number_of_runs
    test_names = [t.qualified_name for t in available_tests]
    logging.debug('Final list of tests to run: ' + str(test_names))

    # Copy files to each device before running any tests.
    for device_id in attached_devices:
        logging.debug('Pushing files to device %s', device_id)
        apks = [
            apk_info.ApkInfo(options.test_apk_path, options.test_apk_jar_path)
        ]
        test_files_copier = run_java_tests.TestRunner(options, device_id, None,
                                                      False, 0, apks, [])
        test_files_copier.CopyTestFilesOnce()

    # Actually run the tests.
    if len(attached_devices) > 1 and options.wait_for_debugger:
        logging.warning('Debugger cannot be sharded, '
                        'using first available device')
        attached_devices = attached_devices[:1]
    logging.debug('Running Python tests')
    sharder = PythonTestSharder(attached_devices, available_tests, options)
    test_results = sharder.RunShardedTests()

    return test_results
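
The selection step above, which filters by annotation and test name and then repeats the list number_of_runs times, can be sketched without the real TestInfoCollection; TestInfo and the filtering rules below are assumptions chosen only to illustrate the shape.

# Sketch: filter a test list by annotation and name substring, then repeat it
# number_of_runs times. TestInfo and the filtering rules are hypothetical.
import collections

TestInfo = collections.namedtuple('TestInfo', ['qualified_name', 'annotations'])


def SelectTests(all_tests, annotation=None, name_filter=None, number_of_runs=1):
    selected = [t for t in all_tests
                if (not annotation or annotation in t.annotations) and
                   (not name_filter or name_filter in t.qualified_name)]
    return selected * number_of_runs


tests = [TestInfo('suite.FooTest', ['SmallTest']),
         TestInfo('suite.BarTest', ['LargeTest'])]
print([t.qualified_name
       for t in SelectTests(tests, annotation='SmallTest', number_of_runs=2)])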
    def RunShardedTests(self):
        """Runs tests in parallel using a pool of workers.

    Returns:
      A TestResults object aggregating the results from all test runs.
    """
        logging.warning('*' * 80)
        logging.warning('Sharding in ' + str(len(self.attached_devices)) +
                        ' devices.')
        logging.warning('Note that the output is not synchronized.')
        logging.warning('Look for the "Final result" banner in the end.')
        logging.warning('*' * 80)
        all_passed = []
        test_results = TestResults()
        tests_to_run = self.tests
        for retry in xrange(self.retries):
            logging.warning('Try %d of %d', retry + 1, self.retries)
            self._SetupSharding(self.tests)
            test_runners = self._MakeTestRunners(self.attached_devices)
            logging.warning('Starting...')
            pool = multiprocessing.Pool(len(self.attached_devices),
                                        SetTestsContainer,
                                        [PythonTestSharder.tests_container])

            # List of TestResults objects from each test execution.
            try:
                results_lists = pool.map(_DefaultRunnable, test_runners)
            except Exception:
                logging.exception('Unable to run tests. Something with the '
                                  'PythonTestRunners has gone wrong.')
                raise FatalTestException(
                    'PythonTestRunners were unable to run tests.')

            test_results = TestResults.FromTestResults(results_lists)
            # Accumulate passing results.
            all_passed += test_results.ok
            # If we have failed tests, map them to tests to retry.
            failed_tests = test_results.GetAllBroken()
            tests_to_run = self._GetTestsToRetry(self.tests, failed_tests)

            # Bail out early if we have no more tests. This can happen if all tests
            # pass before we're out of retries, for example.
            if not tests_to_run:
                break

        final_results = TestResults()
        # all_passed has accumulated all passing test results.
        # test_results will have the results from the most recent run, which could
        # include a variety of failure modes (unknown, crashed, failed, etc).
        final_results = test_results
        final_results.ok = all_passed

        return final_results
    def RunTests(self):
        """Runs tests from the shared pool of tests, aggregating results.

    Returns:
      A TestResults object aggregating the results of all tests this runner executed.
    """
        tests = PythonTestSharder.tests_container

        results = []
        for t in tests:
            res = CallPythonTest(t, self.options)
            results.append(res)

        return TestResults.FromTestResults(results)
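
The RunTests above reads from PythonTestSharder.tests_container, which each worker receives through the pool initializer (SetTestsContainer) shown earlier. A self-contained sketch of that initializer pattern, with hypothetical names and a Manager-backed list standing in for the real shared container:

# Sketch of the Pool-initializer pattern: each worker process gets a
# reference to a shared test container via the pool's initializer, then
# drains work from it. All names here are hypothetical.
import multiprocessing

_shared_tests = None


def _SetTestsContainer(container):
    """Pool initializer: stash the shared container in this worker process."""
    global _shared_tests
    _shared_tests = container


def _Worker(_):
    """Pops tests from the shared container until it is empty."""
    ran = []
    while True:
        try:
            ran.append(_shared_tests.pop())
        except IndexError:
            return ran


if __name__ == '__main__':
    manager = multiprocessing.Manager()
    tests = manager.list(['testA', 'testB', 'testC', 'testD'])
    pool = multiprocessing.Pool(2, _SetTestsContainer, [tests])
    try:
        per_worker = pool.map(_Worker, range(2))
    finally:
        pool.close()
        pool.join()
    print(per_worker)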
Example #11
def SummarizeResults(java_results, python_results, annotation):
    """Summarize the results from the various test types.

  Args:
    java_results: a TestResults object with java test case results.
    python_results: a TestResults object with python test case results.
    annotation: the annotation used for these results.

  Returns:
    A tuple (all_results, summary_string, num_failing)
  """
    all_results = TestResults.FromTestResults([java_results, python_results])
    summary_string = all_results.LogFull('Instrumentation', annotation)
    num_failing = (len(all_results.failed) + len(all_results.crashed) +
                   len(all_results.unknown))
    return all_results, summary_string, num_failing
    def _ProcessResults(self, result, start_ms, duration_ms):
        """Translates a Java test result into a Python result for this test.

    The TestRunner class that we use under the covers will return a test result
    for that specific Java test. However, to make reporting clearer, we have
    this method to abstract that detail and instead report that as a failure of
    this particular test case while still including the Java stack trace.

    Args:
      result: TestResults with a single Java test result
      start_ms: the time the test started
      duration_ms: the length of the test

    Returns:
      A TestResults object containing a result for this Python test.
    """
        test_results = TestResults()

        # If our test is among the broken results, it crashed or failed.
        broken = result.GetAllBroken()
        if broken:
            # Since we have run only one test, take the first and only item.
            single_result = broken[0]

            log = single_result.log
            if not log:
                log = 'No logging information.'

            python_result = SingleTestResult(self.qualified_name, start_ms,
                                             duration_ms, log)

            # Figure out where the test belonged. There's probably a cleaner way of
            # doing this.
            if single_result in result.crashed:
                test_results.crashed = [python_result]
            elif single_result in result.failed:
                test_results.failed = [python_result]
            elif single_result in result.unknown:
                test_results.unknown = [python_result]

        else:
            python_result = SingleTestResult(self.qualified_name, start_ms,
                                             duration_ms)
            test_results.ok = [python_result]

        return test_results
def CallPythonTest(test, options):
    """Invokes a test function and translates Python exceptions into test results.

  This method invokes SetUp()/TearDown() on the test. It is intended to be
  resilient to exceptions in SetUp(), the test itself, and TearDown(). Any
  Python exception means the test is marked as failed, and the test result will
  contain information about the exception.

  If SetUp() raises an exception, the test is not run.

  If TearDown() raises an exception, the test is treated as a failure. However,
  if the test itself raised an exception beforehand, that stack trace will take
  precedence whether or not TearDown() also raised an exception.

  shard_index is not applicable in single-device scenarios, when test execution
  is serial rather than parallel. Tests can use this to bring up servers with
  unique port numbers, for example. See also python_test_sharder.

  Args:
    test: an object which is ostensibly a subclass of PythonTestBase.
    options: Options to use for setting up tests.

  Returns:
    A TestResults object which contains any results produced by the test or, in
    the case of a Python exception, the Python exception info.
  """

    start_date_ms = int(time.time() * 1000)
    failed = False

    try:
        test.SetUp(options)
    except Exception:
        failed = True
        logging.exception(
            'Caught exception while trying to run SetUp() for test: ' +
            test.qualified_name)
        # Tests whose SetUp() method has failed are likely to fail, or at least
        # yield invalid results.
        exc_info = sys.exc_info()
        return TestResults.FromPythonException(test.qualified_name,
                                               start_date_ms, exc_info)

    try:
        result = test.Run()
    except Exception:
        # Setting this lets TearDown() avoid stomping on our stack trace from Run()
        # should TearDown() also raise an exception.
        failed = True
        logging.exception('Caught exception while trying to run test: ' +
                          test.qualified_name)
        exc_info = sys.exc_info()
        result = TestResults.FromPythonException(test.qualified_name,
                                                 start_date_ms, exc_info)

    try:
        test.TearDown()
    except Exception:
        logging.exception(
            'Caught exception while trying to run TearDown() for test: ' +
            test.qualified_name)
        if not failed:
            # Don't stomp the error during the test if TearDown blows up. This is a
            # trade-off: if the test fails, this will mask any problem with TearDown
            # until the test is fixed.
            exc_info = sys.exc_info()
            result = TestResults.FromPythonException(test.qualified_name,
                                                     start_date_ms, exc_info)

    return result
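
The exception handling above has one deliberate asymmetry: a TearDown failure only overwrites the result when Run() itself succeeded, so the original stack trace always wins. A compact sketch of that precedence rule; the _DemoTest class and the string-valued results are hypothetical:

# Sketch of the SetUp/Run/TearDown precedence rule above: a TearDown failure
# is only recorded if the test body had not already failed. All names and
# the string-valued results are hypothetical.
import logging
import traceback


def CallTest(test):
    try:
        test.SetUp()
    except Exception:
        logging.exception('SetUp failed; skipping the test body.')
        return 'setup-failed: ' + traceback.format_exc(limit=1)

    failed = False
    try:
        result = test.Run()
    except Exception:
        failed = True
        logging.exception('Test body failed.')
        result = 'run-failed: ' + traceback.format_exc(limit=1)

    try:
        test.TearDown()
    except Exception:
        logging.exception('TearDown failed.')
        if not failed:
            # Only report the TearDown failure when Run() succeeded, so the
            # original stack trace from Run() is never stomped on.
            result = 'teardown-failed: ' + traceback.format_exc(limit=1)

    return result


class _DemoTest(object):
    def SetUp(self):
        pass

    def Run(self):
        return 'ok'

    def TearDown(self):
        raise RuntimeError('flaky cleanup')


print(CallTest(_DemoTest()))  # -> 'teardown-failed: ...'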
Example #14
def RunTests(exe, device, test_suite, gtest_filter, test_arguments, rebaseline,
             timeout, performance_test, cleanup_test_files, tool,
             log_dump_name, fast_and_loose):
    """Runs the tests.

  Args:
    exe: boolean indicating whether the executable-based test runner is used.
    device: Device on which to run the tests.
    test_suite: A specific test suite to run, empty to run all.
    gtest_filter: A gtest_filter flag.
    test_arguments: Additional arguments to pass to the test binary.
    rebaseline: Whether or not to run tests in isolation and update the filter.
    timeout: Timeout for each test.
    performance_test: Whether or not to run performance test(s).
    cleanup_test_files: Whether or not to cleanup test files on device.
    tool: Name of the Valgrind tool.
    log_dump_name: Name of log dump file.
    fast_and_loose: if set, skip copying data files.

  Returns:
    A TestResults object.
  """
    results = []

    if test_suite:
        if not os.path.exists(test_suite):
            logging.critical('Unrecognized test suite %s, supported: %s',
                             test_suite, _TEST_SUITES)
            if test_suite in _TEST_SUITES:
                logging.critical(
                    '(Remember to include the path: out/Release/%s)',
                    test_suite)
            test_suite_basename = os.path.basename(test_suite)
            if test_suite_basename in _TEST_SUITES:
                logging.critical('Try "make -j15 %s"', test_suite_basename)
            else:
                logging.critical('Unrecognized test suite, supported: %s',
                                 _TEST_SUITES)
            return TestResults.FromRun([], [BaseTestResult(test_suite, '')],
                                       False, False)
        fully_qualified_test_suites = [test_suite]
    else:
        fully_qualified_test_suites = FullyQualifiedTestSuites(
            exe, _TEST_SUITES)
    debug_info_list = []
    print 'Known suites: ' + str(_TEST_SUITES)
    print 'Running these: ' + str(fully_qualified_test_suites)
    for t in fully_qualified_test_suites:
        buildbot_report.PrintNamedStep('Test suite %s' % os.path.basename(t))
        test = SingleTestRunner(device, t, gtest_filter, test_arguments,
                                timeout, rebaseline, performance_test,
                                cleanup_test_files, tool, 0,
                                bool(log_dump_name), fast_and_loose)
        test.Run()

        results += [test.test_results]
        # Collect debug info.
        debug_info_list += [test.dump_debug_info]
        if rebaseline:
            test.UpdateFilter(test.test_results.failed)
        test.test_results.LogFull('Unit test', os.path.basename(t))
    # Zip all debug info outputs into a file named by log_dump_name.
    debug_info.GTestDebugInfo.ZipAndCleanResults(
        os.path.join(constants.CHROME_DIR, 'out', 'Release',
                     'debug_info_dumps'), log_dump_name,
        [d for d in debug_info_list if d])

    PrintAnnotationForTestResults(test.test_results)

    return TestResults.FromTestResults(results)
Example #15
    def _WatchTestOutput(self, p):
        """Watches the test output.
    Args:
      p: the process generating output as created by pexpect.spawn.
    """
        ok_tests = []
        failed_tests = []
        crashed_tests = []
        timed_out = False
        overall_fail = False

        # Test case statuses.
        re_run = re.compile(r'\[ RUN      \] ?(.*)\r\n')
        re_fail = re.compile(r'\[  FAILED  \] ?(.*)\r\n')
        re_ok = re.compile(r'\[       OK \] ?(.*?) .*\r\n')

        # Test run statuses.
        re_passed = re.compile(r'\[  PASSED  \] ?(.*)\r\n')
        re_runner_fail = re.compile(r'\[ RUNNER_FAILED \] ?(.*)\r\n')
        # Signal handlers are installed before starting tests
        # to output the CRASHED marker when a crash happens.
        re_crash = re.compile(r'\[ CRASHED      \](.*)\r\n')

        try:
            while True:
                found = p.expect([re_run, re_passed, re_runner_fail],
                                 timeout=self.timeout)
                if found == 1:  # re_passed
                    break
                elif found == 2:  # re_runner_fail
                    overall_fail = True
                    break
                else:  # re_run
                    if self.dump_debug_info:
                        self.dump_debug_info.TakeScreenshot('_Test_Start_Run_')

                    full_test_name = p.match.group(1).replace('\r', '')
                    found = p.expect([re_ok, re_fail, re_crash],
                                     timeout=self.timeout)
                    if found == 0:  # re_ok
                        if full_test_name == p.match.group(1).replace(
                                '\r', ''):
                            ok_tests += [
                                BaseTestResult(full_test_name, p.before)
                            ]
                    elif found == 2:  # re_crash
                        crashed_tests += [
                            BaseTestResult(full_test_name, p.before)
                        ]
                        overall_fail = True
                        break
                    else:  # re_fail
                        failed_tests += [
                            BaseTestResult(full_test_name, p.before)
                        ]
        except pexpect.EOF:
            logging.error('Test terminated - EOF')
            raise errors.DeviceUnresponsiveError('Device may be offline')
        except pexpect.TIMEOUT:
            logging.error('Test terminated after %d second timeout.',
                          self.timeout)
            timed_out = True
        finally:
            p.close()

        ret_code = self._GetGTestReturnCode()
        if ret_code:
            logging.critical(
                'gtest exit code: %d\npexpect.before: %s\npexpect.after: %s',
                ret_code, p.before, p.after)
            overall_fail = True

        # Create TestResults and return
        return TestResults.FromRun(ok=ok_tests,
                                   failed=failed_tests,
                                   crashed=crashed_tests,
                                   timed_out=timed_out,
                                   overall_fail=overall_fail)
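
The watcher above keys off the standard gtest output markers via pexpect. The same markers can be sanity-checked offline against a canned log using plain re, which is handy when tweaking the patterns; the sample log below is made up.

# Sketch: classify a canned gtest log with the same bracketed markers the
# watcher above expects. The log text is made up.
import re

RE_RUN = re.compile(r'\[ RUN      \] ?(.*)')
RE_OK = re.compile(r'\[       OK \] ?(.*?) .*')
RE_FAIL = re.compile(r'\[  FAILED  \] ?(.*)')

SAMPLE_LOG = """\
[ RUN      ] FooTest.Passes
[       OK ] FooTest.Passes (12 ms)
[ RUN      ] FooTest.Fails
[  FAILED  ] FooTest.Fails (3 ms)
"""

current = None
ok_tests = []
failed_tests = []
for line in SAMPLE_LOG.splitlines():
    m = RE_RUN.match(line)
    if m:
        current = m.group(1)
    elif RE_OK.match(line):
        ok_tests.append(current)
    elif RE_FAIL.match(line):
        failed_tests.append(current)

print('ok: %s' % ok_tests)          # -> ['FooTest.Passes']
print('failed: %s' % failed_tests)  # -> ['FooTest.Fails']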
    def __init__(self, options, device, tests_iter, coverage, shard_index,
                 apks, ports_to_forward):
        """Create a new TestRunner.

    Args:
      options: An options object with the following required attributes:
      -  build_type: 'Release' or 'Debug'.
      -  install_apk: Whether to re-install the APK before running.
      -  save_perf_json: Whether or not to save the JSON file from UI perf
            tests.
      -  screenshot_failures: Whether to take a screenshot on test failure.
      -  tool: Name of the Valgrind tool.
      -  wait_for_debugger: blocks until the debugger is connected.
      -  disable_assertions: Whether to disable java assertions on the device.
      device: Attached android device.
      tests_iter: A list of tests to be run.
      coverage: Whether to collect coverage information.
      shard_index: shard # for this TestRunner, used to create unique port
          numbers.
      apks: A list of ApkInfo objects to be installed. The first element
            should be the test APK; the rest may be APKs used by the test.
            The default is ChromeTest.apk.
      ports_to_forward: A list of port numbers for which to set up forwarders.
                        Can be optionally requested by a test case.
    Raises:
      FatalTestException: if coverage metadata is not available.
    """
        BaseTestRunner.__init__(self, device, options.tool, shard_index,
                                options.build_type)

        if not apks:
            apks = [
                apk_info.ApkInfo(options.test_apk_path,
                                 options.test_apk_jar_path)
            ]

        self.build_type = options.build_type
        self.install_apk = options.install_apk
        self.test_data = options.test_data
        self.save_perf_json = options.save_perf_json
        self.screenshot_failures = options.screenshot_failures
        self.wait_for_debugger = options.wait_for_debugger
        self.disable_assertions = options.disable_assertions

        self.tests_iter = tests_iter
        self.coverage = coverage
        self.apks = apks
        self.test_apk = apks[0]
        self.instrumentation_class_path = self.test_apk.GetPackageName()
        self.ports_to_forward = ports_to_forward

        self.test_results = TestResults()
        self.forwarder = None

        if self.coverage:
            if os.path.exists(TestRunner._COVERAGE_MERGED_FILENAME):
                os.remove(TestRunner._COVERAGE_MERGED_FILENAME)
            if not os.path.exists(TestRunner._COVERAGE_META_INFO_PATH):
                raise FatalTestException('FATAL ERROR in ' + sys.argv[0] +
                                         ' : Coverage meta info [' +
                                         TestRunner._COVERAGE_META_INFO_PATH +
                                         '] does not exist.')
            if (not TestRunner._COVERAGE_WEB_ROOT_DIR
                    or not os.path.exists(TestRunner._COVERAGE_WEB_ROOT_DIR)):
                raise FatalTestException(
                    'FATAL ERROR in ' + sys.argv[0] +
                    ' : Path specified in $EMMA_WEB_ROOTDIR [' +
                    TestRunner._COVERAGE_WEB_ROOT_DIR + '] does not exist.')
def DispatchJavaTests(options, apks):
    """Dispatches Java tests onto connected device(s).

  If possible, this method will attempt to shard the tests to
  all connected devices. Otherwise, dispatch and run tests on one device.

  Args:
    options: Command line options.
    apks: list of APKs to use.

  Returns:
    A TestResults object holding the results of the Java tests.

  Raises:
    FatalTestException: when there are no attached devices.
  """
    test_apk = apks[0]
    # The default annotation for tests which do not have any size annotation.
    default_size_annotation = 'SmallTest'

    def _GetTestsMissingAnnotation(test_apk):
        test_size_annotations = frozenset([
            'Smoke', 'SmallTest', 'MediumTest', 'LargeTest', 'EnormousTest',
            'FlakyTest', 'DisabledTest', 'Manual', 'PerfTest'
        ])
        tests_missing_annotations = []
        for test_method in test_apk.GetTestMethods():
            annotations = frozenset(test_apk.GetTestAnnotations(test_method))
            if (annotations.isdisjoint(test_size_annotations)
                    and not apk_info.ApkInfo.IsPythonDrivenTest(test_method)):
                tests_missing_annotations.append(test_method)
        return sorted(tests_missing_annotations)

    if options.annotation:
        available_tests = test_apk.GetAnnotatedTests(options.annotation)
        if options.annotation.count(default_size_annotation) > 0:
            tests_missing_annotations = _GetTestsMissingAnnotation(test_apk)
            if tests_missing_annotations:
                logging.warning(
                    'The following tests do not contain any annotation. '
                    'Assuming "%s":\n%s', default_size_annotation,
                    '\n'.join(tests_missing_annotations))
                available_tests += tests_missing_annotations
    else:
        available_tests = [
            m for m in test_apk.GetTestMethods()
            if not apk_info.ApkInfo.IsPythonDrivenTest(m)
        ]
    coverage = os.environ.get('EMMA_INSTRUMENT') == 'true'

    tests = []
    if options.test_filter:
        # |available_tests| are in adb instrument format: package.path.class#test.
        filter_without_hash = options.test_filter.replace('#', '.')
        tests = [
            t for t in available_tests
            if filter_without_hash in t.replace('#', '.')
        ]
    else:
        tests = available_tests

    if not tests:
        logging.warning('No Java tests to run with current args.')
        return TestResults()

    tests *= options.number_of_runs

    attached_devices = android_commands.GetAttachedDevices()
    test_results = TestResults()

    if not attached_devices:
        raise FatalTestException('You have no devices attached or visible!')
    if options.device:
        attached_devices = [options.device]

    logging.info('Will run: %s', str(tests))

    if len(attached_devices) > 1 and (coverage or options.wait_for_debugger):
        logging.warning('Coverage / debugger cannot be sharded, '
                        'using first available device')
        attached_devices = attached_devices[:1]
    sharder = TestSharder(attached_devices, options, tests, apks)
    test_results = sharder.RunShardedTests()
    return test_results
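
The _GetTestsMissingAnnotation helper above flags tests whose annotations share nothing with the known size annotations, using frozenset.isdisjoint. The same check in isolation, with a hypothetical in-memory mapping standing in for ApkInfo:

# Sketch: find test methods whose annotations share nothing with the known
# size annotations, mirroring the frozenset.isdisjoint check above. The
# annotations_by_test mapping is a hypothetical stand-in for ApkInfo.
SIZE_ANNOTATIONS = frozenset([
    'Smoke', 'SmallTest', 'MediumTest', 'LargeTest', 'EnormousTest',
    'FlakyTest', 'DisabledTest', 'Manual', 'PerfTest'
])


def TestsMissingSizeAnnotation(annotations_by_test):
    return sorted(test for test, annotations in annotations_by_test.items()
                  if SIZE_ANNOTATIONS.isdisjoint(annotations))


annotations_by_test = {
    'FooTest#testSmall': ['SmallTest'],
    'FooTest#testUnsized': ['Feature'],
}
print(TestsMissingSizeAnnotation(annotations_by_test))  # ['FooTest#testUnsized']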