Example #1
 def run_tests_on_device(dev, tests):
     r = base_test_result.TestRunResults()
     for test in tests:
         result = self._RunTest(dev, test)
         if isinstance(result, base_test_result.BaseTestResult):
             r.AddResult(result)
         elif isinstance(result, list):
             r.AddResults(result)
         else:
             raise Exception('Unexpected result type: %s' %
                             type(result).__name__)
         if isinstance(tests, test_collection.TestCollection):
             tests.test_completed()
     return r
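The loop above accepts either a single result or a list of results from _RunTest. A minimal self-contained sketch of that polymorphic aggregation, using hypothetical stand-in stubs for the real classes in build/android/pylib/base/base_test_result.py:

 # Hypothetical stand-ins, for illustration only; the real classes live in
 # Chromium's build/android/pylib/base/base_test_result.py.
 class ResultType:
   PASS = 'PASS'
   FAIL = 'FAIL'

 class BaseTestResult:
   def __init__(self, name, result_type):
     self._name, self._type = name, result_type
   def GetName(self):
     return self._name
   def GetType(self):
     return self._type

 class TestRunResults:
   def __init__(self):
     self._results = []
   def AddResult(self, result):
     self._results.append(result)
   def AddResults(self, results):
     self._results.extend(results)
   def DidRunPass(self):
     return all(r.GetType() == ResultType.PASS for r in self._results)

 # One runner may return a single result, another a list; the loop from
 # Example #1 normalizes both into the same accumulator.
 r = TestRunResults()
 for outcome in (BaseTestResult('TestA', ResultType.PASS),
                 [BaseTestResult('TestB', ResultType.PASS)]):
   if isinstance(outcome, BaseTestResult):
     r.AddResult(outcome)
   else:
     r.AddResults(outcome)
 assert r.DidRunPass()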
Example #2
 def post_run(self, return_code):
   if self._on_device_script:
     os.remove(self._on_device_script)
   # Create a simple json results file for a test run. The results will contain
   # only one test (suite_name), and will either be a PASS or FAIL depending on
   # return_code.
   if self._test_launcher_summary_output:
     result = (base_test_result.ResultType.FAIL if return_code else
                   base_test_result.ResultType.PASS)
     suite_result = base_test_result.BaseTestResult(self.suite_name, result)
     run_results = base_test_result.TestRunResults()
     run_results.AddResult(suite_result)
     with open(self._test_launcher_summary_output, 'w') as f:
       json.dump(json_results.GenerateResultsDict([run_results]), f)
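The written JSON is small. Its rough shape, inferred only from the fields that the json_results unit tests further down this page assert (Examples #16 and #21); the authoritative schema is whatever json_results.GenerateResultsDict emits:

 # Approximate shape of the summary file for a failing run.
 example_summary = {
     'all_tests': ['suite_name'],
     'per_iteration_data': [{
         'suite_name': [{
             'status': 'FAILURE',   # the string asserted for ResultType.FAIL
             'elapsed_time_ms': 0,  # field name taken from Example #21
         }],
     }],
 }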
Example #3
  def RunTest(self, test):
    raw_result = None
    start_date_ms = None
    results = base_test_result.TestRunResults()
    timeout=(self._GetIndividualTestTimeoutSecs(test) *
             self._GetIndividualTestTimeoutScale(test) *
             self.tool.GetTimeoutScale())
    try:
      self.TestSetup(test)
      start_date_ms = int(time.time() * 1000)

      if self.is_uiautomator_test:
        raw_result = self._RunUIAutomatorTest(test, timeout)
      else:
        raw_result = self.adb.RunInstrumentationTest(
            test, self.test_pkg.GetPackageName(),
            self._GetInstrumentationArgs(), timeout)

      duration_ms = int(time.time() * 1000) - start_date_ms
      status_code = raw_result.GetStatusCode()
      if status_code:
        log = raw_result.GetFailureReason()
        if not log:
          log = 'No information.'
        if self.screenshot_failures or log.find('INJECT_EVENTS perm') >= 0:
          self._TakeScreenshot(test)
        result = test_result.InstrumentationTestResult(
            test, base_test_result.ResultType.FAIL, start_date_ms, duration_ms,
            log=log)
      else:
        result = test_result.InstrumentationTestResult(
            test, base_test_result.ResultType.PASS, start_date_ms, duration_ms)
      results.AddResult(result)
    # Catch exceptions thrown by StartInstrumentation().
    # See ../../third_party/android/testrunner/adb_interface.py
    except (android_commands.errors.WaitForResponseTimedOutError,
            android_commands.errors.DeviceUnresponsiveError,
            android_commands.errors.InstrumentationError) as e:
      if start_date_ms:
        duration_ms = int(time.time() * 1000) - start_date_ms
      else:
        start_date_ms = int(time.time() * 1000)
        duration_ms = 0
      message = str(e)
      if not message:
        message = 'No information.'
      results.AddResult(test_result.InstrumentationTestResult(
          test, base_test_result.ResultType.CRASH, start_date_ms, duration_ms,
          log=message))
      raw_result = None
Example #4
  def testGetTestsToRetry_testUnknown(self):
    results = [
        base_test_result.BaseTestResult(
            'Test2', base_test_result.ResultType.PASS),
    ]

    tests = ['Test1'] + [r.GetName() for r in results]
    try_results = base_test_result.TestRunResults()
    try_results.AddResults(results)

    test_run = TestLocalDeviceTestRun()
    tests_to_retry = test_run._GetTestsToRetry(tests, try_results)
    self.assertEquals(1, len(tests_to_retry))
    self.assertIn('Test1', tests_to_retry)
Example #5
    def RunTest(self, test):
        results = base_test_result.TestRunResults()
        timeout = (self._GetIndividualTestTimeoutSecs(test) *
                   self._GetIndividualTestTimeoutScale(test) *
                   self.tool.GetTimeoutScale())

        start_ms = 0
        duration_ms = 0
        try:
            self.TestSetup(test)

            try:
                self.device.GoHome()
            except device_errors.CommandTimeoutError:
                logging.exception('Failed to focus the launcher.')

            time_ms = lambda: int(time.time() * 1000)
            start_ms = time_ms()
            raw_output = self._RunTest(test, timeout)
            duration_ms = time_ms() - start_ms

            # Parse the test output
            result_code, result_bundle, statuses = (
                instrumentation_test_instance.ParseAmInstrumentRawOutput(
                    raw_output))
            result = self._GenerateTestResult(test, result_code, result_bundle,
                                              statuses, start_ms, duration_ms)
            if local_device_instrumentation_test_run.DidPackageCrashOnDevice(
                    self.test_pkg.GetPackageName(), self.device):
                result.SetType(base_test_result.ResultType.CRASH)
            results.AddResult(result)
        except device_errors.CommandTimeoutError as e:
            results.AddResult(
                test_result.InstrumentationTestResult(
                    test,
                    base_test_result.ResultType.TIMEOUT,
                    start_ms,
                    duration_ms,
                    log=str(e) or 'No information'))
        except device_errors.DeviceUnreachableError as e:
            results.AddResult(
                test_result.InstrumentationTestResult(
                    test,
                    base_test_result.ResultType.CRASH,
                    start_ms,
                    duration_ms,
                    log=str(e) or 'No information'))
        self.TestTeardown(test, results)
        return (results, None if results.DidRunPass() else test)
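The per-test timeout is the product of a base timeout and two scale factors. A worked example with assumed values; the real getters consult per-test annotations and the active tool:

 # Illustrative values only.
 base_timeout_secs = 60   # _GetIndividualTestTimeoutSecs(test)
 test_scale = 2           # _GetIndividualTestTimeoutScale(test)
 tool_scale = 1.5         # self.tool.GetTimeoutScale()
 timeout = base_timeout_secs * test_scale * tool_scale
 assert timeout == 180.0  # seconds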
Example #6
  def testGetTestsToRetry_wildcardFilter_allPass(self):
    results = [
        base_test_result.BaseTestResult(
            'TestCase.Test1', base_test_result.ResultType.PASS),
        base_test_result.BaseTestResult(
            'TestCase.Test2', base_test_result.ResultType.PASS),
    ]

    tests = ['TestCase.*']
    try_results = base_test_result.TestRunResults()
    try_results.AddResults(results)

    test_run = TestLocalDeviceTestRun()
    tests_to_retry = test_run._GetTestsToRetry(tests, try_results)
    self.assertEquals(0, len(tests_to_retry))
Example #7
    def testGenerateJsonTestResultFormatDict_failedResultWithRetry(self):
        result_1 = base_test_result.BaseTestResult(
            'test.package.TestName', base_test_result.ResultType.FAIL)
        run_results_1 = base_test_result.TestRunResults()
        run_results_1.AddResult(result_1)

        # Simulate a second retry with failure.
        result_2 = base_test_result.BaseTestResult(
            'test.package.TestName', base_test_result.ResultType.FAIL)
        run_results_2 = base_test_result.TestRunResults()
        run_results_2.AddResult(result_2)

        all_results = [run_results_1, run_results_2]

        results_dict = json_results.GenerateJsonTestResultFormatDict(
            all_results, False)
        self.assertEquals(1, len(results_dict['tests']))
        self.assertEquals(1, len(results_dict['tests']['test']))
        self.assertEquals(1, len(results_dict['tests']['test']['package']))
        self.assertEquals(
            'PASS',
            results_dict['tests']['test']['package']['TestName']['expected'])
        self.assertEquals(
            'FAIL FAIL',
            results_dict['tests']['test']['package']['TestName']['actual'])
        self.assertEquals(
            True, results_dict['tests']['test']['package']['TestName']
            ['is_unexpected'])

        # Note: technically a missing entry counts as zero.
        self.assertEquals(2, len(results_dict['num_failures_by_type']))
        self.assertEquals(0, results_dict['num_failures_by_type']['PASS'])

        # According to the spec: If a test was run more than once, only the first
        # invocation's result is included in the totals.
        self.assertEquals(1, results_dict['num_failures_by_type']['FAIL'])
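The two comments above pin down the counting rule: a missing type counts as zero, and retries never recount. A sketch of that rule; json_results holds the real implementation:

 def count_results_by_type(all_run_results):
   counts = {}
   seen = set()
   for run_results in all_run_results:   # one TestRunResults per attempt
     for result in run_results.GetAll():
       if result.GetName() in seen:
         continue                        # only the first invocation counts
       seen.add(result.GetName())
       counts[result.GetType()] = counts.get(result.GetType(), 0) + 1
   return counts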
Example #8
 def RunTestsOnShard(self):
     results = base_test_result.TestRunResults()
     for test in self._tests:
         tries_left = self._retries
         result_type = None
         while (result_type != base_test_result.ResultType.PASS
                and tries_left > 0):
             tries_left -= 1  # a failing test must consume a retry or this loops forever
             try:
                 self._TestSetUp(test)
                 result_type = self._RunSingleTest(test)
             finally:
                 self._TestTearDown()
         results.AddResult(
             base_test_result.BaseTestResult(test, result_type))
     return results
Example #9
def _RunInstrumentationTests(options, error_func, devices):
  """Subcommand of RunTestsCommands which runs instrumentation tests."""
  instrumentation_options = ProcessInstrumentationOptions(options, error_func)

  if len(devices) > 1 and options.wait_for_debugger:
    logging.warning('Debugger cannot be sharded; using first available device.')
    devices = devices[:1]

  results = base_test_result.TestRunResults()
  exit_code = 0

  if options.run_java_tests:
    runner_factory, tests = instrumentation_setup.Setup(instrumentation_options)

    test_results, exit_code = test_dispatcher.RunTests(
        tests, runner_factory, devices, shard=True, test_timeout=None,
        num_retries=options.num_retries)

    results.AddTestRunResults(test_results)

  if options.run_python_tests:
    runner_factory, tests = host_driven_setup.InstrumentationSetup(
        options.host_driven_root, options.official_build,
        instrumentation_options)

    if tests:
      test_results, test_exit_code = test_dispatcher.RunTests(
          tests, runner_factory, devices, shard=True, test_timeout=None,
          num_retries=options.num_retries)

      results.AddTestRunResults(test_results)

      # Only allow exit code escalation
      if test_exit_code and exit_code != constants.ERROR_EXIT_CODE:
        exit_code = test_exit_code

  if options.device_flags:
    options.device_flags = os.path.join(constants.DIR_SOURCE_ROOT,
                                        options.device_flags)

  report_results.LogFull(
      results=results,
      test_type='Instrumentation',
      test_package=os.path.basename(options.test_apk),
      annotation=options.annotations,
      flakiness_server=options.flakiness_dashboard_server)

  return exit_code
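The "exit code escalation" comment describes a one-way latch. Isolated as a function, with ERROR_EXIT_CODE assumed to be 1 for illustration:

 ERROR_EXIT_CODE = 1  # assumed; the real value comes from pylib constants

 def escalate(exit_code, test_exit_code):
   if test_exit_code and exit_code != ERROR_EXIT_CODE:
     return test_exit_code
   return exit_code

 assert escalate(0, ERROR_EXIT_CODE) == ERROR_EXIT_CODE
 assert escalate(ERROR_EXIT_CODE, 0) == ERROR_EXIT_CODE  # never downgraded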
Example #10
  def RunTest(self, test_name):
    """Run a perf test on the device.

    Args:
      test_name: String to use for logging the test result.

    Returns:
      A tuple of (TestRunResults, retry).
    """
    _, result_type = self._LaunchPerfTest(test_name)
    results = base_test_result.TestRunResults()
    results.AddResult(base_test_result.BaseTestResult(test_name, result_type))
    retry = None
    if not results.DidRunPass():
      retry = test_name
    return results, retry
Example #11
 def _ParseTestResults(self):
     logging.info('Parsing results from stdout.')
     results = base_test_result.TestRunResults()
     output = self._results['results']['output'].splitlines()
     output = (l[len(self._INSTRUMENTATION_STREAM_LEADER):] for l in output
               if l.startswith(self._INSTRUMENTATION_STREAM_LEADER))
     results_list = self._test_instance.ParseGTestOutput(output)
     results.AddResults(results_list)
     if self._env.only_output_failures:
         logging.info('See logcat for more results information.')
     if not self._results['results']['pass']:
         results.AddResult(
             base_test_result.BaseTestResult(
                 'Remote Service detected error.',
                 base_test_result.ResultType.FAIL))
     return results
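Example #12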
  def RunTests(self):
    with tempfile.NamedTemporaryFile() as json_file:
      java_script = os.path.join(constants.GetOutDirectory(), 'bin', 'helper',
                                 self._test_instance.suite)
      command = [java_script]

      # Add Jar arguments.
      jar_args = ['-test-jars', self._test_instance.suite + '.jar',
                  '-json-results-file', json_file.name]
      if self._test_instance.test_filter:
        jar_args.extend(['-gtest-filter', self._test_instance.test_filter])
      if self._test_instance.package_filter:
        jar_args.extend(['-package-filter',
                         self._test_instance.package_filter])
      if self._test_instance.runner_filter:
        jar_args.extend(['-runner-filter', self._test_instance.runner_filter])
      command.extend(['--jar-args', '"%s"' % ' '.join(jar_args)])

      # Add JVM arguments.
      jvm_args = []
      # TODO(mikecase): Add a --robolectric-dep-dir arg to test runner.
      # Have this arg set by GN in the generated test runner scripts.
      jvm_args += [
          '-Drobolectric.dependency.dir=%s' % os.path.join(
              constants.GetOutDirectory(), 'lib.java', 'third_party',
              'robolectric'),
          '-Ddir.source.root=%s' % constants.DIR_SOURCE_ROOT,
      ]
      if self._test_instance.coverage_dir:
        if not os.path.exists(self._test_instance.coverage_dir):
          os.makedirs(self._test_instance.coverage_dir)
        elif not os.path.isdir(self._test_instance.coverage_dir):
          raise Exception('--coverage-dir takes a directory, not a file path.')
        jvm_args.append('-Demma.coverage.out.file=%s' % os.path.join(
            self._test_instance.coverage_dir,
            '%s.ec' % self._test_instance.suite))
      if jvm_args:
        command.extend(['--jvm-args', '"%s"' % ' '.join(jvm_args)])

      cmd_helper.RunCmd(command)
      results_list = json_results.ParseResultsFromJson(
          json.loads(json_file.read()))

      test_run_results = base_test_result.TestRunResults()
      test_run_results.AddResults(results_list)

      return [test_run_results]
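For a hypothetical suite 'chrome_junit_tests' with no filters and no coverage dir, the assembled command looks roughly like this (paths and the temp-file name are invented):

 command = [
     'out/Default/bin/helper/chrome_junit_tests',
     '--jar-args',
     '"-test-jars chrome_junit_tests.jar -json-results-file /tmp/tmp1234"',
     '--jvm-args',
     '"-Drobolectric.dependency.dir=out/Default/lib.java/third_party/robolectric'
     ' -Ddir.source.root=/src/chromium/src"',
 ]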
Example #13
 def RunTest(self, test):
   raw_result = None
   start_date_ms = None
   results = base_test_result.TestRunResults()
   timeout = (self._GetIndividualTestTimeoutSecs(test) *
              self._GetIndividualTestTimeoutScale(test) *
              self.tool.GetTimeoutScale())
   try:
     self.TestSetup(test)
     start_date_ms = int(time.time() * 1000)
     raw_result = self._RunTest(test, timeout)
     duration_ms = int(time.time() * 1000) - start_date_ms
     status_code = raw_result.GetStatusCode()
     if status_code:
       if self.options.screenshot_failures:
         self._TakeScreenshot(test)
       log = raw_result.GetFailureReason()
       if not log:
         log = 'No information.'
       result_type = base_test_result.ResultType.FAIL
       package = self.adb.DismissCrashDialogIfNeeded()
       # Assume test package convention of ".test" suffix
       if package and package in self.test_pkg.GetPackageName():
         result_type = base_test_result.ResultType.CRASH
       result = test_result.InstrumentationTestResult(
           test, result_type, start_date_ms, duration_ms, log=log)
     else:
       result = test_result.InstrumentationTestResult(
           test, base_test_result.ResultType.PASS, start_date_ms, duration_ms)
     results.AddResult(result)
   # Catch exceptions thrown by StartInstrumentation().
   # See ../../third_party/android/testrunner/adb_interface.py
   except (android_commands.errors.WaitForResponseTimedOutError,
           android_commands.errors.DeviceUnresponsiveError,
           android_commands.errors.InstrumentationError) as e:
     if start_date_ms:
       duration_ms = int(time.time() * 1000) - start_date_ms
     else:
       start_date_ms = int(time.time() * 1000)
       duration_ms = 0
     message = str(e)
     if not message:
       message = 'No information.'
     results.AddResult(test_result.InstrumentationTestResult(
         test, base_test_result.ResultType.CRASH, start_date_ms, duration_ms,
         log=message))
     raw_result = None
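Example #14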
def Dispatch(options):
    """Dispatches instrumentation tests onto connected device(s).

  If possible, this method will attempt to shard the tests to
  all connected devices. Otherwise, dispatch and run tests on one device.

  Args:
    options: Command line options.

  Returns:
    A TestRunResults object holding the results of the Java tests.

  Raises:
    Exception: when there are no attached devices.
  """
    test_pkg = test_package.TestPackage(options.test_apk_path,
                                        options.test_apk_jar_path)
    tests = test_pkg._GetAllMatchingTests(options.annotations,
                                          options.exclude_annotations,
                                          options.test_filter)
    if not tests:
        logging.warning('No instrumentation tests to run with current args.')
        return base_test_result.TestRunResults()

    attached_devices = android_commands.GetAttachedDevices()
    if not attached_devices:
        raise Exception('There are no devices online.')

    if options.device:
        assert options.device in attached_devices
        attached_devices = [options.device]

    if len(attached_devices) > 1 and options.wait_for_debugger:
        logging.warning(
            'Debugger cannot be sharded; using first available device.')
        attached_devices = attached_devices[:1]

    def TestRunnerFactory(device, shard_index):
        return test_runner.TestRunner(options, device, shard_index, test_pkg,
                                      [])

    return shard.ShardAndRunTests(TestRunnerFactory,
                                  attached_devices,
                                  tests,
                                  options.build_type,
                                  test_timeout=None,
                                  num_retries=options.num_retries)
Example #15
    def RunTest(self, test):
        results = base_test_result.TestRunResults()
        timeout = (self._GetIndividualTestTimeoutSecs(test) *
                   self._GetIndividualTestTimeoutScale(test) *
                   self.tool.GetTimeoutScale())
        if (self.device.build_version_sdk <
                constants.ANDROID_SDK_VERSION_CODES.JELLY_BEAN):
            timeout *= 10

        start_ms = 0
        duration_ms = 0
        try:
            self.TestSetup(test)

            time_ms = lambda: int(time.time() * 1000)
            start_ms = time_ms()
            raw_output = self._RunTest(test, timeout)
            duration_ms = time_ms() - start_ms

            # Parse the test output
            _, _, statuses = (instrumentation_test_instance.
                              ParseAmInstrumentRawOutput(raw_output))
            result = self._GenerateTestResult(test, statuses, start_ms,
                                              duration_ms)
            if local_device_instrumentation_test_run.DidPackageCrashOnDevice(
                    self.test_pkg.GetPackageName(), self.device):
                result.SetType(base_test_result.ResultType.CRASH)
            results.AddResult(result)
        except device_errors.CommandTimeoutError as e:
            results.AddResult(
                test_result.InstrumentationTestResult(
                    test,
                    base_test_result.ResultType.TIMEOUT,
                    start_ms,
                    duration_ms,
                    log=str(e) or 'No information'))
        except device_errors.DeviceUnreachableError as e:
            results.AddResult(
                test_result.InstrumentationTestResult(
                    test,
                    base_test_result.ResultType.CRASH,
                    start_ms,
                    duration_ms,
                    log=str(e) or 'No information'))
        self.TestTeardown(test, results)
        return (results, None if results.DidRunPass() else test)
Example #16
    def testGenerateResultsDict_failedResult(self):
        result = base_test_result.BaseTestResult(
            'test.package.TestName', base_test_result.ResultType.FAIL)

        all_results = base_test_result.TestRunResults()
        all_results.AddResult(result)

        results_dict = json_results.GenerateResultsDict([all_results])
        self.assertEquals(['test.package.TestName'], results_dict['all_tests'])
        self.assertEquals(1, len(results_dict['per_iteration_data']))

        iteration_result = results_dict['per_iteration_data'][0]
        self.assertTrue('test.package.TestName' in iteration_result)
        self.assertEquals(1, len(iteration_result['test.package.TestName']))

        test_iteration_result = iteration_result['test.package.TestName'][0]
        self.assertTrue('status' in test_iteration_result)
        self.assertEquals('FAILURE', test_iteration_result['status'])
Example #17
  def _ParseTestResults(self):
    logging.info('Parsing results from stdout.')
    r = base_test_result.TestRunResults()
    result_code, result_bundle, statuses = (
        self._test_instance.ParseAmInstrumentRawOutput(
            self._results['results']['output'].splitlines()))
    result = self._test_instance.GenerateTestResults(
        result_code, result_bundle, statuses, 0, 0)

    if isinstance(result, base_test_result.BaseTestResult):
      r.AddResult(result)
    elif isinstance(result, list):
      r.AddResults(result)
    else:
      raise Exception('Unexpected result type: %s' % type(result).__name__)

    self._DetectPlatformErrors(r)
    return r
Example #18
    def testGenerateJsonTestResultFormatDict_failedResult(self):
        result = base_test_result.BaseTestResult(
            'test.package.TestName', base_test_result.ResultType.FAIL)

        all_results = base_test_result.TestRunResults()
        all_results.AddResult(result)

        results_dict = json_results.GenerateJsonTestResultFormatDict(
            [all_results])
        self.assertEquals(1, len(results_dict['tests']))
        self.assertEquals(1, len(results_dict['tests']['test']))
        self.assertEquals(1, len(results_dict['tests']['test']['package']))
        self.assertEquals(
            'PASS',
            results_dict['tests']['test']['package']['TestName']['expected'])
        self.assertEquals(
            'FAIL',
            results_dict['tests']['test']['package']['TestName']['actual'])
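The assertions above imply that a dotted test name is exploded into a nested dict. A sketch of that nesting; json_results.GenerateJsonTestResultFormatDict is the real implementation:

 def insert_result(trie, dotted_name, leaf):
   # 'test.package.TestName' -> trie['test']['package']['TestName'] = leaf
   *path, last = dotted_name.split('.')
   node = trie
   for part in path:
     node = node.setdefault(part, {})
   node[last] = leaf

 tests = {}
 insert_result(tests, 'test.package.TestName',
               {'expected': 'PASS', 'actual': 'FAIL'})
 assert tests['test']['package']['TestName']['actual'] == 'FAIL'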
Example #19
def _RunInstrumentationTests(options, error_func):
    """Subcommand of RunTestsCommands which runs instrumentation tests."""
    instrumentation_options = ProcessInstrumentationOptions(
        options, error_func)

    results = base_test_result.TestRunResults()
    exit_code = 0

    if options.run_java_tests:
        runner_factory, tests = instrumentation_setup.Setup(
            instrumentation_options)

        test_results, exit_code = test_dispatcher.RunTests(
            tests,
            runner_factory,
            options.wait_for_debugger,
            options.test_device,
            shard=True,
            build_type=options.build_type,
            test_timeout=None,
            num_retries=options.num_retries)

        results.AddTestRunResults(test_results)

    if options.run_python_tests:
        test_results, test_exit_code = (
            python_dispatch.DispatchPythonTests(options))

        results.AddTestRunResults(test_results)

        # Only allow exit code escalation
        if test_exit_code and exit_code != constants.ERROR_EXIT_CODE:
            exit_code = test_exit_code

    report_results.LogFull(results=results,
                           test_type='Instrumentation',
                           test_package=os.path.basename(options.test_apk),
                           annotation=options.annotations,
                           build_type=options.build_type,
                           flakiness_server=options.flakiness_dashboard_server)

    return exit_code
Example #20
  def testGetTestsToRetry_nonStringTests(self):
    results = [
        base_test_result.BaseTestResult(
            'TestCase.Test1', base_test_result.ResultType.PASS),
        base_test_result.BaseTestResult(
            'TestCase.Test2', base_test_result.ResultType.FAIL),
    ]

    tests = [
        {'name': 'TestCase.Test1'},
        {'name': 'TestCase.Test2'},
    ]
    try_results = base_test_result.TestRunResults()
    try_results.AddResults(results)

    test_run = TestLocalDeviceNonStringTestRun()
    tests_to_retry = test_run._GetTestsToRetry(tests, try_results)
    self.assertEquals(1, len(tests_to_retry))
    self.assertIsInstance(tests_to_retry[0], dict)
    self.assertEquals(tests[1], tests_to_retry[0])
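Together, Examples #4, #6 and #20 pin down _GetTestsToRetry: never-run tests are retried, wildcard filters pass when every matching result passed, and dict-valued tests are matched by their 'name' key. A sketch consistent with those three tests; the real implementation (presumably in local_device_test_run.py) is more involved:

 import fnmatch

 def _Name(test):
   # Non-string tests (Example #20) carry their name in a dict.
   return test['name'] if isinstance(test, dict) else test

 def GetTestsToRetry(tests, try_results, PASS='PASS'):
   ran = {r.GetName(): r.GetType() for r in try_results.GetAll()}
   passed = set(n for n, t in ran.items() if t == PASS)
   def should_retry(test):
     name = _Name(test)
     if '*' in name:  # a wildcard passes when every matching result passed
       return any(m not in passed for m in fnmatch.filter(ran, name))
     return name not in passed  # never-run tests (Example #4) are retried
   return [t for t in tests if should_retry(t)]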
Example #21
    def testGenerateResultsDict_duration(self):
        result = base_test_result.BaseTestResult(
            'test.package.TestName',
            base_test_result.ResultType.PASS,
            duration=123)

        all_results = base_test_result.TestRunResults()
        all_results.AddResult(result)

        results_dict = json_results.GenerateResultsDict([all_results])
        self.assertEquals(['test.package.TestName'], results_dict['all_tests'])
        self.assertEquals(1, len(results_dict['per_iteration_data']))

        iteration_result = results_dict['per_iteration_data'][0]
        self.assertTrue('test.package.TestName' in iteration_result)
        self.assertEquals(1, len(iteration_result['test.package.TestName']))

        test_iteration_result = iteration_result['test.package.TestName'][0]
        self.assertTrue('elapsed_time_ms' in test_iteration_result)
        self.assertEquals(123, test_iteration_result['elapsed_time_ms'])
Example #22
  def _RunJavaTests(self, package_name, tests):
    """Calls a list of tests and stops at the first test failure.

    This method iterates until either it encounters a non-passing test or it
    exhausts the list of tests. Then it returns the appropriate overall result.

    Test cases may make use of this method internally to assist in running
    instrumentation tests. This function relies on instrumentation_options
    being defined.

    Args:
      package_name: Package name in which the java tests live
          (e.g. foo.bar.baz.tests)
      tests: A list of Java test names which will be run

    Returns:
      A TestRunResults object containing an overall result for this set of Java
      tests. If any Java tests do not pass, this is a fail overall.
    """
    test_type = base_test_result.ResultType.PASS
    log = ''

    start_ms = int(time.time() * 1000)
    for test in tests:
      # We're only running one test at a time, so this TestRunResults object
      # will hold only one result.
      suite, test_name = test.split('.')
      java_result = self.__RunJavaTest(package_name, suite, test_name)
      assert len(java_result.GetAll()) == 1
      if not java_result.DidRunPass():
        result = java_result.GetNotPass().pop()
        log = result.GetLog()
        test_type = result.GetType()
        break
    duration_ms = int(time.time() * 1000) - start_ms

    overall_result = base_test_result.TestRunResults()
    overall_result.AddResult(
        test_result.InstrumentationTestResult(
            self.tagged_name, test_type, start_ms, duration_ms, log=log))
    return overall_result
Example #23
 def run_tests_on_device(dev, tests):
   r = base_test_result.TestRunResults()
   for test in tests:
     try:
       result = self._RunTest(dev, test)
       if isinstance(result, base_test_result.BaseTestResult):
         r.AddResult(result)
       elif isinstance(result, list):
         r.AddResults(result)
       else:
         raise Exception(
             'Unexpected result type: %s' % type(result).__name__)
     except:
       if isinstance(tests, test_collection.TestCollection):
         tests.add(test)
       raise
     finally:
       if isinstance(tests, test_collection.TestCollection):
         tests.test_completed()
   logging.info('Finished running tests on this device.')
   return r
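Example #24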
def RunTests(tests, runner_factory):
  """Runs a set of java tests on the host.

  Returns:
    A tuple containing the results & the exit code.
  """
  def run(t):
    runner = runner_factory(None, None)
    runner.SetUp()
    results_list, return_code = runner.RunTest(t)
    runner.TearDown()
    return (results_list, return_code == 0)

  test_run_results = base_test_result.TestRunResults()
  exit_code = 0
  for t in tests:
    results_list, passed = run(t)
    test_run_results.AddResults(results_list)
    if not passed:
      exit_code = constants.ERROR_EXIT_CODE
  return (test_run_results, exit_code)
Example #25
    def RunTest(self, test):
        results = base_test_result.TestRunResults()
        timeout = (self._GetIndividualTestTimeoutSecs(test) *
                   self._GetIndividualTestTimeoutScale(test) *
                   self.tool.GetTimeoutScale())

        start_ms = 0
        duration_ms = 0
        try:
            self.TestSetup(test)

            time_ms = lambda: int(time.time() * 1000)
            start_ms = time_ms()
            raw_output = self._RunTest(test, timeout)
            duration_ms = time_ms() - start_ms

            # Parse the test output
            _, _, statuses = self._ParseAmInstrumentRawOutput(raw_output)
            result = self._GenerateTestResult(test, statuses, start_ms,
                                              duration_ms)
            results.AddResult(result)
        except device_errors.CommandTimeoutError as e:
            results.AddResult(
                test_result.InstrumentationTestResult(
                    test,
                    base_test_result.ResultType.TIMEOUT,
                    start_ms,
                    duration_ms,
                    log=str(e) or 'No information'))
        except device_errors.DeviceUnreachableError as e:
            results.AddResult(
                test_result.InstrumentationTestResult(
                    test,
                    base_test_result.ResultType.CRASH,
                    start_ms,
                    duration_ms,
                    log=str(e) or 'No information'))
        self.TestTeardown(test, results)
        return (results, None if results.DidRunPass() else test)
Example #26
  def RunTest(self, test):
    test_results = base_test_result.TestRunResults()
    if not test:
      return test_results, None

    try:
      self.test_package.ClearApplicationState(self.device)
      self.test_package.CreateCommandLineFileOnDevice(
          self.device, test, self._test_arguments)
      test_results = self._ParseTestOutput(
          self.test_package.SpawnTestProcess(self.device))
    finally:
      self.CleanupSpawningServerState()
    # Calculate unknown test results.
    all_tests = set(test.split(':'))
    all_tests_ran = set([t.GetName() for t in test_results.GetAll()])
    unknown_tests = all_tests - all_tests_ran
    test_results.AddResults(
        [base_test_result.BaseTestResult(t, base_test_result.ResultType.UNKNOWN)
         for t in unknown_tests])
    retry = ':'.join([t.GetName() for t in test_results.GetNotPass()])
    return test_results, retry
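The retry value reuses the gtest-filter convention of ':'-joined names. A worked example with invented test names:

 all_tests = set('Suite.A:Suite.B:Suite.C'.split(':'))   # from `test`
 ran = {'Suite.A': 'PASS', 'Suite.B': 'FAIL'}            # parsed output
 unknown = sorted(all_tests - set(ran))                  # ['Suite.C']
 not_passed = [n for n, t in ran.items() if t != 'PASS'] + unknown
 retry = ':'.join(not_passed)                            # 'Suite.B:Suite.C'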
Example #27
    def testGenerateJsonTestResultFormatDict_passedResult(self):
        result = base_test_result.BaseTestResult(
            'test.package.TestName', base_test_result.ResultType.PASS)

        all_results = base_test_result.TestRunResults()
        all_results.AddResult(result)

        results_dict = json_results.GenerateJsonTestResultFormatDict(
            [all_results], False)
        self.assertEquals(1, len(results_dict['tests']))
        self.assertEquals(1, len(results_dict['tests']['test']))
        self.assertEquals(1, len(results_dict['tests']['test']['package']))
        self.assertEquals(
            'PASS',
            results_dict['tests']['test']['package']['TestName']['expected'])
        self.assertEquals(
            'PASS',
            results_dict['tests']['test']['package']['TestName']['actual'])

        # Note: technically a missing entry counts as zero.
        self.assertEquals(1, results_dict['num_failures_by_type']['PASS'])
        self.assertEquals(0, results_dict['num_failures_by_type']['FAIL'])
Example #28
    def RunTest(self, test_name):
        """Run a Monkey test on the device.

    Args:
      test_name: String to use for logging the test result.

    Returns:
      A tuple of (TestRunResults, retry).
    """
        self.adb.StartActivity(self._package,
                               self._activity,
                               wait_for_completion=True,
                               action='android.intent.action.MAIN',
                               force_stop=True)

        # Chrome crashes are not always caught by Monkey test runner.
        # Verify Chrome has the same PID before and after the test.
        before_pids = self.adb.ExtractPid(self._package)

        # Run the test.
        output = ''
        after_pids = []
        if before_pids:
            output = '\n'.join(self._LaunchMonkeyTest())
            after_pids = self.adb.ExtractPid(self._package)

        crashed = (not before_pids or not after_pids
                   or after_pids[0] != before_pids[0])

        results = base_test_result.TestRunResults()
        success_pattern = 'Events injected: %d' % self._options.event_count
        if success_pattern in output and not crashed:
            result = base_test_result.BaseTestResult(
                test_name, base_test_result.ResultType.PASS, log=output)
        else:
            result = base_test_result.BaseTestResult(
                test_name, base_test_result.ResultType.FAIL, log=output)
        results.AddResult(result)
        return results, False
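The crash check distilled into a predicate: the package crashed if it never started, died during the run, or was restarted under a different PID:

 def monkey_run_crashed(before_pids, after_pids):
   return (not before_pids or not after_pids
           or after_pids[0] != before_pids[0])

 assert monkey_run_crashed([1234], [])        # process died during the run
 assert monkey_run_crashed([1234], [5678])    # process was restarted
 assert monkey_run_crashed([], [1234])        # package never started
 assert not monkey_run_crashed([1234], [1234])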
Example #29
    def _RunJavaTests(self, fname, tests):
        """Calls a list of tests and stops at the first test failure.

    This method iterates until either it encounters a non-passing test or it
    exhausts the list of tests. Then it returns the appropriate Python result.

    Args:
      fname: filename for the Python test
      tests: a list of Java test names which will be run

    Returns:
      A TestRunResults object containing a result for this Python test.
    """
        test_type = base_test_result.ResultType.PASS
        log = ''

        start_ms = int(time.time() * 1000)
        for test in tests:
            # We're only running one test at a time, so this TestRunResults object
            # will hold only one result.
            suite, test_name = test.split('.')
            java_results = self._RunJavaTest(fname, suite, test_name)
            assert len(java_results.GetAll()) == 1
            if not java_results.DidRunPass():
                result = java_results.GetNotPass().pop()
                log = result.GetLog()
                test_type = result.GetType()
                break
        duration_ms = int(time.time() * 1000) - start_ms

        python_results = base_test_result.TestRunResults()
        python_results.AddResult(
            test_result.InstrumentationTestResult(self.qualified_name,
                                                  test_type,
                                                  start_ms,
                                                  duration_ms,
                                                  log=log))
        return python_results
Example #30
  def Run(self, device):
    """Run the test on a given device.
    Args:
      device: Name of target device where to run the test.
    Returns:
      A base_test_result.TestRunResult() instance.
    """
    margin = 8
    print('[ %-*s ] %s' % (margin, 'RUN', self.tagged_name))
    logging.info('Running linker test: %s', self.tagged_name)

    # Create command-line file on device.
    if self.is_modern_linker:
      command_line_flags = '--use-linker=modern'
    else:
      command_line_flags = '--use-linker=legacy'
    if self.is_low_memory:
      command_line_flags += ' --low-memory-device'
    device.WriteFile(_COMMAND_LINE_FILE, command_line_flags)

    # Run the test.
    status, logs = self._RunTest(device)

    result_text = 'OK'
    if status == ResultType.FAIL:
      result_text = 'FAILED'
    elif status == ResultType.TIMEOUT:
      result_text = 'TIMEOUT'
    print('[ %*s ] %s' % (margin, result_text, self.tagged_name))

    results = base_test_result.TestRunResults()
    results.AddResult(
        base_test_result.BaseTestResult(
            self.tagged_name,
            status,
            log=logs))

    return results
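Taken together, these examples exercise a small surface of base_test_result. A recap that uses only calls appearing somewhere on this page; it assumes a Chromium checkout with build/android/pylib on sys.path:

 from pylib.base import base_test_result

 run = base_test_result.TestRunResults()
 run.AddResult(base_test_result.BaseTestResult(
     'Suite.Passes', base_test_result.ResultType.PASS, duration=123))
 run.AddResult(base_test_result.BaseTestResult(
     'Suite.Fails', base_test_result.ResultType.FAIL, log='boom'))

 assert not run.DidRunPass()
 failed = run.GetNotPass().pop()
 print(failed.GetName(), failed.GetType(), failed.GetLog())

 merged = base_test_result.TestRunResults()
 merged.AddTestRunResults(run)   # merge another run's results wholesale
 print(sorted(r.GetName() for r in merged.GetAll()))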