Example 1
 def test_fill_xctestrun_node(self, *args):
     """Tests fill_xctestrun_node method."""
     test_app = test_apps.DeviceXCTestUnitTestsApp(_TEST_APP_PATH)
     expected_xctestrun_node = {
         'TestTargetName': {
             'CommandLineArguments': [
                 '--enable-run-ios-unittests-with-xctest',
                 '--gmock_verbose=error'
             ],
             'IsAppHostedTestBundle':
             True,
             'TestBundlePath':
             '__TESTHOST__/%s' % _XCTEST_PATH,
             'TestHostBundleIdentifier':
             _BUNDLE_ID,
             'TestHostPath':
             '%s' % _TEST_APP_PATH,
             'TestingEnvironmentVariables': {
                 'DYLD_INSERT_LIBRARIES':
                 '__TESTHOST__/Frameworks/libXCTestBundleInject.dylib',
                 'DYLD_LIBRARY_PATH':
                 '__PLATFORMS__/iPhoneOS.platform/Developer/Library',
                 'DYLD_FRAMEWORK_PATH':
                 '__PLATFORMS__/iPhoneOS.platform/Developer/'
                 'Library/Frameworks',
                 'XCInjectBundleInto':
                 '__TESTHOST__/%s' % _MODULE_NAME
             }
         }
     }
     xctestrun_node = test_app.fill_xctestrun_node()
     self.assertEqual(xctestrun_node, expected_xctestrun_node)
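The expected node above follows the layout of an .xctestrun plist, whose top-level key is the test target name. As a minimal sketch (not taken from the source; build_xctestrun_file, run_xctestrun and their parameters are hypothetical names), the dict returned by fill_xctestrun_node could be written out with plistlib and handed to xcodebuild test-without-building:

import plistlib
import subprocess


def build_xctestrun_file(xctestrun_node, out_path):
    """Writes the xctestrun node dict as the plist consumed by xcodebuild."""
    with open(out_path, 'wb') as f:
        # plistlib.dump is Python 3; Python 2 would use plistlib.writePlist.
        plistlib.dump(xctestrun_node, f)
    return out_path


def run_xctestrun(xctestrun_path, udid, results_dir):
    """Runs the prebuilt tests described by the xctestrun file on a device."""
    cmd = [
        'xcodebuild', 'test-without-building',
        '-xctestrun', xctestrun_path,
        '-destination', 'id=%s' % udid,
        '-resultBundlePath', results_dir,
    ]
    return subprocess.call(cmd)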
Example 2
    def get_launch_test_app(self):
        """Returns the proper test_app for the run.

    Returns:
      A DeviceXCTestUnitTestsApp  for the current run to execute.
    """
        return test_apps.DeviceXCTestUnitTestsApp(
            self.app_path,
            included_tests=self.test_cases,
            env_vars=self.env_vars,
            test_args=self.test_args)
Example 3
    def get_launch_test_app(self):
        """Returns the proper test_app for the run.

    Returns:
      A DeviceXCTestUnitTestsApp  for the current run to execute.
    """
        # Non iOS Chrome users have unit tests not built with XCTest.
        if not self.xctest:
            return test_apps.GTestsApp(self.app_path,
                                       included_tests=self.test_cases,
                                       env_vars=self.env_vars,
                                       test_args=self.test_args)

        return test_apps.DeviceXCTestUnitTestsApp(
            self.app_path,
            included_tests=self.test_cases,
            env_vars=self.env_vars,
            test_args=self.test_args)
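At the call site, the returned app object is typically fed into get_launch_command and then run, mirroring the launch() methods in the examples that follow. A rough usage sketch (launch_unit_tests is a hypothetical helper, not from the source; runner stands in for the TestRunner instance):

import os


def launch_unit_tests(runner):
    # Hypothetical helper: drives a TestRunner the same way the launch()
    # methods in the following examples do.
    test_app = runner.get_launch_test_app()
    destination = 'id=%s' % runner.udid
    out_dir = os.path.join(runner.out_dir, 'TestResults')
    cmd = runner.get_launch_command(test_app, out_dir, destination,
                                    runner.shards)
    return runner._run(cmd=cmd, shards=runner.shards or 1)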
Example 4
    def launch(self):
        """Launches the test app."""
        self.set_up()
        destination = 'id=%s' % self.udid
        if self.xctest:
            test_app = test_apps.EgtestsApp(self.app_path,
                                            included_tests=self.test_cases,
                                            env_vars=self.env_vars,
                                            test_args=self.test_args)
        elif self.xctest_path:

            if self.__class__.__name__ == 'DeviceTestRunner':
                # When self.xctest is False and (bool)self.xctest_path is True
                # and it's using a device runner, this is an XCTest-hosted
                # unit test, which currently runs on real devices.
                # TODO(crbug.com/1006881): Separate "running style" from
                # "parser style" for XCTests and GTests.
                test_app = test_apps.DeviceXCTestUnitTestsApp(
                    self.app_path,
                    included_tests=self.test_cases,
                    env_vars=self.env_vars,
                    test_args=self.test_args)
            else:
                raise XCTestConfigError(
                    'Trying to run a DeviceXCTestUnitTestsApp on a '
                    'non-device runner!')

        else:
            test_app = test_apps.GTestsApp(self.app_path,
                                           included_tests=self.test_cases,
                                           env_vars=self.env_vars,
                                           test_args=self.test_args)
        out_dir = os.path.join(self.out_dir, 'TestResults')
        cmd = self.get_launch_command(test_app, out_dir, destination,
                                      self.shards)
        try:
            result = self._run(cmd=cmd, shards=self.shards or 1)
            if result.crashed and not result.crashed_test:
                # If the app crashed but not during any particular test case, assume
                # it crashed on startup. Try one more time.
                self.shutdown_and_restart()
                LOGGER.warning('Crashed on startup, retrying...\n')
                out_dir = os.path.join(self.out_dir,
                                       'retry_after_crash_on_startup')
                cmd = self.get_launch_command(test_app, out_dir, destination,
                                              self.shards)
                result = self._run(cmd)

            if result.crashed and not result.crashed_test:
                raise AppLaunchError

            passed = result.passed_tests
            failed = result.failed_tests
            flaked = result.flaked_tests

            try:
                while result.crashed and result.crashed_test:
                    # If the app crashes during a specific test case, then resume at the
                    # next test case. This is achieved by filtering out every test case
                    # which has already run.
                    LOGGER.warning('Crashed during %s, resuming...\n',
                                   result.crashed_test)
                    test_app.excluded_tests = (
                        passed + failed.keys() + flaked.keys())
                    retry_out_dir = os.path.join(
                        self.out_dir,
                        'retry_after_crash_%d' % int(time.time()))
                    result = self._run(
                        self.get_launch_command(
                            test_app,
                            os.path.join(retry_out_dir, str(int(time.time()))),
                            destination))
                    passed.extend(result.passed_tests)
                    failed.update(result.failed_tests)
                    flaked.update(result.flaked_tests)
            except OSError as e:
                if e.errno == errno.E2BIG:
                    LOGGER.error('Too many test cases to resume.')
                else:
                    raise

            # Instantiate this after crash retries so that all tests have a first
            # pass before entering the retry block below.
            # For each retry that passes, we want to mark it separately as passed
            # (ie/ "FAIL PASS"), with is_flaky=True.
            output = sju.StdJson(passed=passed, failed=failed, flaked=flaked)

            # Retry failed test cases.
            retry_results = {}
            test_app.excluded_tests = []
            if self.retries and failed:
                LOGGER.warning('%s tests failed and will be retried.\n',
                               len(failed))
                for i in xrange(self.retries):
                    for test in failed.keys():
                        LOGGER.info('Retry #%s for %s.\n', i + 1, test)
                        test_app.included_tests = [test]
                        retry_out_dir = os.path.join(self.out_dir,
                                                     test + '_failed',
                                                     'retry_%d' % i)
                        retry_result = self._run(
                            self.get_launch_command(test_app, retry_out_dir,
                                                    destination))
                        # If the test passed on retry, consider it a flake
                        # instead of a failure.
                        if test in retry_result.passed_tests:
                            flaked[test] = failed.pop(test)
                            output.mark_passed(test, flaky=True)
                        # Save the result of the latest run for each test.
                        retry_results[test] = retry_result

            # Build test_results.json.
            # Check if any of the retries crashed in addition to the original
            # run.
            interrupted = (result.crashed
                           or any([r.crashed for r in retry_results.values()]))
            self.test_results['interrupted'] = interrupted
            self.test_results['num_failures_by_type'] = {
                'FAIL': len(failed) + len(flaked),
                'PASS': len(passed),
            }

            self.test_results['tests'] = output.tests

            self.logs['passed tests'] = passed
            if flaked:
                self.logs['flaked tests'] = flaked
            if failed:
                self.logs['failed tests'] = failed
            for test, log_lines in failed.iteritems():
                self.logs[test] = log_lines
            for test, log_lines in flaked.iteritems():
                self.logs[test] = log_lines

            return not failed and not interrupted
        finally:
            self.tear_down()
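One detail worth noting in the retry block above: a test that fails in the main run but passes on a retry is moved from failed to flaked, reported as "FAIL PASS" with is_flaky=True, and still counted under FAIL in num_failures_by_type. A purely illustrative sketch with made-up test names and data:

# Illustrative only; the test name and log lines are invented.
failed = {'SomeSuite.testCase': ['assertion log lines']}
flaked = {}
passed = []
retry_passed_tests = ['SomeSuite.testCase']  # pretend the retry passed

for test in list(failed):
    if test in retry_passed_tests:
        # Same move as flaked[test] = failed.pop(test) in the code above.
        flaked[test] = failed.pop(test)

num_failures_by_type = {'FAIL': len(failed) + len(flaked), 'PASS': len(passed)}
assert num_failures_by_type == {'FAIL': 1, 'PASS': 0}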
Example 5
  def launch(self):
    """Launches the test app."""
    self.set_up()
    destination = 'id=%s' % self.udid
    # When the current |launch| method is invoked, it is running a unit test
    # target. For simulators, '--xctest' is passed to the test runner scripts
    # to make them run XCTest-based unit tests.
    if self.xctest:
      # TODO(crbug.com/1085603): Pass the test runner an arg to determine
      # whether it's a device or simulator test, and check the arg here.
      if self.__class__.__name__ == 'SimulatorTestRunner':
        test_app = test_apps.SimulatorXCTestUnitTestsApp(
            self.app_path,
            included_tests=self.test_cases,
            env_vars=self.env_vars,
            test_args=self.test_args)
      elif self.__class__.__name__ == 'DeviceTestRunner':
        test_app = test_apps.DeviceXCTestUnitTestsApp(
            self.app_path,
            included_tests=self.test_cases,
            env_vars=self.env_vars,
            test_args=self.test_args)
      else:
        raise XCTestConfigError('Wrong config. TestRunner.launch() called from'
                                ' an unexpected class.')
    else:
      test_app = test_apps.GTestsApp(
          self.app_path,
          included_tests=self.test_cases,
          env_vars=self.env_vars,
          test_args=self.test_args)
    out_dir = os.path.join(self.out_dir, 'TestResults')
    cmd = self.get_launch_command(test_app, out_dir, destination, self.shards)
    try:
      result = self._run(cmd=cmd, shards=self.shards or 1)
      if result.crashed and not result.crashed_test:
        # If the app crashed but not during any particular test case, assume
        # it crashed on startup. Try one more time.
        self.shutdown_and_restart()
        LOGGER.warning('Crashed on startup, retrying...\n')
        out_dir = os.path.join(self.out_dir, 'retry_after_crash_on_startup')
        cmd = self.get_launch_command(test_app, out_dir, destination,
                                      self.shards)
        result = self._run(cmd)

      if result.crashed and not result.crashed_test:
        raise AppLaunchError

      passed = result.passed_tests
      failed = result.failed_tests
      flaked = result.flaked_tests
      disabled = result.disabled_tests_from_compiled_tests_file

      try:
        while result.crashed and result.crashed_test:
          # If the app crashes during a specific test case, then resume at the
          # next test case. This is achieved by filtering out every test case
          # which has already run.
          LOGGER.warning('Crashed during %s, resuming...\n',
                         result.crashed_test)
          test_app.excluded_tests = passed + failed.keys() + flaked.keys()
          retry_out_dir = os.path.join(
              self.out_dir, 'retry_after_crash_%d' % int(time.time()))
          result = self._run(
              self.get_launch_command(
                  test_app, os.path.join(retry_out_dir, str(int(time.time()))),
                  destination))
          passed.extend(result.passed_tests)
          failed.update(result.failed_tests)
          flaked.update(result.flaked_tests)
          if not disabled:
            disabled = result.disabled_tests_from_compiled_tests_file

      except OSError as e:
        if e.errno == errno.E2BIG:
          LOGGER.error('Too many test cases to resume.')
        else:
          raise

      # Instantiate this after crash retries so that all tests have a first
      # pass before entering the retry block below.
      # For each retry that passes, we want to mark it separately as passed
      # (ie/ "FAIL PASS"), with is_flaky=True.
      # TODO(crbug.com/1132476): Report failed GTest logs to ResultSink.
      output = sju.StdJson(passed=passed, failed=failed, flaked=flaked)

      # Retry failed test cases.
      retry_results = {}
      test_app.excluded_tests = []
      if self.retries and failed:
        LOGGER.warning('%s tests failed and will be retried.\n', len(failed))
        for i in xrange(self.retries):
          for test in failed.keys():
            LOGGER.info('Retry #%s for %s.\n', i + 1, test)
            test_app.included_tests = [test]
            retry_out_dir = os.path.join(self.out_dir, test + '_failed',
                                         'retry_%d' % i)
            retry_result = self._run(
                self.get_launch_command(test_app, retry_out_dir, destination))
            # If the test passed on retry, consider it a flake instead of a
            # failure.
            if test in retry_result.passed_tests:
              flaked[test] = failed.pop(test)
              output.mark_passed(test, flaky=True)
            # Save the result of the latest run for each test.
            retry_results[test] = retry_result

      output.mark_all_skipped(disabled)

      # Build test_results.json.
      # Check if any of the retries crashed in addition to the original run.
      interrupted = (result.crashed or
                     any([r.crashed for r in retry_results.values()]))
      self.test_results['interrupted'] = interrupted
      self.test_results['num_failures_by_type'] = {
        'FAIL': len(failed) + len(flaked),
        'PASS': len(passed),
      }

      self.test_results['tests'] = output.tests

      self.logs['passed tests'] = passed
      if disabled:
        self.logs['disabled tests'] = disabled
      if flaked:
        self.logs['flaked tests'] = flaked
      if failed:
        self.logs['failed tests'] = failed
      for test, log_lines in failed.iteritems():
        self.logs[test] = log_lines
      for test, log_lines in flaked.iteritems():
        self.logs[test] = log_lines

      return not failed and not interrupted
    finally:
      self.tear_down()