Code example #1
    def test_timeout(self):
        """Test setting timeout"""
        test = 'e'
        output = sju.StdJson()
        output.mark_timeout(test)
        self.assertEqual(output.tests['e']['actual'], 'TIMEOUT')

        output = sju.StdJson()
        output.mark_failed(test)
        self.assertEqual(output.tests['e']['actual'], 'FAIL')
        output.mark_timeout(test)
        self.assertEqual(output.tests['e']['actual'], 'FAIL TIMEOUT')
Code example #2
    def test_single_constructor(self):
        """Test one test passing, failing, flaking via constructor"""
        test = 'a'

        output = sju.StdJson(passed=test)
        self.assertEqual(output.tests['a']['actual'], 'PASS')

        output = sju.StdJson(failed=test)
        self.assertEqual(output.tests['a']['actual'], 'FAIL')

        output = sju.StdJson(passed=test, flaky=True)
        self.assertEqual(output.tests['a']['actual'], 'PASS')
        self.assertTrue(output.tests['a']['is_flaky'])
Code example #3
  def test_skip(self):
    """Test setting expected skip."""
    test = 'f'
    output = sju.StdJson()
    output.mark_skipped(test)
    self.assertEqual(output.tests['f']['actual'], 'SKIP')
    self.assertFalse(output.tests['f'].get('is_unexpected', False))
Code example #4
    def test_base_cases(self):
        """Test invalid test names are skipped"""
        passed = ['', None]
        failed = ['', None]

        output = sju.StdJson(passed=passed, failed=failed)
        self.assertFalse(output.tests)
Code example #5
  def test_shard(self):
    """Test shard info is written to test result."""
    test = 'f'
    output = sju.StdJson()
    output.mark_passed(test)
    self.assertEqual(output.tests['f']['shard'],
                     os.getenv('GTEST_SHARD_INDEX', 0))
Code example #6
  def test_post_to_result_sink(self, mock_result_sink_class):
    result_sink = mock_result_sink_class.return_value
    passed_test = 'passed_test'
    failed_test = 'failed_test'
    skipped_test = 'skipped_test'
    timeout_test = 'timeout_test'

    output = sju.StdJson()
    output.mark_passed(passed_test)
    result_sink.post.assert_called_with(passed_test, 'PASS', True)
    output.mark_failed(failed_test, 'logs')
    result_sink.post.assert_called_with(
        failed_test, 'FAIL', False, test_log='logs')
    output.mark_disabled(skipped_test)
    result_sink.post.assert_called_with(
        skipped_test, 'SKIP', True, tags=[('disabled_test', 'true')])
    output.mark_timeout(timeout_test)
    timeout_log = (
        'The test is compiled in test target but was unexpectedly not'
        ' run or not finished.')
    result_sink.post.assert_called_with(
        timeout_test,
        'SKIP',
        False,
        test_log=timeout_log,
        tags=[('disabled_test', 'false')])
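The mocked assertions above pin down how each mark_* call is expected to forward per-test results to ResultSink: a pass and a disabled test are reported as expected results ('PASS' and 'SKIP'), while a failure and a timeout are unexpected, with logs and tags passed as keyword arguments. The following is a minimal sketch of that forwarding contract only; the class name, the injected `sink` parameter, and the overall structure are illustrative assumptions, and the real sju.StdJson / result_sink wiring may differ.

class ResultSinkForwarder(object):
  """Illustrative sketch: forwards per-test results to a ResultSink-like client.

  `sink` is assumed to be any object exposing post(test, status, expected,
  **kwargs), matching what the mock above asserts.
  """

  TIMEOUT_LOG = ('The test is compiled in test target but was unexpectedly not'
                 ' run or not finished.')

  def __init__(self, sink):
    self.sink = sink

  def mark_passed(self, test):
    self.sink.post(test, 'PASS', True)

  def mark_failed(self, test, test_log=''):
    self.sink.post(test, 'FAIL', False, test_log=test_log)

  def mark_disabled(self, test):
    # Disabled tests are reported as expected SKIPs, tagged as disabled.
    self.sink.post(test, 'SKIP', True, tags=[('disabled_test', 'true')])

  def mark_timeout(self, test):
    # Timeouts are reported as unexpected SKIPs with an explanatory log.
    self.sink.post(test, 'SKIP', False, test_log=self.TIMEOUT_LOG,
                   tags=[('disabled_test', 'false')])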
Code example #7
    def test_flaky_without_explicit(self):
        """Test setting pass on an already failed test, w/o explicit flaky"""
        test = 'e'
        output = sju.StdJson()
        output.mark_failed(test)
        self.assertEqual(output.tests['e']['actual'], 'FAIL')

        output.mark_passed(test)
        self.assertEqual(output.tests['e']['actual'], 'FAIL PASS')
        self.assertTrue(output.tests['e']['is_flaky'])
Code example #8
    def test_multi_run(self):
        """Test multiple executions of the same test"""
        test = 'a'

        output = sju.StdJson(passed=test)
        self.assertEqual(output.tests['a']['actual'], 'PASS')

        output.mark_failed(test)
        self.assertEqual(output.tests['a']['actual'], 'PASS FAIL')

        output.mark_passed(test, flaky=True)
        self.assertEqual(output.tests['a']['actual'], 'PASS FAIL PASS')
        self.assertTrue(output.tests['a']['is_flaky'])
Code example #9
    def test_multi_scenario(self):
        """Test a scenario where some tests pass, fail and flake"""
        passed = ['a', 'b', 'c']
        failed = ['d']
        flaked = ['e']

        output = sju.StdJson(passed=passed, failed=failed, flaked=flaked)
        self.assertEqual(len(output.tests), 5)
        # Ensure that the flaked test is marked as passed, with is_flaky=True
        self.assertTrue(output.tests['e']['is_flaky'])

        # A retry that re-runs the failed test fails again
        output.mark_failed('d')
        self.assertEqual(output.tests['d']['actual'], 'FAIL FAIL')

        # Another retry of 'd' passes, so we set it as a flaky pass
        output.mark_passed('d', flaky=True)
        self.assertEqual(output.tests['d']['actual'], 'FAIL FAIL PASS')
        self.assertTrue(output.tests['d']['is_flaky'])
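Taken together, the unit tests above describe the collector's observable behaviour: 'actual' accumulates one space-separated token per run, a PASS after an earlier FAIL (or an explicit flaky=True) sets is_flaky, TIMEOUT appends to whatever came before, empty or None test names are ignored, and every entry records the shard taken from GTEST_SHARD_INDEX. The collector below is a minimal sketch that satisfies just those assertions; it is not the real sju.StdJson, which also records expected results, forwards to ResultSink, and exposes mark_disabled/mark_all_disabled and finalize().

import os


class MinimalStdJson(object):
  """Illustrative collector that satisfies the assertions above."""

  def __init__(self, passed=None, failed=None, flaked=None, flaky=False):
    self.tests = {}
    for test in self._as_list(passed):
      self.mark_passed(test, flaky=flaky)
    for test in self._as_list(failed):
      self.mark_failed(test)
    for test in self._as_list(flaked):
      self.mark_passed(test, flaky=True)

  @staticmethod
  def _as_list(tests):
    if not tests:
      return []
    # Accept a single test name or any iterable of names (a dict yields keys).
    if isinstance(tests, (list, tuple, set, dict)):
      return list(tests)
    return [tests]

  def _append(self, test, result):
    if not test:  # Skip invalid ('' or None) test names.
      return False
    entry = self.tests.setdefault(
        test, {'actual': '', 'shard': os.getenv('GTEST_SHARD_INDEX', 0)})
    entry['actual'] = (entry['actual'] + ' ' + result).strip()
    return True

  def mark_passed(self, test, flaky=False):
    if self._append(test, 'PASS'):
      if flaky or 'FAIL' in self.tests[test]['actual']:
        self.tests[test]['is_flaky'] = True

  def mark_failed(self, test):
    self._append(test, 'FAIL')

  def mark_timeout(self, test):
    self._append(test, 'TIMEOUT')

  def mark_skipped(self, test):
    self._append(test, 'SKIP')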
Code example #10
    def launch(self):
        """Launches tests using xcodebuild."""
        launch_commands = []
        for params in self.sharding_data:
            test_app = self.get_launch_test_app(params)
            launch_commands.append(
                LaunchCommand(
                    test_app,
                    udid=params['udid'],
                    shards=params['shards'],
                    retries=self.retries,
                    out_dir=os.path.join(self.out_dir, params['udid']),
                    use_clang_coverage=(hasattr(self, 'use_clang_coverage')
                                        and self.use_clang_coverage),
                    env=self.get_launch_env()))

        thread_pool = pool.ThreadPool(len(launch_commands))
        attempts_results = []
        for result in thread_pool.imap_unordered(LaunchCommand.launch,
                                                 launch_commands):
            attempts_results.append(result['test_results']['attempts'])

        # Deletes the simulator used in the tests after they end.
        if iossim_util.is_device_with_udid_simulator(self.udid):
            iossim_util.delete_simulator_by_udid(self.udid)

        # Gets passed tests
        self.logs['passed tests'] = []
        for shard_attempts in attempts_results:
            for attempt in shard_attempts:
                self.logs['passed tests'].extend(attempt['passed'])

        # If the last attempt does not have failures, mark failed as empty
        self.logs['failed tests'] = []
        for shard_attempts in attempts_results:
            if shard_attempts[-1]['failed']:
                self.logs['failed tests'].extend(
                    shard_attempts[-1]['failed'].keys())

        # Gets disabled tests from test app object if any.
        self.logs['disabled tests'] = []
        for launch_command in launch_commands:
            self.logs['disabled tests'].extend(
                launch_command.egtests_app.disabled_tests)

        # Gets all failures/flakes and lists them in bot summary
        all_failures = set()
        for shard_attempts in attempts_results:
            for attempt, attempt_results in enumerate(shard_attempts):
                for failure in attempt_results['failed']:
                    if failure not in self.logs:
                        self.logs[failure] = []
                    self.logs[failure].append('%s: attempt # %d' %
                                              (failure, attempt))
                    self.logs[failure].extend(
                        attempt_results['failed'][failure])
                    all_failures.add(failure)

        # Gets only flaky (not failed) tests.
        self.logs['flaked tests'] = list(all_failures -
                                         set(self.logs['failed tests']))

        # Gets not-started/interrupted tests.
        # all_tests_to_run takes into consideration that only a subset of tests may
        # have run due to the test sharding logic in run.py.
        all_tests_to_run = set([
            test_name for launch_command in launch_commands
            for test_name in launch_command.egtests_app.get_all_tests()
        ])

        aborted_tests = []
        # TODO(crbug.com/1048758): For device targets, the list of test names parsed
        # from otool output is incorrect. For multitasking or any flaky test suite,
        # the list contains more tests than what actually runs.
        if (self.__class__.__name__ != 'DeviceXcodeTestRunner'
                and 'ios_chrome_multitasking_eg' not in self.app_path
                and '_flaky_eg' not in self.app_path):
            aborted_tests = list(all_tests_to_run -
                                 set(self.logs['failed tests']) -
                                 set(self.logs['passed tests']))
        aborted_tests.sort()
        self.logs['aborted tests'] = aborted_tests

        self.test_results['interrupted'] = bool(aborted_tests)
        self.test_results['num_failures_by_type'] = {
            'FAIL':
            len(self.logs['failed tests'] + self.logs['aborted tests']),
            'PASS': len(self.logs['passed tests']),
        }

        output = sju.StdJson()
        for shard_attempts in attempts_results:
            for attempt, attempt_results in enumerate(shard_attempts):

                for test in attempt_results['failed'].keys():
                    # TODO(crbug.com/1178923): Remove unicode check when it's figured out
                    # where unicode is introduced.
                    log_lines = []
                    for line in self.logs.get(test, []):
                        if sys.version_info.major == 2:
                            if isinstance(line, unicode):
                                LOGGER.warning('Unicode string: %s' % line)
                                line = line.encode('utf-8')
                        log_lines.append(line)

                    output.mark_failed(test, test_log='\n'.join(log_lines))

                # 'aborted tests' in logs is an array of strings, each string defined
                # as "{TestCase}/{testMethod}"
                for test in self.logs['aborted tests']:
                    output.mark_timeout(test)

                for test in attempt_results['passed']:
                    output.mark_passed(test)

        output.mark_all_disabled(self.logs['disabled tests'])
        output.finalize()

        self.test_results['tests'] = output.tests

        # The run is considered failed if the last attempt has failures
        # or if there are aborted tests.
        return not self.logs['failed tests'] and not self.logs['aborted tests']
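For orientation, here is a hypothetical illustration (invented test names and counts, not real output) of the shapes that launch() above leaves in self.test_results and self.logs. It uses only the fields this method sets and the per-test entry format exercised by the StdJson unit tests; the real entries carry additional fields (e.g. 'expected').

# Hypothetical illustration only -- names and values are invented.
test_results = {
    'interrupted': False,  # True when any tests were aborted / not started.
    'num_failures_by_type': {
        'FAIL': 1,  # failed tests + aborted tests
        'PASS': 2,
    },
    'tests': {
        'SomeTestCase/testPasses': {'actual': 'PASS', 'shard': 0},
        'SomeTestCase/testFlaky': {'actual': 'FAIL PASS', 'is_flaky': True,
                                   'shard': 0},
        'SomeTestCase/testFails': {'actual': 'FAIL FAIL', 'shard': 0},
    },
}
logs = {
    'passed tests': ['SomeTestCase/testPasses', 'SomeTestCase/testFlaky'],
    'failed tests': ['SomeTestCase/testFails'],
    'flaked tests': ['SomeTestCase/testFlaky'],
    'aborted tests': [],
    'disabled tests': [],
    # Plus one '<failed test name>' key per failure holding its log lines.
}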
Code example #11
    def launch(self):
        """Launches the test app."""
        self.set_up()
        destination = 'id=%s' % self.udid
        if self.xctest:
            test_app = test_apps.EgtestsApp(self.app_path,
                                            included_tests=self.test_cases,
                                            env_vars=self.env_vars,
                                            test_args=self.test_args)
        elif self.xctest_path:

            if self.__class__.__name__ == 'DeviceTestRunner':
                # When self.xctest is False but self.xctest_path is set and this
                # is a device runner, the target is an XCTest-hosted unit test,
                # which currently runs on real devices.
                # TODO(crbug.com/1006881): Separate "running style" from "parser style"
                # for XCtests and Gtests.
                test_app = test_apps.DeviceXCTestUnitTestsApp(
                    self.app_path,
                    included_tests=self.test_cases,
                    env_vars=self.env_vars,
                    test_args=self.test_args)
            else:
                raise XCTestConfigError(
                    'Trying to run a DeviceXCTestUnitTestsApp on a '
                    'non-device runner!')

        else:
            test_app = test_apps.GTestsApp(self.app_path,
                                           included_tests=self.test_cases,
                                           env_vars=self.env_vars,
                                           test_args=self.test_args)
        out_dir = os.path.join(self.out_dir, 'TestResults')
        cmd = self.get_launch_command(test_app, out_dir, destination,
                                      self.shards)
        try:
            result = self._run(cmd=cmd, shards=self.shards or 1)
            if result.crashed and not result.crashed_test:
                # If the app crashed but not during any particular test case, assume
                # it crashed on startup. Try one more time.
                self.shutdown_and_restart()
                LOGGER.warning('Crashed on startup, retrying...\n')
                out_dir = os.path.join(self.out_dir,
                                       'retry_after_crash_on_startup')
                cmd = self.get_launch_command(test_app, out_dir, destination,
                                              self.shards)
                result = self._run(cmd)

            if result.crashed and not result.crashed_test:
                raise AppLaunchError

            passed = result.passed_tests
            failed = result.failed_tests
            flaked = result.flaked_tests

            try:
                while result.crashed and result.crashed_test:
                    # If the app crashes during a specific test case, then resume at the
                    # next test case. This is achieved by filtering out every test case
                    # which has already run.
                    LOGGER.warning('Crashed during %s, resuming...\n',
                                   result.crashed_test)
                    test_app.excluded_tests = (
                        passed + failed.keys() + flaked.keys())
                    retry_out_dir = os.path.join(
                        self.out_dir,
                        'retry_after_crash_%d' % int(time.time()))
                    result = self._run(
                        self.get_launch_command(
                            test_app,
                            os.path.join(retry_out_dir, str(int(time.time()))),
                            destination))
                    passed.extend(result.passed_tests)
                    failed.update(result.failed_tests)
                    flaked.update(result.flaked_tests)
            except OSError as e:
                if e.errno == errno.E2BIG:
                    LOGGER.error('Too many test cases to resume.')
                else:
                    raise

            # Instantiate this after crash retries so that all tests have a first
            # pass before entering the retry block below.
            # For each retry that passes, we want to mark it separately as passed
            # (i.e. "FAIL PASS"), with is_flaky=True.
            output = sju.StdJson(passed=passed, failed=failed, flaked=flaked)

            # Retry failed test cases.
            retry_results = {}
            test_app.excluded_tests = []
            if self.retries and failed:
                LOGGER.warning('%s tests failed and will be retried.\n',
                               len(failed))
                for i in xrange(self.retries):
                    for test in failed.keys():
                        LOGGER.info('Retry #%s for %s.\n', i + 1, test)
                        test_app.included_tests = [test]
                        retry_out_dir = os.path.join(self.out_dir,
                                                     test + '_failed',
                                                     'retry_%d' % i)
                        retry_result = self._run(
                            self.get_launch_command(test_app, retry_out_dir,
                                                    destination))
                        # If the test passed on retry, consider it flake instead of failure.
                        if test in retry_result.passed_tests:
                            flaked[test] = failed.pop(test)
                            output.mark_passed(test, flaky=True)
                        # Save the result of the latest run for each test.
                        retry_results[test] = retry_result

            # Build test_results.json.
            # Check if any of the retries crashed in addition to the original run.
            interrupted = (result.crashed
                           or any([r.crashed for r in retry_results.values()]))
            self.test_results['interrupted'] = interrupted
            self.test_results['num_failures_by_type'] = {
                'FAIL': len(failed) + len(flaked),
                'PASS': len(passed),
            }

            self.test_results['tests'] = output.tests

            self.logs['passed tests'] = passed
            if flaked:
                self.logs['flaked tests'] = flaked
            if failed:
                self.logs['failed tests'] = failed
            for test, log_lines in failed.iteritems():
                self.logs[test] = log_lines
            for test, log_lines in flaked.iteritems():
                self.logs[test] = log_lines

            return not failed and not interrupted
        finally:
            self.tear_down()
Code example #12
  def launch(self):
    """Launches the test app."""
    self.set_up()
    destination = 'id=%s' % self.udid
    # When the current |launch| method is invoked, we are running a unit test
    # target. For simulators, '--xctest' is passed to the test runner scripts
    # to make them run XCTest-based unit tests.
    if self.xctest:
      # TODO(crbug.com/1085603): Pass in test runner an arg to determine if it's
      # device test or simulator test and test the arg here.
      if self.__class__.__name__ == 'SimulatorTestRunner':
        test_app = test_apps.SimulatorXCTestUnitTestsApp(
            self.app_path,
            included_tests=self.test_cases,
            env_vars=self.env_vars,
            test_args=self.test_args)
      elif self.__class__.__name__ == 'DeviceTestRunner':
        test_app = test_apps.DeviceXCTestUnitTestsApp(
            self.app_path,
            included_tests=self.test_cases,
            env_vars=self.env_vars,
            test_args=self.test_args)
      else:
        raise XCTestConfigError('Wrong config. TestRunner.launch() called from'
                                ' an unexpected class.')
    else:
      test_app = test_apps.GTestsApp(
          self.app_path,
          included_tests=self.test_cases,
          env_vars=self.env_vars,
          test_args=self.test_args)
    out_dir = os.path.join(self.out_dir, 'TestResults')
    cmd = self.get_launch_command(test_app, out_dir, destination, self.shards)
    try:
      result = self._run(cmd=cmd, shards=self.shards or 1)
      if result.crashed and not result.crashed_test:
        # If the app crashed but not during any particular test case, assume
        # it crashed on startup. Try one more time.
        self.shutdown_and_restart()
        LOGGER.warning('Crashed on startup, retrying...\n')
        out_dir = os.path.join(self.out_dir, 'retry_after_crash_on_startup')
        cmd = self.get_launch_command(test_app, out_dir, destination,
                                      self.shards)
        result = self._run(cmd)

      if result.crashed and not result.crashed_test:
        raise AppLaunchError

      passed = result.passed_tests
      failed = result.failed_tests
      flaked = result.flaked_tests
      disabled = result.disabled_tests_from_compiled_tests_file

      try:
        while result.crashed and result.crashed_test:
          # If the app crashes during a specific test case, then resume at the
          # next test case. This is achieved by filtering out every test case
          # which has already run.
          LOGGER.warning('Crashed during %s, resuming...\n',
                         result.crashed_test)
          test_app.excluded_tests = passed + failed.keys() + flaked.keys()
          retry_out_dir = os.path.join(
              self.out_dir, 'retry_after_crash_%d' % int(time.time()))
          result = self._run(
              self.get_launch_command(
                  test_app, os.path.join(retry_out_dir, str(int(time.time()))),
                  destination))
          passed.extend(result.passed_tests)
          failed.update(result.failed_tests)
          flaked.update(result.flaked_tests)
          if not disabled:
            disabled = result.disabled_tests_from_compiled_tests_file

      except OSError as e:
        if e.errno == errno.E2BIG:
          LOGGER.error('Too many test cases to resume.')
        else:
          raise

      # Instantiate this after crash retries so that all tests have a first
      # pass before entering the retry block below.
      # For each retry that passes, we want to mark it separately as passed
      # (i.e. "FAIL PASS"), with is_flaky=True.
      # TODO(crbug.com/1132476): Report failed GTest logs to ResultSink.
      output = sju.StdJson(passed=passed, failed=failed, flaked=flaked)

      # Retry failed test cases.
      retry_results = {}
      test_app.excluded_tests = []
      if self.retries and failed:
        LOGGER.warning('%s tests failed and will be retried.\n', len(failed))
        for i in xrange(self.retries):
          for test in failed.keys():
            LOGGER.info('Retry #%s for %s.\n', i + 1, test)
            test_app.included_tests = [test]
            retry_out_dir = os.path.join(self.out_dir, test + '_failed',
                                         'retry_%d' % i)
            retry_result = self._run(
                self.get_launch_command(test_app, retry_out_dir, destination))
            # If the test passed on retry, consider it flake instead of failure.
            if test in retry_result.passed_tests:
              flaked[test] = failed.pop(test)
              output.mark_passed(test, flaky=True)
            # Save the result of the latest run for each test.
            retry_results[test] = retry_result

      output.mark_all_skipped(disabled)

      # Build test_results.json.
      # Check if any of the retries crashed in addition to the original run.
      interrupted = (result.crashed or
                     any([r.crashed for r in retry_results.values()]))
      self.test_results['interrupted'] = interrupted
      self.test_results['num_failures_by_type'] = {
        'FAIL': len(failed) + len(flaked),
        'PASS': len(passed),
      }

      self.test_results['tests'] = output.tests

      self.logs['passed tests'] = passed
      if disabled:
        self.logs['disabled tests'] = disabled
      if flaked:
        self.logs['flaked tests'] = flaked
      if failed:
        self.logs['failed tests'] = failed
      for test, log_lines in failed.iteritems():
        self.logs[test] = log_lines
      for test, log_lines in flaked.iteritems():
        self.logs[test] = log_lines

      return not failed and not interrupted
    finally:
      self.tear_down()
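The only StdJson call here that the unit-test examples above do not cover is mark_all_skipped(disabled), applied to tests found in the compiled-tests file but never run. Based on the single-test mark_skipped behaviour shown earlier, a plausible sketch is simply a loop, written here as an extra method on the MinimalStdJson sketch after code example #9; the real sju.StdJson.mark_all_skipped may differ.

  def mark_all_skipped(self, tests):
    # Illustrative: record every compiled-but-not-run (disabled) test as an
    # expected SKIP, reusing the single-test helper from the sketch above.
    for test in (tests or []):
      self.mark_skipped(test)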
Code example #13
    def launch(self):
        """Launches tests using xcodebuild."""
        launch_commands = []
        for params in self.sharding_data:
            test_app = test_apps.EgtestsApp(
                params['app'],
                included_tests=params['test_cases'],
                env_vars=self.env_vars,
                test_args=self.test_args,
                release=self.release,
                host_app_path=params['host'])
            launch_commands.append(
                LaunchCommand(
                    test_app,
                    udid=params['udid'],
                    shards=params['shards'],
                    retries=self.retries,
                    out_dir=os.path.join(self.out_dir, params['udid']),
                    use_clang_coverage=(hasattr(self, 'use_clang_coverage')
                                        and self.use_clang_coverage),
                    env=self.get_launch_env()))

        thread_pool = pool.ThreadPool(len(launch_commands))
        attempts_results = []
        for result in thread_pool.imap_unordered(LaunchCommand.launch,
                                                 launch_commands):
            attempts_results.append(result['test_results']['attempts'])

        # Gets passed tests
        self.logs['passed tests'] = []
        for shard_attempts in attempts_results:
            for attempt in shard_attempts:
                self.logs['passed tests'].extend(attempt['passed'])

        # If the last attempt does not have failures, mark failed as empty
        self.logs['failed tests'] = []
        for shard_attempts in attempts_results:
            if shard_attempts[-1]['failed']:
                self.logs['failed tests'].extend(
                    shard_attempts[-1]['failed'].keys())

        # Gets all failures/flakes and lists them in bot summary
        all_failures = set()
        for shard_attempts in attempts_results:
            for attempt, attempt_results in enumerate(shard_attempts):
                for failure in attempt_results['failed']:
                    if failure not in self.logs:
                        self.logs[failure] = []
                    self.logs[failure].append('%s: attempt # %d' %
                                              (failure, attempt))
                    self.logs[failure].extend(
                        attempt_results['failed'][failure])
                    all_failures.add(failure)

        # Gets only flaky (not failed) tests.
        self.logs['flaked tests'] = list(all_failures -
                                         set(self.logs['failed tests']))

        # Gets not-started/interrupted tests.
        # all_tests_to_run takes into consideration that only a subset of tests may
        # have run due to the test sharding logic in run.py.
        all_tests_to_run = set([
            test_name for launch_command in launch_commands
            for test_name in launch_command.egtests_app.get_all_tests()
        ])

        aborted_tests = []
        # TODO(crbug.com/1048758): For device targets, the list of test names parsed
        # from otool output is incorrect. For multitasking or any flaky test suite,
        # the list contains more tests than what actually runs.
        if (self.__class__.__name__ != 'DeviceXcodeTestRunner'
                and 'ios_chrome_multitasking_eg' not in self.app_path
                and '_flaky_eg' not in self.app_path):
            aborted_tests = list(all_tests_to_run -
                                 set(self.logs['failed tests']) -
                                 set(self.logs['passed tests']))
        aborted_tests.sort()
        self.logs['aborted tests'] = aborted_tests

        self.test_results['interrupted'] = bool(aborted_tests)
        self.test_results['num_failures_by_type'] = {
            'FAIL':
            len(self.logs['failed tests'] + self.logs['aborted tests']),
            'PASS': len(self.logs['passed tests']),
        }

        output = sju.StdJson()
        for shard_attempts in attempts_results:
            for attempt, attempt_results in enumerate(shard_attempts):

                for test in attempt_results['failed'].keys():
                    output.mark_failed(test)

                # 'aborted tests' in logs is an array of strings, each string defined
                # as "{TestCase}/{testMethod}"
                for test in self.logs['aborted tests']:
                    output.mark_timeout(test)

                for test in attempt_results['passed']:
                    output.mark_passed(test)

        self.test_results['tests'] = output.tests

        # The run is considered failed if the last attempt has failures
        # or if there are aborted tests.
        return not self.logs['failed tests'] and not self.logs['aborted tests']