Example #1
  def get_wpr_test_command(self, recipe_path, test_name):
    """Creates xcodebuild commands for running a wpr test per recipe_path.

    Args:
      recipe_path: (str) Path to wpr recipe file.
      test_name: (str) Test name (format: ios_website) of this wpr test.

    Returns:
      Xcodebuild command to run in the format of a list of str.
    """
    wpr_test_args = [
        '--enable-features=AutofillShowTypePredictions',
        '-autofillautomation=%s' % recipe_path,
    ]
    wpr_egtests_app = test_apps.EgtestsApp(
        self.app_path,
        included_tests=["AutofillAutomationTestCase"],
        env_vars=self.env_vars,
        test_args=wpr_test_args,
        host_app_path=self.host_app_path)

    self.test_attempt_count[test_name] = self.test_attempt_count.get(
        test_name, 0) + 1

    destination = 'platform=iOS Simulator,OS=%s,name=%s' % (self.version,
                                                            self.platform)
    destination_folder = '%s %s %s attempt#%s' % (
        self.version, self.platform, test_name,
        self.test_attempt_count[test_name])
    out_dir = os.path.join(self.out_dir, destination_folder)
    return wpr_egtests_app.command(out_dir, destination, self.shards)
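The docstring above says the return value is an xcodebuild invocation expressed as a list of strings. A minimal sketch of how a caller might execute it, using only the standard library; the function and parameter names here are illustrative, not from the source above:

import subprocess

def run_wpr_test(runner, recipe_path, test_name):
    # `runner` is assumed to be the object defining get_wpr_test_command
    # above; recipe_path/test_name mirror its documented arguments.
    cmd = runner.get_wpr_test_command(recipe_path, test_name)
    # The command is already a list of str, so no shell quoting is needed.
    proc = subprocess.run(cmd, capture_output=True, text=True)
    return proc.returncode == 0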
Example #2
 def test_xctestRunNode_without_filter(self):
     self.mock(test_apps.EgtestsApp, '_xctest_path',
               lambda _: 'xctest-path')
     egtest_node = test_apps.EgtestsApp(
         _TEST_APP_PATH).fill_xctestrun_node()['test_app_module']
     self.assertNotIn('OnlyTestIdentifiers', egtest_node)
     self.assertNotIn('SkipTestIdentifiers', egtest_node)
Example #3
 def test_command_with_repeat_count_incorrect_xcode(self, _1, _2):
     """Tests |command| raises error with repeat_count in lower Xcode version."""
     egtests_app = test_apps.EgtestsApp('app_path',
                                        host_app_path='host_app_path',
                                        repeat_count=2)
     with self.assertRaises(
             test_runner_errors.XcodeUnsupportedFeatureError):
         cmd = egtests_app.command('outdir', 'id=UUID', 1)
Example #4
 def testEgtests_xctestRunNode_with_filter_skip_identifiers(self):
     skipped_tests = [
         'TestCase1/testMethod1', 'TestCase1/testMethod2',
         'TestCase2/testMethod1', 'TestCase1/testMethod2'
     ]
     egtest_node = test_apps.EgtestsApp(
         _EGTESTS_APP_PATH, excluded_tests=skipped_tests
     ).fill_xctestrun_node()['any_egtests_module']
     self.assertEqual(skipped_tests, egtest_node['SkipTestIdentifiers'])
     self.assertNotIn('OnlyTestIdentifiers', egtest_node)
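For orientation, the node these assertions inspect is ultimately embedded in the .xctestrun property list that xcodebuild consumes. A minimal sketch of that serialization step, assuming only plistlib; the real node carries many more keys (bundle paths, environment variables, etc.) than the two checked here:

import plistlib

# Hedged sketch: write a fill_xctestrun_node()-style dict out as an
# .xctestrun plist. Only a key asserted on above is shown; the module
# name 'any_egtests_module' matches the key used in this test.
node = {'SkipTestIdentifiers': ['TestCase1/testMethod1',
                                'TestCase1/testMethod2']}
with open('egtests.xctestrun', 'wb') as f:
    plistlib.dump({'any_egtests_module': node}, f)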
Example #5
 def testNonTestsFiltered(self, mock_fetch, _):
     mock_fetch.return_value = [
         ('ATestCase', 'testB'),
         ('setUpForTestCase', 'testForStartup'),
         ('ChromeTestCase', 'testServer'),
         ('FindInPageTestCase', 'testURL'),
         ('CTestCase', 'testD'),
     ]
     test_app = test_apps.EgtestsApp(_TEST_APP_PATH)
     tests = test_app.get_all_tests()
     self.assertEqual(set(tests),
                      set(['ATestCase/testB', 'CTestCase/testD']))
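The expectations above imply that get_all_tests drops methods belonging to infrastructure classes. A hypothetical filter that reproduces exactly what this test asserts; the real predicate in test_apps.py may be broader:

# Hypothetical deny-list consistent with the mock data above; these class
# names come from the test, not from test_apps.py itself.
_NON_TEST_CLASSES = frozenset(
    ['setUpForTestCase', 'ChromeTestCase', 'FindInPageTestCase'])

def filter_non_tests(class_method_pairs):
    """Keeps only (class, method) pairs that look like real test methods."""
    return ['%s/%s' % (cls, method)
            for cls, method in class_method_pairs
            if cls not in _NON_TEST_CLASSES]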
Example #6
 def test_xctestRunNode_with_filter_skip_identifiers(self):
     self.mock(test_apps.EgtestsApp, '_xctest_path',
               lambda _: 'xctest-path')
     skipped_tests = [
         'TestCase1/testMethod1', 'TestCase1/testMethod2',
         'TestCase2/testMethod1', 'TestCase1/testMethod2'
     ]
     egtest_node = test_apps.EgtestsApp(
         _TEST_APP_PATH, excluded_tests=skipped_tests).fill_xctestrun_node(
         )['test_app_module']
     self.assertEqual(skipped_tests, egtest_node['SkipTestIdentifiers'])
     self.assertNotIn('OnlyTestIdentifiers', egtest_node)
Example #7
    def get_launch_test_app(self):
        """Returns the proper test_app for the run.

        Returns:
          An implementation of EgtestsApp for the runner.
        """
        return test_apps.EgtestsApp(self.app_path,
                                    included_tests=self.test_cases,
                                    env_vars=self.env_vars,
                                    test_args=self.test_args,
                                    release=self.release,
                                    repeat_count=self.repeat_count,
                                    host_app_path=self.host_app_path)
Example #8
 def test_command_with_repeat_count(self, _1, _2):
     """Tests command method can produce repeat_count arguments when available.
 """
     egtests_app = test_apps.EgtestsApp('app_path',
                                        host_app_path='host_app_path',
                                        repeat_count=2)
     cmd = egtests_app.command('outdir', 'id=UUID', 1)
     expected_cmd = [
         'arch', '-arch', 'arm64', 'xcodebuild', 'test-without-building',
         '-xctestrun', 'xctestrun', '-destination', 'id=UUID',
         '-resultBundlePath', 'outdir', '-test-iterations', '2'
     ]
     self.assertEqual(cmd, expected_cmd)
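Taken together with Example #3, these two tests pin down a guard: -test-iterations is only emitted when the installed Xcode supports it, and an unsupported Xcode raises XcodeUnsupportedFeatureError. A hedged sketch of such a guard; the helper name and boolean parameter are illustrative, not the real implementation in test_apps.py:

import test_runner_errors  # module used in Example #3

def append_repeat_args(cmd, repeat_count, xcode_supports_iterations):
    if repeat_count:
        if not xcode_supports_iterations:
            # Mirrors the behavior asserted in Example #3.
            raise test_runner_errors.XcodeUnsupportedFeatureError(
                'repeat_count requires -test-iterations support')
        cmd.extend(['-test-iterations', str(repeat_count)])
    return cmd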
Example #9
 def test_launch_command_not_restart_crashed_attempt(
         self, mock_collect_results):
     """Crashed first attempt of runtime select test suite won't be retried."""
     egtests = test_apps.EgtestsApp(_FLAKY_EGTEST_APP_PATH)
     crashed_collection = ResultCollection()
     crashed_collection.crashed = True
     mock_collect_results.return_value = crashed_collection
     launch_command = xcodebuild_runner.LaunchCommand(egtests,
                                                      _DESTINATION,
                                                      shards=1,
                                                      retries=3)
     overall_result = launch_command.launch()
     self.assertEqual(len(overall_result.all_test_names()), 0)
     self.assertEqual(overall_result.expected_tests(), set([]))
     self.assertTrue(overall_result.crashed)
Example #10
    def get_launch_test_app(self, params):
        """Returns the proper test_app for the run, requiring sharding data.

        Args:
          params: A collection of sharding_data params.

        Returns:
          An implementation of EgtestsApp including the sharding_data params.
        """
        return test_apps.EgtestsApp(params['app'],
                                    included_tests=params['test_cases'],
                                    env_vars=self.env_vars,
                                    test_args=self.test_args,
                                    release=self.release,
                                    host_app_path=params['host'])
Example #11
 def testLaunchCommand_notRestartPassedTest(self, mock_collect_results):
     egtests = test_apps.EgtestsApp(_EGTESTS_APP_PATH)
     collection = ResultCollection(test_results=[
         TestResult('Class1/passedTest1', TestStatus.PASS),
         TestResult('Class1/passedTest2', TestStatus.PASS)
     ])
     mock_collect_results.side_effect = [collection]
     launch_command = xcodebuild_runner.LaunchCommand(egtests,
                                                      _DESTINATION,
                                                      shards=1,
                                                      retries=3)
     launch_command.launch()
     xcodebuild_runner.LaunchCommand(egtests,
                                     _DESTINATION,
                                     shards=1,
                                     retries=3)
     self.assertEqual(1, len(mock_collect_results.mock_calls))
Example #12
 def testLaunchCommand_notRestartPassedTest(self, mock_collect_results,
                                            xcode_version):
     egtests = test_apps.EgtestsApp(_EGTESTS_APP_PATH)
     xcode_version.return_value = {'version': '10.2.1'}
     mock_collect_results.side_effect = [{
         'failed': {
             'BUILD_INTERRUPTED': 'BUILD_INTERRUPTED: attempt # 0'
         },
         'passed': ['Class1/passedTest1', 'Class1/passedTest2']
     }]
     launch_command = xcodebuild_runner.LaunchCommand(egtests,
                                                      _DESTINATION,
                                                      shards=1,
                                                      retries=3,
                                                      out_dir=self.tmpdir)
     self.fake_launch_attempt(launch_command, ['pass'])
     launch_command.launch()
     self.assertEqual(1, len(launch_command.test_results['attempts']))
Example #13
 def testLaunchCommand_restartCrashed1stAttempt(self, mock_collect_results):
     egtests = test_apps.EgtestsApp(_EGTESTS_APP_PATH)
     crashed_collection = ResultCollection()
     crashed_collection.crashed = True
     mock_collect_results.side_effect = [
         crashed_collection,
         ResultCollection(test_results=[
             TestResult('Class1/passedTest1', TestStatus.PASS),
             TestResult('Class1/passedTest2', TestStatus.PASS)
         ])
     ]
     launch_command = xcodebuild_runner.LaunchCommand(egtests,
                                                      _DESTINATION,
                                                      shards=1,
                                                      retries=3)
     overall_result = launch_command.launch()
     self.assertFalse(overall_result.crashed)
     self.assertEqual(len(overall_result.all_test_names()), 2)
     self.assertEqual(overall_result.expected_tests(),
                      set(['Class1/passedTest1', 'Class1/passedTest2']))
Example #14
 def testLaunchCommand_restartFailed1stAttempt(self, mock_collect_results,
                                               xcode_version):
     egtests = test_apps.EgtestsApp(_EGTESTS_APP_PATH)
     xcode_version.return_value = {'version': '10.2.1'}
     mock_collect_results.side_effect = [{
         'failed': {
             'TESTS_DID_NOT_START': ['not started']
         },
         'passed': []
     }, {
         'failed': {},
         'passed': ['Class1/passedTest1', 'Class1/passedTest2']
     }]
     launch_command = xcodebuild_runner.LaunchCommand(egtests,
                                                      _DESTINATION,
                                                      shards=1,
                                                      retries=3,
                                                      out_dir=self.tmpdir)
     self.fake_launch_attempt(launch_command, ['not_started', 'pass'])
     launch_command.launch()
     self.assertEqual(1, len(launch_command.test_results))
Example #15
    def launch(self):
        """Launches the test app."""
        self.set_up()
        destination = 'id=%s' % self.udid
        if self.xctest:
            test_app = test_apps.EgtestsApp(self.app_path,
                                            included_tests=self.test_cases,
                                            env_vars=self.env_vars,
                                            test_args=self.test_args)
        else:
            test_app = test_apps.GTestsApp(self.app_path,
                                           included_tests=self.test_cases,
                                           env_vars=self.env_vars,
                                           test_args=self.test_args)
        out_dir = os.path.join(self.out_dir, 'TestResults')
        cmd = self.get_launch_command(test_app, out_dir, destination,
                                      self.shards)
        try:
            result = self._run(cmd=cmd, shards=self.shards or 1)
            if result.crashed and not result.crashed_test:
                # If the app crashed but not during any particular test case, assume
                # it crashed on startup. Try one more time.
                self.shutdown_and_restart()
                LOGGER.warning('Crashed on startup, retrying...\n')
                out_dir = os.path.join(self.out_dir,
                                       'retry_after_crash_on_startup')
                cmd = self.get_launch_command(test_app, out_dir, destination,
                                              self.shards)
                result = self._run(cmd)

            if result.crashed and not result.crashed_test:
                raise AppLaunchError

            passed = result.passed_tests
            failed = result.failed_tests
            flaked = result.flaked_tests

            try:
                while result.crashed and result.crashed_test:
                    # If the app crashes during a specific test case, then resume at the
                    # next test case. This is achieved by filtering out every test case
                    # which has already run.
                    LOGGER.warning('Crashed during %s, resuming...\n',
                                   result.crashed_test)
                    test_app.excluded_tests = (passed +
                                               list(failed.keys()) +
                                               list(flaked.keys()))
                    retry_out_dir = os.path.join(
                        self.out_dir,
                        'retry_after_crash_%d' % int(time.time()))
                    result = self._run(
                        self.get_launch_command(
                            test_app,
                            os.path.join(retry_out_dir, str(int(time.time()))),
                            destination))
                    passed.extend(result.passed_tests)
                    failed.update(result.failed_tests)
                    flaked.update(result.flaked_tests)
            except OSError as e:
                if e.errno == errno.E2BIG:
                    LOGGER.error('Too many test cases to resume.')
                else:
                    raise

            # Retry failed test cases.
            retry_results = {}
            test_app.excluded_tests = []
            if self.retries and failed:
                LOGGER.warning('%s tests failed and will be retried.\n',
                               len(failed))
                for i in range(self.retries):
                    for test in failed.keys():
                        LOGGER.info('Retry #%s for %s.\n', i + 1, test)
                        test_app.included_tests = [test]
                        retry_out_dir = os.path.join(self.out_dir,
                                                     test + '_failed',
                                                     'retry_%d' % i)
                        retry_result = self._run(
                            self.get_launch_command(test_app, retry_out_dir,
                                                    destination))
                        # If the test passed on retry, consider it a flake instead of a failure.
                        if test in retry_result.passed_tests:
                            flaked[test] = failed.pop(test)
                        # Save the result of the latest run for each test.
                        retry_results[test] = retry_result

            # Build test_results.json.
            # Check if any of the retries crashed in addition to the original run.
            interrupted = (result.crashed
                           or any([r.crashed for r in retry_results.values()]))
            self.test_results['interrupted'] = interrupted
            self.test_results['num_failures_by_type'] = {
                'FAIL': len(failed) + len(flaked),
                'PASS': len(passed),
            }
            tests = collections.OrderedDict()
            for test in passed:
                tests[test] = {'expected': 'PASS', 'actual': 'PASS'}
            for test in failed:
                tests[test] = {'expected': 'PASS', 'actual': 'FAIL'}
            for test in flaked:
                tests[test] = {'expected': 'PASS', 'actual': 'FAIL'}
            self.test_results['tests'] = tests

            self.logs['passed tests'] = passed
            if flaked:
                self.logs['flaked tests'] = flaked
            if failed:
                self.logs['failed tests'] = failed
            for test, log_lines in failed.items():
                self.logs[test] = log_lines
            for test, log_lines in flaked.items():
                self.logs[test] = log_lines

            return not failed and not interrupted
        finally:
            self.tear_down()
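A usage note: launch() reduces the whole run to a boolean (no failed tests and not interrupted), so a thin driver can map it straight onto a process exit code. A minimal sketch, assuming an already-configured runner object is handed in; the function and parameter names are illustrative:

import sys

def main(runner):
    # `runner` is assumed to expose the launch() shown above; True means
    # every test passed (or flaked) and no attempt was interrupted.
    success = runner.launch()
    sys.exit(0 if success else 1)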
Example #16
 def test_found_xctest(self, mock_listdir):
     mock_listdir.return_value = [
         '/path/to/test_app.app/PlugIns/any_egtests.xctest'
     ]
     self.assertEqual('/PlugIns/any_egtests.xctest',
                      test_apps.EgtestsApp(_TEST_APP_PATH)._xctest_path())
Example #17
 def testEgtests_not_found_egtests_app(self):
     self.mock(os.path, 'exists', lambda _: False)
     with self.assertRaises(test_runner.AppNotFoundError):
         test_apps.EgtestsApp(_EGTESTS_APP_PATH)
Example #18
 def testEgtests_not_found_plugins(self):
     egtests = test_apps.EgtestsApp(_EGTESTS_APP_PATH)
     self.mock(os.path, 'exists', lambda _: False)
     with self.assertRaises(test_runner.PlugInsNotFoundError):
         egtests._xctest_path()
Example #19
test_app = os.path.join(args.build_dir,
                        'ios_cwt_chromedriver_tests_module-Runner.app')
host_app = os.path.join(args.build_dir, 'ios_cwt_chromedriver_tests.app')
destination = iossim_util.get_simulator(args.device, args.os)

if not os.path.exists(args.out_dir):
    os.mkdir(args.out_dir)

# Make sure each run produces a unique output directory, since reusing an
# existing directory will cause CWTChromeDriver's dummy test case to get
# skipped, meaning that CWTChromeDriver's http server won't get launched.
output_directory = os.path.join(args.out_dir, 'run%d' % int(time.time()))

inserted_libs = []
if args.asan_build:
    inserted_libs = [
        os.path.join(args.build_dir, 'libclang_rt.asan_iossim_dynamic.dylib')
    ]

egtests_app = test_apps.EgtestsApp(egtests_app=test_app,
                                   test_args=['--port %s' % args.port],
                                   host_app_path=host_app,
                                   inserted_libs=inserted_libs)

launch_command = xcodebuild_runner.LaunchCommand(egtests_app,
                                                 destination,
                                                 shards=1,
                                                 retries=1,
                                                 out_dir=output_directory)

launch_command.launch()
Example #20
    def launch(self):
        """Launches the test app."""
        self.set_up()
        destination = 'id=%s' % self.udid
        if self.xctest:
            test_app = test_apps.EgtestsApp(self.app_path,
                                            included_tests=self.test_cases,
                                            env_vars=self.env_vars,
                                            test_args=self.test_args)
        elif self.xctest_path:

            if self.__class__.__name__ == 'DeviceTestRunner':
                # When self.xctest is False and self.xctest_path is set, and
                # it's using a device runner, this is an XCTest-hosted unit
                # test, which currently runs on real devices.
                # TODO(crbug.com/1006881): Separate "running style" from "parser style"
                # for XCtests and Gtests.
                test_app = test_apps.DeviceXCTestUnitTestsApp(
                    self.app_path,
                    included_tests=self.test_cases,
                    env_vars=self.env_vars,
                    test_args=self.test_args)
            else:
                raise XCTestConfigError(
                    'Trying to run a DeviceXCTestUnitTestsApp on a '
                    'non-device runner!')

        else:
            test_app = test_apps.GTestsApp(self.app_path,
                                           included_tests=self.test_cases,
                                           env_vars=self.env_vars,
                                           test_args=self.test_args)
        out_dir = os.path.join(self.out_dir, 'TestResults')
        cmd = self.get_launch_command(test_app, out_dir, destination,
                                      self.shards)
        try:
            result = self._run(cmd=cmd, shards=self.shards or 1)
            if result.crashed and not result.crashed_test:
                # If the app crashed but not during any particular test case, assume
                # it crashed on startup. Try one more time.
                self.shutdown_and_restart()
                LOGGER.warning('Crashed on startup, retrying...\n')
                out_dir = os.path.join(self.out_dir,
                                       'retry_after_crash_on_startup')
                cmd = self.get_launch_command(test_app, out_dir, destination,
                                              self.shards)
                result = self._run(cmd)

            if result.crashed and not result.crashed_test:
                raise AppLaunchError

            passed = result.passed_tests
            failed = result.failed_tests
            flaked = result.flaked_tests

            try:
                while result.crashed and result.crashed_test:
                    # If the app crashes during a specific test case, then resume at the
                    # next test case. This is achieved by filtering out every test case
                    # which has already run.
                    LOGGER.warning('Crashed during %s, resuming...\n',
                                   result.crashed_test)
                    test_app.excluded_tests = (passed +
                                               list(failed.keys()) +
                                               list(flaked.keys()))
                    retry_out_dir = os.path.join(
                        self.out_dir,
                        'retry_after_crash_%d' % int(time.time()))
                    result = self._run(
                        self.get_launch_command(
                            test_app,
                            os.path.join(retry_out_dir, str(int(time.time()))),
                            destination))
                    passed.extend(result.passed_tests)
                    failed.update(result.failed_tests)
                    flaked.update(result.flaked_tests)
            except OSError as e:
                if e.errno == errno.E2BIG:
                    LOGGER.error('Too many test cases to resume.')
                else:
                    raise

            # Instantiate this after crash retries so that all tests have a first
            # pass before entering the retry block below.
            # For each retry that passes, we want to mark it separately as passed
            # (i.e. "FAIL PASS") with is_flaky=True.
            output = sju.StdJson(passed=passed, failed=failed, flaked=flaked)

            # Retry failed test cases.
            retry_results = {}
            test_app.excluded_tests = []
            if self.retries and failed:
                LOGGER.warning('%s tests failed and will be retried.\n',
                               len(failed))
                for i in range(self.retries):
                    for test in failed.keys():
                        LOGGER.info('Retry #%s for %s.\n', i + 1, test)
                        test_app.included_tests = [test]
                        retry_out_dir = os.path.join(self.out_dir,
                                                     test + '_failed',
                                                     'retry_%d' % i)
                        retry_result = self._run(
                            self.get_launch_command(test_app, retry_out_dir,
                                                    destination))
                        # If the test passed on retry, consider it a flake instead of a failure.
                        if test in retry_result.passed_tests:
                            flaked[test] = failed.pop(test)
                            output.mark_passed(test, flaky=True)
                        # Save the result of the latest run for each test.
                        retry_results[test] = retry_result

            # Build test_results.json.
            # Check if any of the retries crashed in addition to the original run.
            interrupted = (result.crashed
                           or any([r.crashed for r in retry_results.values()]))
            self.test_results['interrupted'] = interrupted
            self.test_results['num_failures_by_type'] = {
                'FAIL': len(failed) + len(flaked),
                'PASS': len(passed),
            }

            self.test_results['tests'] = output.tests

            self.logs['passed tests'] = passed
            if flaked:
                self.logs['flaked tests'] = flaked
            if failed:
                self.logs['failed tests'] = failed
            for test, log_lines in failed.items():
                self.logs[test] = log_lines
            for test, log_lines in flaked.items():
                self.logs[test] = log_lines

            return not failed and not interrupted
        finally:
            self.tear_down()
Example #21
    default='/tmp/cwt_chromedriver',
    help='Output directory for CWTChromeDriver\'s dummy test case')
parser.add_argument('--os', default='14.3', help='iOS version')
parser.add_argument('--device', default='iPhone 11 Pro', help='Device type')
args = parser.parse_args()

test_app = os.path.join(args.build_dir,
                        'ios_cwt_chromedriver_tests_module-Runner.app')
host_app = os.path.join(args.build_dir, 'ios_cwt_chromedriver_tests.app')
destination = iossim_util.get_simulator(args.device, args.os)

if not os.path.exists(args.out_dir):
    os.mkdir(args.out_dir)

# Make sure each run produces a unique output directory, since reusing an
# existing directory will cause CWTChromeDriver's dummy test case to get
# skipped, meaning that CWTChromeDriver's http server won't get launched.
output_directory = os.path.join(args.out_dir, 'run%d' % int(time.time()))

egtests_app = test_apps.EgtestsApp(egtests_app=test_app,
                                   test_args=['--port %s' % args.port],
                                   host_app_path=host_app)

launch_command = xcodebuild_runner.LaunchCommand(egtests_app,
                                                 destination,
                                                 shards=1,
                                                 retries=1,
                                                 out_dir=output_directory)

launch_command.launch()
Example #22
    def launch(self):
        """Launches tests using xcodebuild."""
        launch_commands = []
        for params in self.sharding_data:
            test_app = test_apps.EgtestsApp(
                params['app'],
                included_tests=params['test_cases'],
                env_vars=self.env_vars,
                test_args=self.test_args,
                host_app_path=params['host'])
            launch_commands.append(
                LaunchCommand(test_app,
                              udid=params['udid'],
                              shards=params['shards'],
                              retries=self.retries,
                              out_dir=os.path.join(self.out_dir,
                                                   params['udid']),
                              env=self.get_launch_env()))

        thread_pool = pool.ThreadPool(len(launch_commands))
        attempts_results = []
        for result in thread_pool.imap_unordered(LaunchCommand.launch,
                                                 launch_commands):
            attempts_results.append(result['test_results']['attempts'])

        # Gets passed tests
        self.logs['passed tests'] = []
        for shard_attempts in attempts_results:
            for attempt in shard_attempts:
                self.logs['passed tests'].extend(attempt['passed'])

        # If the last attempt does not have failures, mark failed as empty
        self.logs['failed tests'] = []
        for shard_attempts in attempts_results:
            if shard_attempts[-1]['failed']:
                self.logs['failed tests'].extend(
                    shard_attempts[-1]['failed'].keys())

        # Gets all failures/flakes and lists them in bot summary
        all_failures = set()
        for shard_attempts in attempts_results:
            for attempt, attempt_results in enumerate(shard_attempts):
                for failure in attempt_results['failed']:
                    if failure not in self.logs:
                        self.logs[failure] = []
                    self.logs[failure].append('%s: attempt # %d' %
                                              (failure, attempt))
                    self.logs[failure].extend(
                        attempt_results['failed'][failure])
                    all_failures.add(failure)

        # Gets only flaky (not failed) tests.
        self.logs['flaked tests'] = list(all_failures -
                                         set(self.logs['failed tests']))

        # Gets not-started/interrupted tests
        all_tests_to_run = set(get_all_tests(self.app_path, self.test_cases))
        aborted_tests = list(all_tests_to_run -
                             set(self.logs['failed tests']) -
                             set(self.logs['passed tests']))
        aborted_tests.sort()
        self.logs['aborted tests'] = aborted_tests

        self.test_results['interrupted'] = bool(aborted_tests)
        self.test_results['num_failures_by_type'] = {
            'FAIL':
            len(self.logs['failed tests'] + self.logs['aborted tests']),
            'PASS': len(self.logs['passed tests']),
        }
        self.test_results['tests'] = collections.OrderedDict()

        for shard_attempts in attempts_results:
            for attempt, attempt_results in enumerate(shard_attempts):

                for test in (list(attempt_results['failed'].keys()) +
                             self.logs['aborted tests']):
                    if attempt == len(shard_attempts) - 1:
                        test_result = 'FAIL'
                    else:
                        test_result = self.test_results['tests'].get(
                            test, {}).get('actual', '') + ' FAIL'
                    self.test_results['tests'][test] = {
                        'expected': 'PASS',
                        'actual': test_result.strip()
                    }

                for test in attempt_results['passed']:
                    test_result = self.test_results['tests'].get(test, {}).get(
                        'actual', '') + ' PASS'
                    self.test_results['tests'][test] = {
                        'expected': 'PASS',
                        'actual': test_result.strip()
                    }
                    if 'FAIL' in test_result:
                        self.test_results['tests'][test]['is_flaky'] = True

        # Test is failed if there are failures for the last run.
        return not self.logs['failed tests']
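To make the 'actual' string folding above concrete, here is a self-contained rendition of the same accumulation rule for a single test that fails once and then passes (simplified: the real loop above also special-cases the final attempt for still-failing tests):

import collections

# Two simulated attempts for one shard: the test fails, then passes.
attempts = [{'failed': {'Case/testA': ['log line']}, 'passed': []},
            {'failed': {}, 'passed': ['Case/testA']}]

tests = collections.OrderedDict()
for attempt, attempt_results in enumerate(attempts):
    for test in attempt_results['failed']:
        actual = tests.get(test, {}).get('actual', '') + ' FAIL'
        tests[test] = {'expected': 'PASS', 'actual': actual.strip()}
    for test in attempt_results['passed']:
        actual = tests.get(test, {}).get('actual', '') + ' PASS'
        tests[test] = {'expected': 'PASS', 'actual': actual.strip()}
        if 'FAIL' in actual:
            tests[test]['is_flaky'] = True

# tests['Case/testA'] == {'expected': 'PASS', 'actual': 'FAIL PASS',
#                         'is_flaky': True}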
Example #23
 def testEgtests_found_xctest(self):
     self.assertEqual(
         '/PlugIns/any_egtests.xctest',
         test_apps.EgtestsApp(_EGTESTS_APP_PATH)._xctest_path())
Example #24
    def launch(self):
        """Launches tests using xcodebuild."""
        launch_commands = []
        for params in self.sharding_data:
            test_app = test_apps.EgtestsApp(
                params['app'],
                included_tests=params['test_cases'],
                env_vars=self.env_vars,
                test_args=self.test_args,
                release=self.release,
                host_app_path=params['host'])
            launch_commands.append(
                LaunchCommand(
                    test_app,
                    udid=params['udid'],
                    shards=params['shards'],
                    retries=self.retries,
                    out_dir=os.path.join(self.out_dir, params['udid']),
                    use_clang_coverage=(hasattr(self, 'use_clang_coverage')
                                        and self.use_clang_coverage),
                    env=self.get_launch_env()))

        thread_pool = pool.ThreadPool(len(launch_commands))
        attempts_results = []
        for result in thread_pool.imap_unordered(LaunchCommand.launch,
                                                 launch_commands):
            attempts_results.append(result['test_results']['attempts'])

        # Gets passed tests
        self.logs['passed tests'] = []
        for shard_attempts in attempts_results:
            for attempt in shard_attempts:
                self.logs['passed tests'].extend(attempt['passed'])

        # If the last attempt does not have failures, mark failed as empty
        self.logs['failed tests'] = []
        for shard_attempts in attempts_results:
            if shard_attempts[-1]['failed']:
                self.logs['failed tests'].extend(
                    shard_attempts[-1]['failed'].keys())

        # Gets all failures/flakes and lists them in bot summary
        all_failures = set()
        for shard_attempts in attempts_results:
            for attempt, attempt_results in enumerate(shard_attempts):
                for failure in attempt_results['failed']:
                    if failure not in self.logs:
                        self.logs[failure] = []
                    self.logs[failure].append('%s: attempt # %d' %
                                              (failure, attempt))
                    self.logs[failure].extend(
                        attempt_results['failed'][failure])
                    all_failures.add(failure)

        # Gets only flaky (not failed) tests.
        self.logs['flaked tests'] = list(all_failures -
                                         set(self.logs['failed tests']))

        # Gets not-started/interrupted tests.
        # all_tests_to_run takes into consideration that only a subset of tests may
        # have run due to the test sharding logic in run.py.
        all_tests_to_run = set([
            test_name for launch_command in launch_commands
            for test_name in launch_command.egtests_app.get_all_tests()
        ])

        aborted_tests = []
        # TODO(crbug.com/1048758): For device targets, the list of test names parsed
        # from otool output is incorrect. For multitasking or any flaky test suite,
        # the list contains more tests than what actually runs.
        if (self.__class__.__name__ != 'DeviceXcodeTestRunner'
                and 'ios_chrome_multitasking_eg' not in self.app_path
                and '_flaky_eg' not in self.app_path):
            aborted_tests = list(all_tests_to_run -
                                 set(self.logs['failed tests']) -
                                 set(self.logs['passed tests']))
        aborted_tests.sort()
        self.logs['aborted tests'] = aborted_tests

        self.test_results['interrupted'] = bool(aborted_tests)
        self.test_results['num_failures_by_type'] = {
            'FAIL':
            len(self.logs['failed tests'] + self.logs['aborted tests']),
            'PASS': len(self.logs['passed tests']),
        }

        output = sju.StdJson()
        for shard_attempts in attempts_results:
            for attempt, attempt_results in enumerate(shard_attempts):

                for test in attempt_results['failed'].keys():
                    output.mark_failed(test)

                # 'aborted tests' in logs is an array of strings, each string defined
                # as "{TestCase}/{testMethod}"
                for test in self.logs['aborted tests']:
                    output.mark_timeout(test)

                for test in attempt_results['passed']:
                    output.mark_passed(test)

        self.test_results['tests'] = output.tests

        # Test is failed if there are failures for the last run.
        # or if there are aborted tests.
        return not self.logs['failed tests'] and not self.logs['aborted tests']
Example #25
 def testEgtests_not_found_xctest(self, mock_listdir):
     mock_listdir.return_value = ['random_file']
     egtest = test_apps.EgtestsApp(_EGTESTS_APP_PATH)
     with self.assertRaises(test_runner.XCTestPlugInNotFoundError):
         egtest._xctest_path()
Example #26
 def testEgtests_xctestRunNode_without_filter(self):
     egtest_node = test_apps.EgtestsApp(
         _EGTESTS_APP_PATH).fill_xctestrun_node()['any_egtests_module']
     self.assertNotIn('OnlyTestIdentifiers', egtest_node)
     self.assertNotIn('SkipTestIdentifiers', egtest_node)