def launch(self):
  """Launches tests using xcodebuild on a single simulator.

  Runs the test app via LaunchCommand, deletes the simulator afterwards,
  then folds disabled and unexpectedly-skipped tests into the result and
  populates self.test_results / self.logs.

  Returns:
    True if the overall run produced no unexpected test results.
  """
  test_app = self.get_launch_test_app()
  launch_command = LaunchCommand(
      test_app,
      udid=self.udid,
      shards=self.shards,
      retries=self.retries,
      out_dir=os.path.join(self.out_dir, self.udid),
      # getattr with a default is the idiomatic form of
      # `hasattr(self, x) and self.x` — False when the attribute is absent.
      use_clang_coverage=getattr(self, 'use_clang_coverage', False),
      env=self.get_launch_env())
  overall_result = launch_command.launch()

  # Deletes simulator used in the tests after tests end.
  if iossim_util.is_device_with_udid_simulator(self.udid):
    iossim_util.delete_simulator_by_udid(self.udid)

  # Adds disabled tests to result.
  overall_result.add_and_report_test_names_status(
      launch_command.egtests_app.disabled_tests,
      TestStatus.SKIP,
      expected_status=TestStatus.SKIP,
      test_log='Test disabled.')

  # Adds unexpectedly skipped tests to result if applicable.
  tests_selected_at_runtime = _tests_decided_at_runtime(self.app_path)
  unexpectedly_skipped = []
  # TODO(crbug.com/1048758): For the multitasking or any flaky test suites,
  # |all_tests_to_run| contains more tests than what actually runs.
  if not tests_selected_at_runtime:
    # |all_tests_to_run| takes into consideration that only a subset of tests
    # may have run due to the test sharding logic in run.py.
    all_tests_to_run = set(launch_command.egtests_app.get_all_tests())
    unexpectedly_skipped = list(all_tests_to_run -
                                overall_result.all_test_names())
    overall_result.add_and_report_test_names_status(
        unexpectedly_skipped,
        TestStatus.SKIP,
        test_log=(
            'The test is compiled in test target but was unexpectedly '
            'not run or not finished.'))

  # Reports a dummy crashed test result to indicate the crash status, i.e.
  # some tests might be unexpectedly skipped and do not appear in result.
  if unexpectedly_skipped or (overall_result.crashed and
                              tests_selected_at_runtime):
    overall_result.add_and_report_crash(crash_message_prefix_line=(
        'Test application crash happened and may '
        'result in missing tests:'))

  self.test_results = overall_result.standard_json_output(path_delimiter='/')
  self.logs.update(overall_result.test_runner_logs())
  # |never_expected_tests| includes all unexpected results, including the
  # dummy crashed status result if any.
  return not overall_result.never_expected_tests()
def deleteSimulator(self, udid=None):
  """Removes a dynamically created simulator device.

  Args:
    udid: (str|None) UDID of the simulator to delete. No-op when falsy.
  """
  if not udid:
    return
  iossim_util.delete_simulator_by_udid(udid)
def launch(self):
  """Launches tests using xcodebuild across sharded simulators in parallel.

  Builds one LaunchCommand per shard in |self.sharding_data|, runs them
  concurrently on a thread pool, then aggregates passed/failed/disabled/
  flaked/aborted tests into self.logs and self.test_results.

  Returns:
    True if the final attempt of every shard had no failures and no tests
    were aborted (not started / interrupted).
  """
  launch_commands = []
  for params in self.sharding_data:
    test_app = self.get_launch_test_app(params)
    launch_commands.append(
        LaunchCommand(
            test_app,
            udid=params['udid'],
            shards=params['shards'],
            retries=self.retries,
            out_dir=os.path.join(self.out_dir, params['udid']),
            # getattr with a default is the idiomatic form of
            # `hasattr(self, x) and self.x`.
            use_clang_coverage=getattr(self, 'use_clang_coverage', False),
            env=self.get_launch_env()))

  # One worker thread per shard; each result carries that shard's list of
  # per-attempt outcomes.
  thread_pool = pool.ThreadPool(len(launch_commands))
  attempts_results = []
  for result in thread_pool.imap_unordered(LaunchCommand.launch,
                                           launch_commands):
    attempts_results.append(result['test_results']['attempts'])

  # Deletes simulator used in the tests after tests end.
  if iossim_util.is_device_with_udid_simulator(self.udid):
    iossim_util.delete_simulator_by_udid(self.udid)

  # Gets passed tests.
  self.logs['passed tests'] = []
  for shard_attempts in attempts_results:
    for attempt in shard_attempts:
      self.logs['passed tests'].extend(attempt['passed'])

  # If the last attempt does not have failures, mark failed as empty.
  self.logs['failed tests'] = []
  for shard_attempts in attempts_results:
    if shard_attempts[-1]['failed']:
      self.logs['failed tests'].extend(shard_attempts[-1]['failed'].keys())

  # Gets disabled tests from test app object if any.
  self.logs['disabled tests'] = []
  for launch_command in launch_commands:
    self.logs['disabled tests'].extend(
        launch_command.egtests_app.disabled_tests)

  # Gets all failures/flakes and lists them in bot summary.
  all_failures = set()
  for shard_attempts in attempts_results:
    for attempt, attempt_results in enumerate(shard_attempts):
      for failure in attempt_results['failed']:
        # setdefault replaces the `if failure not in self.logs` dance.
        self.logs.setdefault(failure, [])
        self.logs[failure].append('%s: attempt # %d' % (failure, attempt))
        self.logs[failure].extend(attempt_results['failed'][failure])
        all_failures.add(failure)

  # Gets only flaky (failed at some attempt but not in the final one) tests.
  self.logs['flaked tests'] = list(all_failures -
                                   set(self.logs['failed tests']))

  # Gets not-started/interrupted tests.
  # all_tests_to_run takes into consideration that only a subset of tests may
  # have run due to the test sharding logic in run.py.
  all_tests_to_run = {
      test_name for launch_command in launch_commands
      for test_name in launch_command.egtests_app.get_all_tests()
  }

  aborted_tests = []
  # TODO(crbug.com/1048758): For device targets, the list of test names parsed
  # from otool output is incorrect. For multitasking or any flaky test suite,
  # the list contains more tests than what actually runs.
  if (self.__class__.__name__ != 'DeviceXcodeTestRunner' and
      'ios_chrome_multitasking_eg' not in self.app_path and
      '_flaky_eg' not in self.app_path):
    aborted_tests = list(all_tests_to_run - set(self.logs['failed tests']) -
                         set(self.logs['passed tests']))
  aborted_tests.sort()
  self.logs['aborted tests'] = aborted_tests

  self.test_results['interrupted'] = bool(aborted_tests)
  self.test_results['num_failures_by_type'] = {
      'FAIL': len(self.logs['failed tests'] + self.logs['aborted tests']),
      'PASS': len(self.logs['passed tests']),
  }

  output = sju.StdJson()
  for shard_attempts in attempts_results:
    for attempt, attempt_results in enumerate(shard_attempts):
      for test in attempt_results['failed'].keys():
        # TODO(crbug.com/1178923): Remove unicode check when it's figured out
        # where unicode is introduced.
        log_lines = []
        for line in self.logs.get(test, []):
          # Python 2 only: coerce unicode log lines to utf-8 bytes.
          if sys.version_info.major == 2:
            if isinstance(line, unicode):
              LOGGER.warning('Unicode string: %s' % line)
              line = line.encode('utf-8')
          log_lines.append(line)
        output.mark_failed(test, test_log='\n'.join(log_lines))

      # 'aborted tests' in logs is an array of strings, each string defined
      # as "{TestCase}/{testMethod}".
      for test in self.logs['aborted tests']:
        output.mark_timeout(test)

      for test in attempt_results['passed']:
        output.mark_passed(test)

  output.mark_all_disabled(self.logs['disabled tests'])
  output.finalize()

  self.test_results['tests'] = output.tests
  # Test is failed if there are failures for the last run
  # or if there are aborted tests.
  return not self.logs['failed tests'] and not self.logs['aborted tests']
def test_delete_simulator_by_udid(self, subprocess_mock, _):
  """Ensures that command is correct."""
  iossim_util.delete_simulator_by_udid('UDID')
  expected_command = ['xcrun', 'simctl', 'delete', 'UDID']
  actual_command = subprocess_mock.call_args[0][0]
  self.assertEqual(expected_command, actual_command)