def print_one_line_summary(self, total, expected, unexpected):
    """Print a one-line summary of the test run to stdout.

    Args:
        total: total number of tests run
        expected: number of expected results
        unexpected: number of unexpected results
    """
    if self.disabled('one-line-summary'):
        return

    did_not_run = total - expected - unexpected
    suffix = ''
    if did_not_run:
        self._write("")
        suffix = " (%d didn't run)" % did_not_run

    # Guard-clause ordering: unexpected failures first, then partial
    # runs, then the fully-expected cases.
    if unexpected:
        self._write("%s ran as expected, %d didn't%s:" %
                    (grammar.pluralize('test', expected), unexpected, suffix))
    elif expected != total:
        self._write("%s ran as expected%s." %
                    (grammar.pluralize('test', expected), suffix))
    elif expected > 1:
        self._write("All %d tests ran as expected." % expected)
    else:
        self._write("The test ran as expected.")
    self._write("")
def print_one_line_summary(self, total, expected, unexpected):
    """Write a single-line summary of the run, honouring the
    'one-line-summary' disable flag.

    Args:
        total: total number of tests run
        expected: number of expected results
        unexpected: number of unexpected results
    """
    if self.disabled('one-line-summary'):
        return
    not_run = total - expected - unexpected
    not_run_note = ''
    if not_run:
        self._write("")
        not_run_note = " (%d didn't run)" % not_run
    # Build the message first, write it once at the end.
    if unexpected != 0:
        message = "%s ran as expected, %d didn't%s:" % (
            grammar.pluralize('test', expected), unexpected, not_run_note)
    elif expected != total:
        message = "%s ran as expected%s." % (
            grammar.pluralize('test', expected), not_run_note)
    else:
        message = ("All %d tests ran as expected." % expected
                   if expected > 1 else "The test ran as expected.")
    self._write(message)
    self._write("")
def _print_one_line_summary(self, total, expected, unexpected): incomplete = total - expected - unexpected incomplete_str = '' if incomplete: self._print_default("") incomplete_str = " (%d didn't run)" % incomplete if self._options.verbose or self._options.debug_rwt_logging or unexpected: self.writeln("") summary = '' if unexpected == 0: if expected == total: if expected > 1: summary = "All %d tests ran as expected." % expected else: summary = "The test ran as expected." else: summary = "%s ran as expected%s." % (grammar.pluralize( 'test', expected), incomplete_str) else: summary = "%s ran as expected, %d didn't%s:" % (grammar.pluralize( 'test', expected), unexpected, incomplete_str) self._print_quiet(summary) self._print_quiet("")
def _print_counts(self, counters_by_email): def counter_cmp(a_tuple, b_tuple): # split the tuples # the second element is the "counter" structure _, a_counter = a_tuple _, b_counter = b_tuple count_result = cmp(a_counter['count'], b_counter['count']) if count_result: return -count_result return cmp(a_counter['latest_name'].lower(), b_counter['latest_name'].lower()) for author_email, counter in sorted(counters_by_email.items(), counter_cmp): if author_email != counter['latest_email']: continue contributor = self._committer_list.contributor_by_email(author_email) author_name = counter['latest_name'] patch_count = counter['count'] counter['names'] = counter['names'] - set([author_name]) counter['emails'] = counter['emails'] - set([author_email]) alias_list = [] for alias in counter['names']: alias_list.append(alias) for alias in counter['emails']: alias_list.append(alias) if alias_list: print "CONTRIBUTOR: %s (%s) has %s %s" % (author_name, author_email, grammar.pluralize(patch_count, "reviewed patch"), "(aliases: " + ", ".join(alias_list) + ")") else: print "CONTRIBUTOR: %s (%s) has %s" % (author_name, author_email, grammar.pluralize(patch_count, "reviewed patch")) return
def _print_one_line_summary(self, total, expected, unexpected): incomplete = total - expected - unexpected incomplete_str = "" if incomplete: self._print_default("") incomplete_str = " (%d didn't run)" % incomplete if self._options.verbose or self._options.debug_rwt_logging or unexpected: self.writeln("") summary = "" if unexpected == 0: if expected == total: if expected > 1: summary = "All %d tests ran as expected." % expected else: summary = "The test ran as expected." else: summary = "%s ran as expected%s." % (grammar.pluralize("test", expected), incomplete_str) else: summary = "%s ran as expected, %d didn't%s:" % ( grammar.pluralize("test", expected), unexpected, incomplete_str, ) self._print_quiet(summary) self._print_quiet("")
def _print_counts(self, counters_by_email): counters = sorted(counters_by_email.items(), key=lambda counter: counter[1]['count']) counters = sorted(counters, key=lambda counter: counter[1]['latest_name']) for author_email, counter in counters: if author_email != counter['latest_email']: continue contributor = self._committer_list.contributor_by_email( author_email) author_name = counter['latest_name'] patch_count = counter['count'] counter['names'] = counter['names'] - set([author_name]) counter['emails'] = counter['emails'] - set([author_email]) alias_list = [] for alias in counter['names']: alias_list.append(alias) for alias in counter['emails']: alias_list.append(alias) if alias_list: print("CONTRIBUTOR: %s (%s) has %s %s" % (author_name, author_email, grammar.pluralize(patch_count, "reviewed patch"), "(aliases: " + ", ".join(alias_list) + ")")) else: print("CONTRIBUTOR: %s (%s) has %s" % (author_name, author_email, grammar.pluralize(patch_count, "reviewed patch"))) return
def check_arguments_and_execute(self, options, args, tool=None):
    """Run the command after validating the argument count.

    Returns:
        1 (after logging usage help) when too few positional arguments
        were given; otherwise execute()'s result, with None coerced to 0.
    """
    required_count = len(self.required_arguments)
    if len(args) >= required_count:
        return self.execute(options, args, tool) or 0
    log("%s required, %s provided. Provided: %s Required: %s\nSee '%s help %s' for usage." % (
        pluralize("argument", required_count),
        pluralize("argument", len(args)),
        "'%s'" % " ".join(args),
        " ".join(self.required_arguments),
        tool.name(),
        self.name))
    return 1
def print_workers_and_shards(self, num_workers, num_shards):
    """Announce how many driver processes will run; shard detail goes
    to the debug log."""
    driver = self._port.driver_name()
    if num_workers != 1:
        self._print_default("Running %s in parallel." %
                            (grammar.pluralize(num_workers, driver)))
        self._print_debug("(%d shards)." % num_shards)
    else:
        self._print_default("Running 1 %s." % driver)
        self._print_debug("(%s)." % grammar.pluralize(num_shards, "shard"))
    self._print_default('')
def print_found(self, num_all_test_files, num_to_run, repeat_each,
                iterations):
    """Report how many tests were found, will run, and will be skipped."""
    repeats = repeat_each * iterations
    parts = ['Found %s; running %d' %
             (grammar.pluralize(num_all_test_files, "test"), num_to_run)]
    # Mention repetition flags only when they actually multiply the run.
    if repeats > 1:
        parts.append(' (%s each: --repeat-each=%d --iterations=%d)' %
                     (grammar.pluralize(repeats, "time"), repeat_each, iterations))
    parts.append(', skipping %d' % (num_all_test_files - num_to_run))
    self._print_default(''.join(parts) + '.')
def execute(self, options, args, tool):
    """Fetch the patches selected by the arguments and process each one."""
    self._prepare_to_process(options, args, tool)
    patches = self._fetch_list_of_patches_to_process(options, args, tool)

    # It's nice to print out total statistics.
    bugs_to_patches = self._collect_patches_by_bug(patches)
    _log.info("Processing %s from %s." % (
        pluralize("patch", len(patches)),
        pluralize("bug", len(bugs_to_patches))))

    for current_patch in patches:
        self._process_patch(current_patch, options, args, tool)
def print_workers_and_shards(self, num_workers, num_shards):
    """Log the worker count (and the shard count at debug level)."""
    driver = self._port.driver_name()
    if num_workers != 1:
        self._print_default('Running {} in parallel.'.format(
            grammar.pluralize(num_workers, driver)))
        self._print_debug('({} shards).'.format(num_shards))
    else:
        self._print_default('Running 1 {}.'.format(driver))
        self._print_debug('({}).'.format(grammar.pluralize(num_shards, "shard")))
    self._print_default('')
def _print_one_line_summary(self, total_time, run_results):
    """Print the final one-line summary, optionally with timing data.

    Args:
        total_time: wall-clock duration of the whole run, in seconds.
        run_results: object with total/expected/unexpected/skip counters
            and per-test results (see callers for the exact type).
    """
    if self._options.timing:
        # Sum of per-test runtimes across all workers.
        parallel_time = sum(
            result.total_run_time
            for result in run_results.results_by_name.values())

        # There is serial overhead in layout_test_runner.run() that we can't easily account for when
        # really running in parallel, but taking the min() ensures that in the worst case
        # (if parallel time is less than run_time) we do account for it.
        serial_time = total_time - min(run_results.run_time, parallel_time)

        speedup = (parallel_time + serial_time) / total_time
        timing_summary = ' in %.2fs (%.2fs in rwt, %.2gx)' % (
            total_time, serial_time, speedup)
    else:
        timing_summary = ''

    # Expected skips are excluded from the headline counts.
    total = run_results.total - run_results.expected_skips
    expected = run_results.expected - run_results.expected_skips
    unexpected = run_results.unexpected
    incomplete = total - expected - unexpected
    incomplete_str = ''
    if incomplete:
        self._print_default("")
        incomplete_str = " (%d didn't run)" % incomplete

    # Blank separator line when output is verbose or something failed.
    if self._options.verbose or self._options.debug_rwt_logging or unexpected:
        self.writeln("")

    expected_summary_str = ''
    if run_results.expected_failures > 0:
        expected_summary_str = " (%d passed, %d didn't)" % (
            expected - run_results.expected_failures,
            run_results.expected_failures)

    summary = ''
    if unexpected == 0:
        if expected == total:
            if expected > 1:
                summary = "All %d tests ran as expected%s%s." % (
                    expected, expected_summary_str, timing_summary)
            else:
                summary = "The test ran as expected%s%s." % (
                    expected_summary_str, timing_summary)
        else:
            summary = "%s ran as expected%s%s%s." % (grammar.pluralize(
                'test', expected), expected_summary_str, incomplete_str,
                timing_summary)
    else:
        summary = "%s ran as expected%s, %d didn't%s%s:" % (
            grammar.pluralize('test', expected), expected_summary_str,
            unexpected, incomplete_str, timing_summary)

    self._print_quiet(summary)
    self._print_quiet("")
def check_arguments_and_execute(self, options, args, tool=None):
    """Validate that enough positional arguments were supplied, then
    dispatch to execute().

    Returns:
        1 on an argument-count error (after logging usage); otherwise
        the result of execute() with a falsy result mapped to 0.
    """
    if len(args) < len(self.required_arguments):
        provided = "'%s'" % " ".join(args)
        required = " ".join(self.required_arguments)
        log("%s required, %s provided. Provided: %s Required: %s\nSee '%s help %s' for usage." % (
            pluralize("argument", len(self.required_arguments)),
            pluralize("argument", len(args)),
            provided,
            required,
            tool.name(),
            self.name))
        return 1
    return self.execute(options, args, tool) or 0
def print_workers_and_shards(self, num_workers, num_shards):
    """Log worker/shard configuration, mentioning the device class when
    one was selected via the options."""
    driver = self._port.driver_name()
    device_note = ''
    if self._options.device_class:
        device_note = ' for device "{}"'.format(self._options.device_class)
    if num_workers != 1:
        self._print_default('Running {} in parallel{}.'.format(
            grammar.pluralize(num_workers, driver), device_note))
        self._print_debug('({} shards).'.format(num_shards))
    else:
        self._print_default('Running 1 {}{}.'.format(driver, device_note))
        self._print_debug('({}).'.format(grammar.pluralize(num_shards, "shard")))
    self._print_default('')
def _fetch_list_of_patches_to_process(self, options, args, tool):
    """Collect reviewed patches from every bug id in args, falling back
    to all (unreviewed) patches when no bug has a reviewed one."""
    def _collect(fetch_patches, noun):
        # Gather patches from each bug, logging a per-bug count.
        collected = []
        for bug_id in args:
            patches = fetch_patches(tool.bugs.fetch_bug(bug_id))
            _log.info("%s found on bug %s." % (pluralize(noun, len(patches)), bug_id))
            collected += patches
        return collected

    all_patches = _collect(lambda bug: bug.reviewed_patches(), "reviewed patch")
    if not all_patches:
        _log.info("No reviewed patches found, looking for unreviewed patches.")
        all_patches = _collect(lambda bug: bug.patches(), "patch")
    return all_patches
def print_workers_and_shards(self, num_workers, num_shards):
    """Describe how many drivers run (optionally per device class) and
    how the tests are sharded."""
    driver = self._port.driver_name()
    device_note = (' for device "{}"'.format(self._options.device_class)
                   if self._options.device_class else '')
    if num_workers == 1:
        self._print_default('Running 1 {}{}.'.format(driver, device_note))
        self._print_debug('({}).'.format(grammar.pluralize(num_shards, "shard")))
        self._print_default('')
        return
    self._print_default('Running {} in parallel{}.'.format(
        grammar.pluralize(num_workers, driver), device_note))
    self._print_debug('({} shards).'.format(num_shards))
    self._print_default('')
def print_found(self, num_all_test_files, num_to_run, repeat_each, iterations):
    """Report how many unique tests were found, run and skipped.

    num_to_run counts every repetition, so it is divided by
    repeat_each * iterations to recover the unique-test count. Floor
    division keeps the result an int on Python 3 (true division would
    produce a float, changing the skip arithmetic's type), matching the
    integer semantics this code had under Python 2.
    """
    repeats = repeat_each * iterations
    num_unique_tests = num_to_run // repeats
    found_str = 'Found %s; running %d' % (
        grammar.pluralize('test', num_all_test_files), num_unique_tests)
    if repeats > 1:
        found_str += ' (%d times each: --repeat-each=%d --iterations=%d)' % (
            repeats, repeat_each, iterations)
    found_str += ', skipping %d' % (num_all_test_files - num_unique_tests)
    self._print_default(found_str + '.')
def feed(self):
    """Submit new r? patches from the last week to the EWS.

    Patches on security-sensitive bugs are downloaded and uploaded to
    the status server first, since the EWS queues cannot fetch them
    from Bugzilla directly.
    """
    current_time = datetime.today()
    # Attachments flagged r? within the past 7 days.
    ids_needing_review = set(
        self._tool.bugs.queries.fetch_attachment_ids_from_review_queue(
            current_time - timedelta(7)))
    # Subset of the above that lives on security bugs.
    security_ids_needing_review = frozenset(
        self._tool.bugs.queries.fetch_attachment_ids_from_review_queue(
            current_time - timedelta(7), only_security_bugs=True))
    new_ids = ids_needing_review.difference(self._ids_sent_to_server)
    _log.info(
        "Feeding EWS (%s, %s new)" % (pluralize(len(ids_needing_review), "r? patch"), len(new_ids)))
    for attachment_id in new_ids:
        # Order doesn't really matter for the EWS.
        # Download patches from security sensitive bugs and upload them to the status server since the
        # EWS queues do not have permission to fetch them directly from Bugzilla.
        # NOTE(review): attachment_data is never read after this
        # assignment; it looks like a leftover.
        attachment_data = None
        if attachment_id in security_ids_needing_review:
            attachment = self._tool.bugs.fetch_attachment(attachment_id)
            if not attachment:
                _log.error('Failed to retrieve attachment {}'.format(
                    attachment_id))
                continue
            self._tool.status_server.upload_attachment(attachment)
        self._tool.status_server.submit_to_ews(attachment_id)
        # Remember what we sent so the next feed() pass skips it.
        self._ids_sent_to_server.add(attachment_id)
def print_result(self, run_time):
    """Print the unittest-style trailer: test count plus elapsed time,
    then either a FAILED line or OK."""
    self.meter.writeln("Ran %s in %.3fs" % (pluralize(self.num_started, "test"), run_time))
    failed = self.num_failures or self.num_errors
    if failed:
        self.meter.writeln("FAILED (failures=%d, errors=%d)\n" % (self.num_failures, self.num_errors))
    else:
        self.meter.writeln("\nOK\n")
def _fetch_list_of_patches_to_process(self, options, args, tool):
    """Return the reviewed patches attached to each bug id in args,
    logging a per-bug count along the way."""
    all_patches = []
    for bug_id in args:
        reviewed = tool.bugs.fetch_bug(bug_id).reviewed_patches()
        log("%s found on bug %s." % (pluralize("reviewed patch", len(reviewed)), bug_id))
        all_patches.extend(reviewed)
    return all_patches
def print_expected(self, num_all_test_files, result_summary, tests_with_result_type_callback):
    """Summarize expectations for the discovered tests: per-result-class
    counts plus any repeat/iteration configuration."""
    self._print_expected('Found %s.' % grammar.pluralize('test', num_all_test_files))
    # One summary line per result class, in a fixed display order.
    for result_type, description in (
            (test_expectations.PASS, "passes"),
            (test_expectations.FAIL, "failures"),
            (test_expectations.FLAKY, "flaky"),
            (test_expectations.SKIP, "skipped")):
        self._print_expected_results_of_type(
            result_summary, result_type, description, tests_with_result_type_callback)
    self._print_expected('')

    repeat_each = self._options.repeat_each
    iterations = self._options.iterations
    if repeat_each > 1:
        self._print_expected('Running each test %d times.' % repeat_each)
    if iterations > 1:
        self._print_expected('Running %d iterations of the tests.' % iterations)
    if iterations > 1 or repeat_each > 1:
        self._print_expected('')
def feed(self):
    """Push any not-yet-submitted r? patches to the EWS status server."""
    ids_needing_review = set(self._tool.bugs.queries.fetch_attachment_ids_from_review_queue())
    new_ids = ids_needing_review.difference(self._ids_sent_to_server)
    log("Feeding EWS (%s, %s new)" % (pluralize("r? patch", len(ids_needing_review)), len(new_ids)))
    # Order doesn't really matter for the EWS.
    for attachment_id in new_ids:
        self._tool.status_server.submit_to_ews(attachment_id)
        # Track what we submitted so it is skipped next time around.
        self._ids_sent_to_server.add(attachment_id)
def _print_one_line_summary(self, total_time, run_results):
    """Print the final one-line summary, optionally with timing data.

    Args:
        total_time: wall-clock duration of the whole run, in seconds.
        run_results: object carrying total/expected/unexpected/skip
            counters and per-test results.
    """
    if self._options.timing:
        # Sum of per-test runtimes across all workers.
        parallel_time = sum(result.total_run_time for result in run_results.results_by_name.values())

        # There is serial overhead in layout_test_runner.run() that we can't easily account for when
        # really running in parallel, but taking the min() ensures that in the worst case
        # (if parallel time is less than run_time) we do account for it.
        serial_time = total_time - min(run_results.run_time, parallel_time)

        speedup = (parallel_time + serial_time) / total_time
        timing_summary = ' in %.2fs (%.2fs in rwt, %.2gx)' % (total_time, serial_time, speedup)
    else:
        timing_summary = ''

    # Expected skips are excluded from the headline counts.
    total = run_results.total - run_results.expected_skips
    expected = run_results.expected - run_results.expected_skips
    unexpected = run_results.unexpected
    incomplete = total - expected - unexpected
    incomplete_str = ''
    if incomplete:
        self._print_default("")
        incomplete_str = " (%d didn't run)" % incomplete

    # Blank separator line when output is verbose or something failed.
    if self._options.verbose or self._options.debug_rwt_logging or unexpected:
        self.writeln("")

    expected_summary_str = ''
    if run_results.expected_failures > 0:
        expected_summary_str = " (%d passed, %d didn't)" % (
            expected - run_results.expected_failures, run_results.expected_failures)

    summary = ''
    if unexpected == 0:
        if expected == total:
            if expected > 1:
                summary = "All %d tests ran as expected%s%s." % (expected, expected_summary_str, timing_summary)
            else:
                summary = "The test ran as expected%s%s." % (expected_summary_str, timing_summary)
        else:
            summary = "%s ran as expected%s%s%s." % (grammar.pluralize(
                'test', expected), expected_summary_str, incomplete_str, timing_summary)
    else:
        summary = "%s ran as expected%s, %d didn't%s%s:" % (grammar.pluralize(
            'test', expected), expected_summary_str, unexpected, incomplete_str, timing_summary)

    self._print_quiet(summary)
    self._print_quiet("")
def _guess_reviewer_from_bug(self, bug_id):
    """Return the reviewer's full name when the bug carries exactly one
    reviewed patch; otherwise log why and return None."""
    patches = self._tool.bugs.fetch_bug(bug_id).reviewed_patches()
    if len(patches) == 1:
        patch = patches[0]
        log("Guessing \"%s\" as reviewer from attachment %s on bug %s." % (patch.reviewer().full_name, patch.id(), bug_id))
        return patch.reviewer().full_name
    log("%s on bug %s, cannot infer reviewer." % (pluralize("reviewed patch", len(patches)), bug_id))
    return None
def print_workers_and_shards(self, num_workers, num_shards, num_locked_shards):
    """Announce the worker/shard configuration for this run."""
    driver = self._port.driver_name()
    if num_workers != 1:
        self._print_default("Running %d %ss in parallel over %d shards (%d locked)." % (
            num_workers, driver, num_shards, num_locked_shards))
    else:
        self._print_default("Running 1 %s over %s." % (
            driver, grammar.pluralize('shard', num_shards)))
    self._print_default('')
def run_tests(self, expectations, test_inputs, tests_to_skip, num_workers, retrying):
    """Shard test_inputs and run them on a pool of worker processes.

    Args:
        expectations: the test-expectations object for this run.
        test_inputs: the tests to actually run.
        tests_to_skip: tests recorded up front as expected SKIPs.
        num_workers: number of worker processes to start.
        retrying: True when re-running previously failed tests (skips
            the expectations banner).

    Returns:
        A TestRunResults; .interrupted / .keyboard_interrupted are set
        when the run was aborted.
    """
    self._expectations = expectations
    self._test_inputs = test_inputs
    self._retrying = retrying

    # FIXME: rename all variables to test_run_results or some such ...
    run_results = TestRunResults(self._expectations, len(test_inputs) + len(tests_to_skip))
    self._current_run_results = run_results
    self._printer.num_tests = len(test_inputs)
    self._printer.num_started = 0

    if not retrying:
        self._printer.print_expected(
            run_results, self._expectations.model().get_tests_with_result_type)

    # Record every skipped test as an expected SKIP result.
    for test_name in set(tests_to_skip):
        result = test_results.TestResult(test_name)
        result.type = test_expectations.SKIP
        run_results.add(result, expected=True, test_is_slow=self._test_is_slow(test_name))

    self._printer.write_update('Sharding tests ...')
    all_shards = self._sharder.shard_tests(
        test_inputs, int(self._options.child_processes),
        self._options.fully_parallel)
    self._printer.print_workers_and_shards(num_workers, len(all_shards))

    if self._options.dry_run:
        return run_results

    self._printer.write_update('Starting %s ...' % grammar.pluralize(num_workers, "worker"))

    try:
        with message_pool.get(self, self._worker_factory, num_workers,
                              self._port.worker_startup_delay_secs(),
                              self._port.host) as pool:
            pool.run(('test_list', shard.name, shard.test_inputs)
                     for shard in all_shards)
    except TestRunInterruptedException as e:
        _log.warning(e.reason)
        run_results.interrupted = True
    except KeyboardInterrupt:
        self._printer.flush()
        self._printer.writeln('Interrupted, exiting ...')
        run_results.keyboard_interrupted = True
    except Exception as e:
        # Unexpected errors are logged and re-raised to the caller.
        _log.debug('%s("%s") raised, exiting' % (e.__class__.__name__, str(e)))
        raise

    return run_results
def print_result(self, run_time):
    """Write the summary trailer for a finished run."""
    writeln = self.meter.writeln
    writeln('Ran %s in %.3fs' % (pluralize(self.num_started, "test"), run_time))
    if not (self.num_failures or self.num_errors):
        writeln('\nOK\n')
    else:
        writeln('FAILED (failures=%d, errors=%d)\n' % (self.num_failures, self.num_errors))
def print_workers_and_shards(self, num_workers, num_shards, num_locked_shards):
    """Print how many drivers run in parallel; shard/lock counts are
    debug-only detail."""
    driver = self._port.driver_name()
    if num_workers != 1:
        self._print_default("Running %d %ss in parallel." % (num_workers, driver))
        self._print_debug("(%d shards; %d locked)." % (num_shards, num_locked_shards))
    else:
        self._print_default("Running 1 %s." % driver)
        self._print_debug("(%s)." % grammar.pluralize("shard", num_shards))
    self._print_default("")
def print_workers_and_shards(self, num_workers, num_shards, num_locked_shards):
    """Report worker parallelism; shard and lock counts go to debug."""
    driver_name = self._port.driver_name()
    if num_workers == 1:
        self._print_default("Running 1 %s." % driver_name)
        self._print_debug("(%s)." % grammar.pluralize('shard', num_shards))
        self._print_default('')
        return
    self._print_default("Running %d %ss in parallel." % (num_workers, driver_name))
    self._print_debug("(%d shards; %d locked)." % (num_shards, num_locked_shards))
    self._print_default('')
def print_found(self, num_all_test_files, num_shard_test_files, num_to_run, repeat_each, iterations):
    """Summarize discovery: tests found in this shard (and overall),
    how many will run, and how many are skipped."""
    pieces = ['Found %s' % grammar.pluralize('test', num_shard_test_files)]
    if num_all_test_files != num_shard_test_files:
        pieces.append(' (total %d)' % num_all_test_files)
    pieces.append('; running %d' % num_to_run)
    repeats = repeat_each * iterations
    if repeats > 1:
        pieces.append(' (%d times each: --repeat-each=%d --iterations=%d)' % (repeats, repeat_each, iterations))
    pieces.append(', skipping %d' % (num_shard_test_files - num_to_run))
    self._print_default(''.join(pieces) + '.')
def run_tests(
    self,
    expectations,
    test_inputs,
    tests_to_skip,
    num_workers,
    needs_http,
    needs_websockets,
    needs_web_platform_test_server,
    retrying,
):
    """Shard the given tests across worker processes and run them.

    Skipped tests are recorded up front as expected SKIP results.
    Returns a TestRunResults; .interrupted is set when the run is
    aborted by a TestRunInterruptedException.
    """
    self._expectations = expectations
    self._test_inputs = test_inputs
    self._needs_http = needs_http
    self._needs_websockets = needs_websockets
    self._needs_web_platform_test_server = needs_web_platform_test_server
    self._retrying = retrying

    # FIXME: rename all variables to test_run_results or some such ...
    run_results = TestRunResults(self._expectations, len(test_inputs) + len(tests_to_skip))
    self._current_run_results = run_results
    self._printer.num_tests = len(test_inputs)
    self._printer.num_started = 0

    if not retrying:
        self._printer.print_expected(run_results, self._expectations.model().get_tests_with_result_type)

    for test_name in set(tests_to_skip):
        result = test_results.TestResult(test_name)
        result.type = test_expectations.SKIP
        run_results.add(result, expected=True, test_is_slow=self._test_is_slow(test_name))

    self._printer.write_update("Sharding tests ...")
    all_shards = self._sharder.shard_tests(
        test_inputs, int(self._options.child_processes), self._options.fully_parallel
    )

    if (self._needs_http and self._options.http) or self._needs_web_platform_test_server:
        self.start_servers()

    num_workers = min(num_workers, len(all_shards))
    self._printer.print_workers_and_shards(num_workers, len(all_shards))

    if self._options.dry_run:
        return run_results

    self._printer.write_update("Starting %s ..." % grammar.pluralize(num_workers, "worker"))

    try:
        with message_pool.get(
            self, self._worker_factory, num_workers, self._port.worker_startup_delay_secs(), self._port.host
        ) as pool:
            pool.run(("test_list", shard.name, shard.test_inputs) for shard in all_shards)
    # "except X, e" is Python-2-only syntax; "as" parses on 2.6+ and 3.
    except TestRunInterruptedException as e:
        _log.warning(e.reason)
        run_results.interrupted = True
def run_tests(self, expectations, test_inputs, tests_to_skip, num_workers,
              needs_http, needs_websockets, needs_web_platform_test_server,
              retrying):
    """Shard the given tests and run them on a pool of worker processes.

    Skipped tests are recorded up front as expected SKIP results.
    Returns a TestRunResults; .interrupted is set when the run is
    aborted by a TestRunInterruptedException.
    """
    self._expectations = expectations
    self._test_inputs = test_inputs
    self._needs_http = needs_http
    self._needs_websockets = needs_websockets
    self._needs_web_platform_test_server = needs_web_platform_test_server
    self._retrying = retrying

    # FIXME: rename all variables to test_run_results or some such ...
    run_results = TestRunResults(self._expectations,
                                 len(test_inputs) + len(tests_to_skip))
    self._current_run_results = run_results
    self._printer.num_tests = len(test_inputs)
    self._printer.num_started = 0

    if not retrying:
        self._printer.print_expected(
            run_results, self._expectations.model().get_tests_with_result_type)

    for test_name in set(tests_to_skip):
        result = test_results.TestResult(test_name)
        result.type = test_expectations.SKIP
        run_results.add(result, expected=True,
                        test_is_slow=self._test_is_slow(test_name))

    self._printer.write_update('Sharding tests ...')
    all_shards = self._sharder.shard_tests(
        test_inputs, int(self._options.child_processes),
        self._options.fully_parallel)

    if (self._needs_http and self._options.http) or self._needs_web_platform_test_server:
        self.start_servers()

    num_workers = min(num_workers, len(all_shards))
    self._printer.print_workers_and_shards(num_workers, len(all_shards))

    if self._options.dry_run:
        return run_results

    self._printer.write_update('Starting %s ...' % grammar.pluralize(num_workers, "worker"))

    try:
        with message_pool.get(self, self._worker_factory, num_workers,
                              self._port.worker_startup_delay_secs(),
                              self._port.host) as pool:
            pool.run(('test_list', shard.name, shard.test_inputs)
                     for shard in all_shards)
    # "except X, e" is Python-2-only syntax; "as" parses on 2.6+ and 3.
    except TestRunInterruptedException as e:
        _log.warning(e.reason)
        run_results.interrupted = True
def _builder_to_pull_from(self):
    """Ask the user which currently-red builder to pull results from.

    Returns:
        (builder, build_number) for the chosen red builder.
    """
    builder_statuses = self.tool.buildbot.builder_statuses()
    red_statuses = [status for status in builder_statuses if not status["is_green"]]
    # print() with a single parenthesized argument behaves identically
    # on Python 2 and 3; the bare statement form is Python-2-only.
    print("%s failing" % (pluralize("builder", len(red_statuses))))
    builder_choices = [status["name"] for status in red_statuses]
    chosen_name = self.tool.user.prompt_with_list("Which builder to pull results from:", builder_choices)
    # FIXME: prompt_with_list should really take a set of objects and a set of names and then return the object.
    for status in red_statuses:
        if status["name"] == chosen_name:
            return (self.tool.buildbot.builder_with_name(chosen_name), status["build_number"])
def _num_workers(self, num_shards): num_workers = min(int(self._options.child_processes), num_shards) driver_name = self._port.driver_name() if num_workers == 1: self._printer.print_config("Running 1 %s over %s" % (driver_name, grammar.pluralize('shard', num_shards))) else: self._printer.print_config("Running %d %ss in parallel over %d shards" % (num_workers, driver_name, num_shards)) return num_workers
def run(self, state):
    """Obsolete every patch already attached to the state's bug when
    the --obsolete-patches option is set; otherwise do nothing."""
    if not self._options.obsolete_patches:
        return
    bug_id = state["bug_id"]
    old_patches = self._tool.bugs.fetch_bug(bug_id).patches()
    if old_patches:
        log("Obsoleting %s on bug %s" % (pluralize("old patch", len(old_patches)), bug_id))
        for old_patch in old_patches:
            self._tool.bugs.obsolete_attachment(old_patch.id())
def _builder_to_pull_from(self):
    """Prompt for one of the failing (red) builders and return it with
    its latest build number."""
    red_statuses = [status for status in self._tool.buildbot.builder_statuses()
                    if not status["is_green"]]
    _log.info("%s failing" % (pluralize("builder", len(red_statuses))))
    chosen_name = self._tool.user.prompt_with_list(
        "Which builder to pull results from:",
        [status["name"] for status in red_statuses])
    # FIXME: prompt_with_list should really take a set of objects and a set of names and then return the object.
    for status in red_statuses:
        if status["name"] == chosen_name:
            return (self._tool.buildbot.builder_with_name(chosen_name), status["build_number"])
def run(self, state):
    """When --obsolete-patches is enabled, mark every patch already
    attached to the bug as obsolete."""
    if not self._options.obsolete_patches:
        return
    bug_id = state["bug_id"]
    existing = self._tool.bugs.fetch_bug(bug_id).patches()
    if existing:
        _log.info("Obsoleting %s on bug %s" % (pluralize(len(existing), "old patch"), bug_id))
        for attachment in existing:
            self._tool.bugs.obsolete_attachment(attachment.id())
def _builder_to_explain(self):
    """Prompt the user to pick one failing builder to diagnose.

    Returns:
        (builder, built_revision) for the chosen red builder.
    """
    builder_statuses = self._tool.buildbot.builder_statuses()
    red_statuses = [status for status in builder_statuses if not status["is_green"]]
    # print() works the same on Python 2 and 3 for a single
    # parenthesized argument; the old statement form is 2-only.
    print("%s failing" % (pluralize("builder", len(red_statuses))))
    builder_choices = [status["name"] for status in red_statuses]
    # We could offer an "All" choice here.
    chosen_name = self._tool.user.prompt_with_list("Which builder to diagnose:", builder_choices)
    # FIXME: prompt_with_list should really take a set of objects and a set of names and then return the object.
    for status in red_statuses:
        if status["name"] == chosen_name:
            return (self._tool.buildbot.builder_with_name(chosen_name), status["built_revision"])
def run_tests(self, test_inputs, expectations, result_summary, num_workers, needs_http, needs_websockets, retrying):
    """Returns a tuple of (thread_timings, test_timings, individual_test_timings):
        thread_timings is a list of dicts with the total runtime
            of each thread with 'name', 'num_tests', 'total_time' properties
        test_timings is a list of timings for each sharded subdirectory
            of the form [time, directory_name, num_tests]
        individual_test_timings is a list of run times for each test
            in the form {filename:filename, test_run_time:test_run_time}
    """
    self._current_result_summary = result_summary
    self._expectations = expectations
    self._needs_http = needs_http
    self._needs_websockets = needs_websockets
    self._retrying = retrying

    self._test_files_list = [test_input.test_name for test_input in test_inputs]
    self._printer.num_tests = len(self._test_files_list)
    self._printer.num_completed = 0

    self._all_results = []
    self._group_stats = {}
    self._worker_stats = {}
    self._has_http_lock = False
    self._remaining_locked_shards = []

    self._printer.write_update('Sharding tests ...')
    locked_shards, unlocked_shards = self._sharder.shard_tests(test_inputs, int(self._options.child_processes), self._options.fully_parallel)

    # FIXME: We don't have a good way to coordinate the workers so that
    # they don't try to run the shards that need a lock if we don't actually
    # have the lock. The easiest solution at the moment is to grab the
    # lock at the beginning of the run, and then run all of the locked
    # shards first. This minimizes the time spent holding the lock, but
    # means that we won't be running tests while we're waiting for the lock.
    # If this becomes a problem in practice we'll need to change this.
    all_shards = locked_shards + unlocked_shards
    self._remaining_locked_shards = locked_shards
    if self._port.requires_http_server() or (locked_shards and self._options.http):
        self.start_servers_with_lock(2 * min(num_workers, len(locked_shards)))

    num_workers = min(num_workers, len(all_shards))
    self._printer.print_workers_and_shards(num_workers, len(all_shards), len(locked_shards))

    if self._options.dry_run:
        return (self._worker_stats.values(), self._group_stats, self._all_results)

    self._printer.write_update('Starting %s ...' % grammar.pluralize('worker', num_workers))

    try:
        with message_pool.get(self, self._worker_factory, num_workers, self._port.worker_startup_delay_secs(), self._port.host) as pool:
            pool.run(('test_list', shard.name, shard.test_inputs) for shard in all_shards)
    # "except X, e" only parses on Python 2; "as" is valid on 2.6+ and 3.
    except TestRunInterruptedException as e:
        _log.warning(e.reason)
        result_summary.interrupted = True
def _builder_to_explain(self):
    """Prompt (via User) for a failing builder to diagnose.

    Returns:
        (builder, built_revision) for the chosen red builder.
    """
    builder_statuses = self.tool.buildbot.builder_statuses()
    red_statuses = [status for status in builder_statuses if not status["is_green"]]
    # Use the print function so the module parses under Python 3; with
    # a single parenthesized argument it behaves identically on 2.
    print("%s failing" % (pluralize("builder", len(red_statuses))))
    builder_choices = [status["name"] for status in red_statuses]
    # We could offer an "All" choice here.
    chosen_name = User.prompt_with_list("Which builder to diagnose:", builder_choices)
    # FIXME: prompt_with_list should really take a set of objects and a set of names and then return the object.
    for status in red_statuses:
        if status["name"] == chosen_name:
            return (self.tool.buildbot.builder_with_name(chosen_name), status["built_revision"])
def run_tests(self, expectations, test_inputs, tests_to_skip, num_workers, retrying):
    """Shard and run test_inputs across num_workers worker processes.

    Skipped tests are recorded up front as expected SKIP results.
    Returns a TestRunResults; .interrupted is set when the run is
    aborted by a TestRunInterruptedException.
    """
    self._expectations = expectations
    self._test_inputs = test_inputs
    self._retrying = retrying

    # FIXME: rename all variables to test_run_results or some such ...
    run_results = TestRunResults(self._expectations, len(test_inputs) + len(tests_to_skip))
    self._current_run_results = run_results
    self._printer.num_tests = len(test_inputs)
    self._printer.num_completed = 0

    if not retrying:
        self._printer.print_expected(
            run_results, self._expectations.get_tests_with_result_type)

    for test_name in set(tests_to_skip):
        result = test_results.TestResult(test_name)
        result.type = test_expectations.SKIP
        run_results.add(result, expected=True, test_is_slow=self._test_is_slow(test_name))

    self._printer.write_update('Sharding tests ...')
    locked_shards, unlocked_shards = self._sharder.shard_tests(
        test_inputs, int(self._options.child_processes), self._options.fully_parallel)

    # We don't have a good way to coordinate the workers so that they don't
    # try to run the shards that need a lock. The easiest solution is to
    # run all of the locked shards first.
    all_shards = locked_shards + unlocked_shards
    num_workers = min(num_workers, len(all_shards))
    self._printer.print_workers_and_shards(num_workers, len(all_shards), len(locked_shards))

    if self._options.dry_run:
        return run_results

    self._printer.write_update('Starting %s ...' % grammar.pluralize('worker', num_workers))

    # NOTE(review): start_time appears unused in this block; kept from
    # the original code.
    start_time = time.time()
    try:
        with message_pool.get(self, self._worker_factory, num_workers,
                              self._port.worker_startup_delay_secs(),
                              self._port.host) as pool:
            pool.run(('test_list', shard.name, shard.test_inputs)
                     for shard in all_shards)
    # "except X, e" is Python-2-only syntax; "as" parses on 2.6+ and 3.
    except TestRunInterruptedException as e:
        _log.warning(e.reason)
        run_results.interrupted = True
def print_workers_and_shards(self, num_workers, num_shards, num_locked_shards):
    """Log how many drivers will run and how the tests were sharded."""
    driver_name = self._port.driver_name()
    if num_workers == 1:
        message = "Running 1 %s over %s." % (driver_name, grammar.pluralize('shard', num_shards))
    else:
        message = "Running %d %ss in parallel over %d shards (%d locked)." % (num_workers, driver_name, num_shards, num_locked_shards)
    self._print_config(message)
    self._print_config('')
def run_tests(self, expectations, test_inputs, tests_to_skip, num_workers, needs_http, needs_websockets, retrying):
    """Shard the given test inputs and run them on a pool of worker processes.

    Args:
        expectations: parsed test expectations for this run.
        test_inputs: the TestInput objects to actually execute.
        tests_to_skip: test names recorded up front as expected SKIP results.
        num_workers: upper bound on the number of workers (further capped
            by the number of shards).
        needs_http / needs_websockets: whether HTTP/websocket servers are
            required for this run (stored for later use).
        retrying: True on a retry pass; suppresses the "expected results"
            banner.

    Returns run_results when --dry-run is set.
    NOTE(review): no final return is visible in this excerpt -- confirm the
    code that follows it returns run_results.
    """
    self._expectations = expectations
    self._test_inputs = test_inputs
    self._needs_http = needs_http
    self._needs_websockets = needs_websockets
    self._retrying = retrying

    # FIXME: rename all variables to test_run_results or some such ...
    run_results = TestRunResults(self._expectations, len(test_inputs) + len(tests_to_skip))
    self._current_run_results = run_results
    self._remaining_locked_shards = []
    self._has_http_lock = False
    self._printer.num_tests = len(test_inputs)
    self._printer.num_started = 0

    if not retrying:
        self._printer.print_expected(run_results, self._expectations.model().get_tests_with_result_type)

    # Record each skipped test as an expected SKIP so the run totals add up
    # even though no worker ever executes them.
    for test_name in set(tests_to_skip):
        result = test_results.TestResult(test_name)
        result.type = test_expectations.SKIP
        run_results.add(result, expected=True, test_is_slow=self._test_is_slow(test_name))

    self._printer.write_update('Sharding tests ...')
    locked_shards, unlocked_shards = self._sharder.shard_tests(test_inputs, int(self._options.child_processes), self._options.fully_parallel)

    # FIXME: We don't have a good way to coordinate the workers so that
    # they don't try to run the shards that need a lock if we don't actually
    # have the lock. The easiest solution at the moment is to grab the
    # lock at the beginning of the run, and then run all of the locked
    # shards first. This minimizes the time spent holding the lock, but
    # means that we won't be running tests while we're waiting for the lock.
    # If this becomes a problem in practice we'll need to change this.
    all_shards = locked_shards + unlocked_shards
    self._remaining_locked_shards = locked_shards
    if locked_shards and self._options.http:
        self.start_servers_with_lock(2 * min(num_workers, len(locked_shards)))

    num_workers = min(num_workers, len(all_shards))
    self._printer.print_workers_and_shards(num_workers, len(all_shards), len(locked_shards))

    if self._options.dry_run:
        return run_results

    # BUG FIX: grammar.pluralize takes (noun, count) everywhere else in this
    # file (e.g. grammar.pluralize('worker', num_workers) in the sibling
    # run_tests variant and grammar.pluralize('test', expected) in
    # print_one_line_summary); the arguments were reversed here, which would
    # mangle the "Starting N workers ..." message.
    self._printer.write_update('Starting %s ...' % grammar.pluralize('worker', num_workers))

    try:
        with message_pool.get(self, self._worker_factory, num_workers, self._port.worker_startup_delay_secs(), self._port.host) as pool:
            pool.run(('test_list', shard.name, shard.test_inputs) for shard in all_shards)
    except TestRunInterruptedException as e:  # 'as' form is valid on py2.6+ and py3
        _log.warning(e.reason)
        run_results.interrupted = True
def run_tests(self, expectations, test_inputs, tests_to_skip, num_workers, needs_http, needs_websockets, retrying):
    """Shard the given test inputs and run them on a pool of worker processes.

    Args:
        expectations: parsed test expectations for this run.
        test_inputs: the TestInput objects to actually execute.
        tests_to_skip: test names recorded up front as expected SKIP results.
        num_workers: upper bound on the number of workers (further capped
            by the number of shards).
        needs_http / needs_websockets: whether HTTP/websocket servers are
            required for this run (stored for later use).
        retrying: True on a retry pass; suppresses the "expected results"
            banner.

    Returns run_results when --dry-run is set.
    NOTE(review): no final return is visible in this excerpt -- confirm the
    code that follows it returns run_results.
    """
    self._expectations = expectations
    self._test_inputs = test_inputs
    self._needs_http = needs_http
    self._needs_websockets = needs_websockets
    self._retrying = retrying

    # FIXME: rename all variables to test_run_results or some such ...
    run_results = TestRunResults(self._expectations, len(test_inputs) + len(tests_to_skip))
    self._current_run_results = run_results
    self._remaining_locked_shards = []
    self._has_http_lock = False
    self._printer.num_tests = len(test_inputs)
    self._printer.num_started = 0

    if not retrying:
        self._printer.print_expected(run_results, self._expectations.get_tests_with_result_type)

    # Record each skipped test as an expected SKIP so the run totals add up
    # even though no worker ever executes them.
    for test_name in set(tests_to_skip):
        result = test_results.TestResult(test_name)
        result.type = test_expectations.SKIP
        run_results.add(result, expected=True, test_is_slow=self._test_is_slow(test_name))

    self._printer.write_update('Sharding tests ...')
    locked_shards, unlocked_shards = self._sharder.shard_tests(test_inputs, int(self._options.child_processes), self._options.fully_parallel)

    # FIXME: We don't have a good way to coordinate the workers so that
    # they don't try to run the shards that need a lock if we don't actually
    # have the lock. The easiest solution at the moment is to grab the
    # lock at the beginning of the run, and then run all of the locked
    # shards first. This minimizes the time spent holding the lock, but
    # means that we won't be running tests while we're waiting for the lock.
    # If this becomes a problem in practice we'll need to change this.
    all_shards = locked_shards + unlocked_shards
    self._remaining_locked_shards = locked_shards
    if locked_shards and self._options.http:
        self.start_servers_with_lock(2 * min(num_workers, len(locked_shards)))

    num_workers = min(num_workers, len(all_shards))
    self._printer.print_workers_and_shards(num_workers, len(all_shards), len(locked_shards))

    if self._options.dry_run:
        return run_results

    self._printer.write_update('Starting %s ...' % grammar.pluralize('worker', num_workers))

    try:
        with message_pool.get(self, self._worker_factory, num_workers, self._port.worker_startup_delay_secs(), self._port.host) as pool:
            pool.run(('test_list', shard.name, shard.test_inputs) for shard in all_shards)
    except TestRunInterruptedException, e:
        # Python 2 except-comma syntax; log the reason and mark the run
        # interrupted rather than propagating.
        _log.warning(e.reason)
        run_results.interrupted = True
def test_pluralize(self):
    """pluralize() prefixes the count unless showCount is False."""
    for count, with_count, bare in (
            (1, "1 patch", "patch"),
            (2, "2 patches", "patches")):
        self.assertEqual(pluralize(count, "patch"), with_count)
        self.assertEqual(pluralize(count, "patch", True), with_count)
        self.assertEqual(pluralize(count, "patch", False), bare)
def run_tests(self, expectations, test_inputs, tests_to_skip, num_workers, retry_attempt):
    """Shard the given test inputs and run them on a pool of worker processes.

    Args:
        expectations: parsed test expectations for this run.
        test_inputs: the TestInput objects to actually execute.
        tests_to_skip: test names recorded up front as expected SKIP results.
        num_workers: upper bound on the number of workers (further capped
            by the number of shards).
        retry_attempt: 0 on the first pass; banners are only printed then.

    Returns run_results when --dry-run is set.
    NOTE(review): no final return is visible in this excerpt -- confirm the
    code that follows it returns run_results.
    """
    self._expectations = expectations
    self._test_inputs = test_inputs
    self._retry_attempt = retry_attempt
    # Shards that a worker asked to redo; re-run after the first pool pass.
    self._shards_to_redo = []

    # FIXME: rename all variables to test_run_results or some such ...
    run_results = TestRunResults(self._expectations, len(test_inputs) + len(tests_to_skip))
    self._current_run_results = run_results
    self._printer.num_tests = len(test_inputs)
    self._printer.num_completed = 0

    if retry_attempt < 1:
        self._printer.print_expected(run_results, self._expectations.get_tests_with_result_type)

    # Record each skipped test as an expected SKIP so the run totals add up
    # even though no worker ever executes them.
    for test_name in set(tests_to_skip):
        result = test_results.TestResult(test_name)
        result.type = test_expectations.SKIP
        run_results.add(result, expected=True, test_is_slow=self._test_is_slow(test_name))

    self._printer.write_update('Sharding tests ...')
    locked_shards, unlocked_shards = self._sharder.shard_tests(test_inputs, int(self._options.child_processes), self._options.fully_parallel, self._options.run_singly or (self._options.batch_size == 1))

    # We don't have a good way to coordinate the workers so that they don't
    # try to run the shards that need a lock. The easiest solution is to
    # run all of the locked shards first.
    all_shards = locked_shards + unlocked_shards
    num_workers = min(num_workers, len(all_shards))

    if retry_attempt < 1:
        self._printer.print_workers_and_shards(num_workers, len(all_shards), len(locked_shards))

    if self._options.dry_run:
        return run_results

    self._printer.write_update('Starting %s ...'
                               % grammar.pluralize('worker', num_workers))

    # start_time is presumably consumed by timing/reporting code beyond this
    # excerpt -- TODO confirm.
    start_time = time.time()
    try:
        with message_pool.get(self, self._worker_factory, num_workers, self._port.host) as pool:
            pool.run(('test_list', shard.name, shard.test_inputs) for shard in all_shards)

        if self._shards_to_redo:
            # Shrink the pool by the number of failed shards (their workers
            # are presumably considered bad -- TODO confirm) and run the
            # redo shards on a fresh pool.
            num_workers -= len(self._shards_to_redo)
            if num_workers > 0:
                with message_pool.get(self, self._worker_factory, num_workers, self._port.host) as pool:
                    pool.run(('test_list', shard.name, shard.test_inputs) for shard in self._shards_to_redo)
    except TestRunInterruptedException, e:
        # Python 2 except-comma syntax; log the reason and mark the run
        # interrupted rather than propagating.
        _log.warning(e.reason)
        run_results.interrupted = True
def _check_diff_failure(self, error_log, tool):
    """Scan an error log for a failed reverse-diff application.

    Returns a human-readable message naming the files that failed to
    apply, or None when the log shows no such failure.
    """
    if not error_log:
        return None
    failure_marker = "Failed to apply reverse diff for revision"
    marker_index = error_log.find(failure_marker)
    if marker_index == -1:
        return None
    # The lines after the marker list the affected files; stop at the
    # first line that is not an existing path in the checkout.
    failed_files = []
    for line in error_log[marker_index:].split('\n')[1:]:
        if not tool.filesystem.exists(tool.scm().absolute_path(line)):
            break
        failed_files.append(line)
    if not failed_files:
        return None
    return "Failed to apply reverse diff for %s: %s" % (pluralize(len(failed_files), "file", showCount=False), ", ".join(failed_files))
def _run_tests(self, file_list, result_summary):
    """Runs the tests in the file_list.

    Return: A tuple (interrupted, keyboard_interrupted, thread_timings,
        test_timings, individual_test_timings)
        interrupted is whether the run was interrupted
        keyboard_interrupted is whether the interruption was because someone
            typed Ctrl^C
        thread_timings is a list of dicts with the total runtime of each
            thread with 'name', 'num_tests', 'total_time' properties
        test_timings is a list of timings for each sharded subdirectory of
            the form [time, directory_name, num_tests]
        individual_test_timings is a list of run times for each test in the
            form {filename:filename, test_run_time:test_run_time}
        result_summary: summary object to populate with the results
    """
    self._printer.print_update("Sharding tests ...")
    num_workers = self._num_workers()
    # Shard per-directory unless a single worker (or the experimental flag)
    # makes fully-serial/fully-parallel grouping appropriate.
    test_lists = self._shard_tests(file_list, num_workers > 1 and not self._options.experimental_fully_parallel)
    filename_queue = Queue.Queue()
    for item in test_lists:
        filename_queue.put(item)

    self._printer.print_update("Starting %s ..." % grammar.pluralize("worker", num_workers))
    self._message_broker = message_broker.get(self._port, self._options)
    broker = self._message_broker

    # Stash the queue and summary where the worker threads can find them.
    self._current_filename_queue = filename_queue
    self._current_result_summary = result_summary

    if not self._options.dry_run:
        threads = broker.start_workers(self)
    else:
        threads = {}

    self._printer.print_update("Starting testing ...")
    keyboard_interrupted = False
    interrupted = False
    if not self._options.dry_run:
        try:
            broker.run_message_loop()
        except KeyboardInterrupt:
            _log.info("Interrupted, exiting")
            broker.cancel_workers()
            keyboard_interrupted = True
            interrupted = True
        except TestRunInterruptedException, e:
            # Python 2 except-comma syntax.
            _log.info(e.reason)
            broker.cancel_workers()
            interrupted = True
        except:
def _fetch_list_of_patches_to_process(self, options, args, tool):
    """Collect the patch attachments referenced by the given URLs.

    A URL that parses as a bug contributes every patch on that bug; a URL
    that parses as an attachment contributes that attachment.
    """
    collected_patches = []
    for url in args:
        bug_id = urls.parse_bug_id(url)
        if bug_id:
            bug_patches = tool.bugs.fetch_bug(bug_id).patches()
            _log.info("%s found on bug %s." % (pluralize("patch", len(bug_patches)), bug_id))
            collected_patches += bug_patches
        attachment_id = urls.parse_attachment_id(url)
        if attachment_id:
            # NOTE(review): += iterates the value returned by
            # fetch_attachment -- confirm it returns a sequence rather than
            # a single attachment object here.
            collected_patches += tool.bugs.fetch_attachment(attachment_id)
    return collected_patches
def execute(self, options, args, tool): builder_statuses = tool.buildbot.builder_statuses() longest_builder_name = max(map(len, map(lambda builder: builder["name"], builder_statuses))) failing_builders = 0 for builder_status in builder_statuses: # If the builder is green, print OK, exit. if builder_status["is_green"]: continue self._print_blame_information_for_builder(builder_status, name_width=longest_builder_name) failing_builders += 1 if failing_builders: print "%s of %s are failing" % (failing_builders, pluralize("builder", len(builder_statuses))) else: print "All builders are passing!"
def _message_for_revert(cls, revision_list, reason, description_list, reverted_bug_url_list, rollout_bug_url=None):
    """Compose the commit message for rolling out the given revisions.

    The message lists the rollout bug (if any), the free-form reason, and
    then each reverted changeset's description, bug URL, and revision URL.
    """
    revision_names = ['r' + str(revision) for revision in revision_list]
    parts = ["Unreviewed, rolling out %s.\n" % grammar.join_with_separators(revision_names)]
    if rollout_bug_url:
        parts.append("%s\n" % rollout_bug_url)
    parts.append("\n")
    if reason:
        parts.append("%s\n" % reason)
        parts.append("\n")
    parts.append("Reverted %s:\n\n" % grammar.pluralize(len(revision_list), "changeset", showCount=False))
    for revision, description, reverted_bug_url in zip(revision_list, description_list, reverted_bug_url_list):
        if description:
            parts.append("\"%s\"\n" % description)
        if reverted_bug_url:
            parts.append("%s\n" % reverted_bug_url)
        parts.append("%s\n\n" % urls.view_revision_url(revision))
    return "".join(parts)