def _covered_test_configurations_for_builder_name(self):
    """Return a dict mapping each known builder name to the set of test
    configurations that builder covers (per its coverage specifiers)."""
    return dict(
        (name,
         self._test_configuration_converter.to_config_set(
             builders.coverage_specifiers_for_builder_name(name)))
        for name in builders.all_builder_names())
def execute(self, options, args, tool): factory = self.expectations_factory() # FIXME: WebKit Linux 32 and WebKit Linux have the same specifiers; # if we include both of them, we'll get duplicate lines. Ideally # Linux 32 would have unique speicifiers. most_builders = builders.all_builder_names() if 'WebKit Linux 32' in most_builders: most_builders.remove('WebKit Linux 32') lines = self._collect_expectation_lines(most_builders, factory) lines.sort(key=lambda line: line.path) port = tool.port_factory.get() # Skip any tests which are mentioned in the dashboard but not in our checkout: fs = tool.filesystem lines = filter(lambda line: fs.exists(fs.join(port.layout_tests_dir(), line.path)), lines) # Note: This includes all flaky tests from the dashboard, even ones mentioned # in existing TestExpectations. We could certainly load existing TestExpecations # and filter accordingly, or update existing TestExpectations instead of FlakyTests. flaky_tests_path = fs.join(port.layout_tests_dir(), 'FlakyTests') flaky_tests_contents = self.FLAKY_TEST_CONTENTS % TestExpectations.list_to_string(lines) fs.write_text_file(flaky_tests_path, flaky_tests_contents) print "Updated %s" % flaky_tests_path if options.upload: return self._commit_and_upload(tool, options)
def _release_builders(self):
    """Return the names of all builders whose port is a release build."""
    def _is_release(builder_name):
        # One port lookup per builder; build_type distinguishes release/debug.
        port = self._tool.port_factory.get_from_builder_name(builder_name)
        return port.test_configuration().build_type == 'release'
    return [name for name in builders.all_builder_names() if _is_release(name)]
def _covered_test_configurations_for_builder_name(self):
    """Build a mapping from builder name to the config set derived from
    that builder's coverage specifiers."""
    converter = self._test_configuration_converter
    coverage = {}
    for name in builders.all_builder_names():
        specifiers = builders.coverage_specifiers_for_builder_name(name)
        coverage[name] = converter.to_config_set(specifiers)
    return coverage
def _builders_to_pull_from(self):
    """Prompt the user to pick one or more builders and return the
    corresponding builder objects."""
    candidate_names = list(builders.all_builder_names())
    chosen_names = self._tool.user.prompt_with_list(
        "Which builder to pull results from:",
        candidate_names,
        can_choose_multiple=True)
    return [self._builder_with_name(name) for name in chosen_names]
def _builders_to_pull_from(self):
    """Show the build.webkit.org builder list, let the user pick one or
    more, and return the corresponding builder objects."""
    webkit_names = [name for name in builders.all_builder_names()]
    chosen_names = self._tool.user.prompt_with_multiple_lists(
        "Which builder to pull results from:",
        ["build.webkit.org bots"],
        [webkit_names],
        can_choose_multiple=True)
    return [self._builder_with_name(name) for name in chosen_names]
def execute(self, options, args, tool): factory = self.expectations_factory() lines = self._collect_expectation_lines(builders.all_builder_names(), factory) lines.sort(key=lambda line: line.path) port = tool.port_factory.get() # Skip any tests which are mentioned in the dashboard but not in our checkout: fs = tool.filesystem lines = filter(lambda line: fs.exists(fs.join(port.layout_tests_dir(), line.path)), lines) print self.FLAKY_TEST_CONTENTS % TestExpectations.list_to_string(lines) # pylint: disable=E1601
def _builders_to_pull_from(self):
    """Partition builders into webkit.org and chromium.org groups, prompt
    the user across both lists, and return the chosen builder objects."""
    chromium_names = []
    webkit_names = []
    for name in builders.all_builder_names():
        port = self._tool.port_factory.get_from_builder_name(name)
        # Chromium bots go in one bucket, everything else is webkit.org.
        target = chromium_names if port.is_chromium() else webkit_names
        target.append(name)
    chosen_names = self._tool.user.prompt_with_multiple_lists(
        "Which builder to pull results from:",
        ["build.webkit.org bots", "build.chromium.org bots"],
        [webkit_names, chromium_names],
        can_choose_multiple=True)
    return [self._builder_with_name(name) for name in chosen_names]
def execute(self, options, args, tool): factory = self.expectations_factory() lines = self._collect_expectation_lines(builders.all_builder_names(), factory) lines.sort(key=lambda line: line.path) port = tool.port_factory.get() # Skip any tests which are mentioned in the dashboard but not in our checkout: fs = tool.filesystem lines = filter(lambda line: fs.exists(fs.join(port.layout_tests_dir(), line.path)), lines) test_names = [line.name for line in lines] flakiness_dashbord_url = self.FLAKINESS_DASHBOARD_URL % ','.join(test_names) expectations_string = TestExpectations.list_to_string(lines) # pylint: disable=E1601 print self.OUTPUT % (self.HEADER, expectations_string, flakiness_dashbord_url)
def execute(self, options, args, tool): factory = self.expectations_factory() lines = self._collect_expectation_lines(builders.all_builder_names(), factory) lines.sort(key=lambda line: line.path) port = tool.port_factory.get() # Skip any tests which are mentioned in the dashboard but not in our checkout: fs = tool.filesystem lines = filter(lambda line: fs.exists(fs.join(port.layout_tests_dir(), line.path)), lines) # Note: This includes all flaky tests from the dashboard, even ones mentioned # in existing TestExpectations. We could certainly load existing TestExpecations # and filter accordingly, or update existing TestExpectations instead of FlakyTests. flaky_tests_path = fs.join(port.layout_tests_dir(), 'FlakyTests') flaky_tests_contents = self.FLAKY_TEST_CONTENTS % TestExpectations.list_to_string(lines) fs.write_text_file(flaky_tests_path, flaky_tests_contents) print "Updated %s" % flaky_tests_path if options.upload: return self._commit_and_upload(tool, options)
def _builders_to_pull_from(self):
    """Prompt the user to choose one or more builders and return the
    matching builder objects."""
    names = [name for name in builders.all_builder_names()]
    chosen = self._tool.user.prompt_with_list(
        "Which builder to pull results from:", names, can_choose_multiple=True)
    return [self._builder_with_name(name) for name in chosen]