Example #1
    def _collect_expectation_lines(self, builder_names, factory):
        models = []
        for builder_name in builder_names:
            model = TestExpectationsModel()
            models.append(model)

            expectations = factory.expectations_for_builder(builder_name)

            # TODO(ojan): We should also skip bots that haven't uploaded recently,
            # e.g. if they're >24h stale.
            if not expectations:
                _log.error("Can't load flakiness data for builder: %s" % builder_name)
                continue

            for line in expectations.expectation_lines(only_ignore_very_flaky=True):
                # TODO(ojan): Find a way to merge specifiers instead of removing build types.
                # We can't just union because some specifiers will change the meaning of others.
                # For example, it's not clear how to merge [ Mac Release ] with [ Linux Debug ].
                # But, in theory we should be able to merge [ Mac Release ] and [ Mac Debug ].
                line.specifiers = self._filter_build_type_specifiers(line.specifiers)
                model.add_expectation_line(line)

        final_model = None
        for model in models:
            if final_model:
                final_model.merge_model(model)
            else:
                final_model = model
        return final_model._test_to_expectation_line.values()
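The _filter_build_type_specifiers helper called above does not appear in any of these examples. A minimal sketch of the filtering described by the TODO comment, assuming the build-type specifiers are the Release and Debug tokens (the token set and comparison are assumptions, not a verified implementation):

    def _filter_build_type_specifiers(self, specifiers):
        # Hypothetical sketch: drop build-type specifiers so that lines from
        # Release and Debug bots on the same platform can share one entry.
        build_types = ('release', 'debug')
        return [specifier for specifier in specifiers
                if specifier.lower() not in build_types]
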
Example #2
    def _collect_expectation_lines(self, builder_names, factory):
        models = []
        for builder_name in builder_names:
            model = TestExpectationsModel()
            models.append(model)

            expectations = factory.expectations_for_builder(builder_name)

            # TODO(ojan): We should also skip bots that haven't uploaded recently,
            # e.g. if they're >24h stale.
            if not expectations:
                _log.error("Can't load flakiness data for builder: %s",
                           builder_name)
                continue

            for line in expectations.expectation_lines(
                    only_ignore_very_flaky=True):
                # TODO(ojan): Find a way to merge specifiers instead of removing build types.
                # We can't just union because some specifiers will change the meaning of others.
                # For example, it's not clear how to merge [ Mac Release ] with [ Linux Debug ].
                # But, in theory we should be able to merge [ Mac Release ] and [ Mac Debug ].
                line.specifiers = self._filter_build_type_specifiers(
                    line.specifiers)
                model.add_expectation_line(line)

        final_model = None
        for model in models:
            if final_model:
                final_model.merge_model(model)
            else:
                final_model = model
        return final_model._test_to_expectation_line.values()
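Note that final_model stays None when builder_names is empty, in which case the trailing attribute access would raise. The merge loop itself is a left fold over the per-builder models; a compact equivalent, assuming merge_model mutates its receiver (as the loop above implies) and that models is non-empty:

    import functools

    def merge_models(models):
        # Fold every per-builder model into the first one, mirroring the
        # final_model loop in Examples #1 and #2. Assumes models is non-empty.
        def merge(accumulated, model):
            accumulated.merge_model(model)
            return accumulated
        return functools.reduce(merge, models)
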
Example #3
 def __init__(self, argv, host, git_cl):
     self._args = parse_args(argv)
     self._host = host
     self._git_cl = git_cl
     self._expectations_model = TestExpectationsModel()
     self._test_configuration_converter = TestConfigurationConverter(
         set(BUILDERS.values()))
Example #4
 def _collect_expectation_lines(self, builder_names, factory):
     model = TestExpectationsModel()
     for builder_name in builder_names:
         expectations = factory.expectations_for_builder(builder_name)
         for line in expectations.expectation_lines(only_ignore_very_flaky=True):
             model.add_expectation_line(line)
     # FIXME: We need an official API to get all the test names or all test lines.
     return model._test_to_expectation_line.values()
Example #5
 def _collect_expectation_lines(self, builder_names, factory):
     model = TestExpectationsModel()
     for builder_name in builder_names:
         expectations = factory.expectations_for_builder(builder_name)
         for line in expectations.expectation_lines(only_ignore_very_flaky=True):
             model.add_expectation_line(line)
     # FIXME: We need an official API to get all the test names or all test lines.
     return model._test_to_expectation_line.values()
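The FIXME above is addressed indirectly in the later TryFlag examples, which call TestExpectationsModel.all_lines() rather than reaching into the private _test_to_expectation_line mapping. Assuming that accessor is available (as Examples #8 and #9 suggest), the helper can avoid the private attribute:

    def _collect_expectation_lines(self, builder_names, factory):
        model = TestExpectationsModel()
        for builder_name in builder_names:
            expectations = factory.expectations_for_builder(builder_name)
            for line in expectations.expectation_lines(only_ignore_very_flaky=True):
                model.add_expectation_line(line)
        # all_lines() is assumed to return every stored TestExpectationLine,
        # matching its use in Examples #8 and #9.
        return model.all_lines()
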
Example #6
 def __init__(self, argv, host, git_cl):
     self._args = parse_args(argv)
     self._host = host
     self._git_cl = git_cl
     self._expectations_model = TestExpectationsModel()
     self._test_configuration_converter = TestConfigurationConverter(
         set(BUILDER_CONFIGS.values()))
     self._filesystem = self._host.filesystem
     self._path_finder = PathFinder(self._filesystem)
     self._git = self._host.git()
Example #7
    def _collect_expectation_lines(self, builder_names, factory):
        models = []
        for builder_name in builder_names:
            model = TestExpectationsModel()
            models.append(model)

            expectations = factory.expectations_for_builder(builder_name)
            for line in expectations.expectation_lines(only_ignore_very_flaky=True):
                # TODO(ojan): Find a way to merge specifiers instead of removing build types.
                # We can't just union because some specifiers will change the meaning of others.
                # For example, it's not clear how to merge [ Mac Release ] with [ Linux Debug ].
                # But, in theory we should be able to merge [ Mac Release ] and [ Mac Debug ].
                line.specifiers = self._filter_build_type_specifiers(line.specifiers)
                model.add_expectation_line(line)

        final_model = None
        for model in models:
            if final_model:
                final_model.merge_model(model)
            else:
                final_model = model
        return final_model._test_to_expectation_line.values()
Example #8
class TryFlag(object):
    def __init__(self, argv, host, git_cl):
        self._args = parse_args(argv)
        self._host = host
        self._git_cl = git_cl
        self._expectations_model = TestExpectationsModel()
        self._test_configuration_converter = TestConfigurationConverter(
            set(BUILDER_CONFIGS.values()))
        self._filesystem = self._host.filesystem
        self._path_finder = PathFinder(self._filesystem)
        self._git = self._host.git()

    def _set_flag(self, flag):
        path = self._path_finder.path_from_layout_tests(FLAG_FILE)
        self._filesystem.write_text_file(path, flag + '\n')
        self._git.add_list([path])
        self._git.commit_locally_with_message(
            'Flag try job: force %s for run-webkit-tests.' % flag)

    def _clear_expectations(self, flag):
        path = self._path_finder.path_from_layout_tests(
            'FlagExpectations', flag.lstrip('-'))
        self._filesystem.write_text_file(path, '')
        self._git.add_list([path])
        self._git.commit_locally_with_message(
            'Flag try job: clear expectations for %s.' % flag)

    def trigger(self):
        flag = self._args.flag
        if flag:
            self._set_flag(flag)
            if self._args.regenerate:
                self._clear_expectations(flag)
            self._git_cl.run([
                'upload', '--bypass-hooks', '-f', '-m',
                'Flag try job for %s.' % flag
            ])
        for builder in sorted(BUILDER_MASTERS.keys()):
            master = BUILDER_MASTERS[builder]
            self._git_cl.trigger_try_jobs([builder], master)

    def _create_expectation_line(self, result, test_configuration):
        test_name = result.test_name()
        line = TestExpectationLine()
        line.name = test_name
        line.path = test_name
        line.matching_tests = [test_name]
        line.filename = ''
        if self._args.bug:
            line.bugs = ['crbug.com/%s' % self._args.bug]
        else:
            line.bugs = ['Bug(none)']
        line.expectations = result.actual_results().split()
        line.parsed_expectations = [
            TestExpectations.expectation_from_string(expectation)
            for expectation in line.expectations
        ]
        line.specifiers = [test_configuration.version]
        line.matching_configurations = set([test_configuration])
        return line

    def _process_result(self, build, result):
        if not result.did_run_as_expected():
            self._expectations_model.add_expectation_line(
                self._create_expectation_line(
                    result, BUILDER_CONFIGS[build.builder_name]),
                model_all_expectations=True)

    def update(self):
        self._host.print_('Fetching results...')
        # TODO: Get jobs from the _tryflag branch. Current branch for now.
        jobs = self._git_cl.latest_try_jobs(BUILDER_CONFIGS.keys())
        buildbot = self._host.buildbot
        for build in sorted(jobs.keys()):
            self._host.print_(
                '-- %s: %s/results.html' %
                (BUILDER_CONFIGS[build.builder_name].version,
                 buildbot.results_url(build.builder_name, build.build_number)))
            results = buildbot.fetch_results(build, full=True)
            results.for_each_test(
                lambda result, b=build: self._process_result(b, result))

        # TODO: Write to flag expectations file. For now, stdout. :)
        unexpected_failures = []
        unexpected_passes = []
        for line in self._expectations_model.all_lines():
            if TestExpectations.EXPECTATIONS[
                    'pass'] in line.parsed_expectations:
                unexpected_passes.append(line)
            else:
                unexpected_failures.append(line)

        self._print_all(unexpected_passes, 'unexpected passes')
        self._print_all(unexpected_failures, 'unexpected failures')

    def _print_all(self, lines, description):
        self._host.print_('\n### %s %s:\n' % (len(lines), description))
        for line in lines:
            self._host.print_(
                line.to_string(self._test_configuration_converter))

    def run(self):
        action = self._args.action
        if action == 'trigger':
            self.trigger()
        elif action == 'update':
            self.update()
        else:
            self._host.stderr.write('specify "trigger" or "update"\n')
            return 1
        return 0
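For context, a minimal sketch of how TryFlag might be wired into an entry point. The host and git_cl objects are assumed to be built elsewhere (their concrete classes do not appear in these examples), and the argv format assumes parse_args takes the action as a positional argument:

    import sys

    def main(argv, host, git_cl):
        # argv: e.g. ['trigger'] or ['update'], matching the actions that
        #       TryFlag.run() dispatches on (assumed positional in parse_args).
        # host: provides print_, stderr, buildbot, filesystem and git(), as
        #       used by TryFlag above.
        # git_cl: wrapper for 'git cl' operations (run, trigger_try_jobs,
        #         latest_try_jobs), as used by TryFlag above.
        return TryFlag(argv, host, git_cl).run()
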
Example #9
class TryFlag(object):
    def __init__(self, argv, host, git_cl):
        self._args = parse_args(argv)
        self._host = host
        self._git_cl = git_cl
        self._expectations_model = TestExpectationsModel()
        self._test_configuration_converter = TestConfigurationConverter(
            set(BUILDERS.values()))

    def trigger(self):
        self._host.print_('TODO: implement "trigger"')

    def _create_expectation_line(self, result, test_configuration):
        test_name = result.test_name()
        line = TestExpectationLine()
        line.name = test_name
        line.path = test_name
        line.matching_tests = [test_name]
        line.filename = ''
        if self._args.bug:
            line.bugs = ['crbug.com/%s' % self._args.bug]
        else:
            line.bugs = ['Bug(none)']
        line.expectations = result.actual_results().split()
        line.parsed_expectations = [
            TestExpectations.expectation_from_string(expectation)
            for expectation in line.expectations
        ]
        line.specifiers = [test_configuration.version]
        line.matching_configurations = set([test_configuration])
        return line

    def _process_result(self, build, result):
        if not result.did_run_as_expected():
            self._expectations_model.add_expectation_line(
                self._create_expectation_line(result,
                                              BUILDERS[build.builder_name]),
                model_all_expectations=True)

    def update(self):
        self._host.print_('Fetching results...')
        # TODO: Get jobs from the _tryflag branch. Current branch for now.
        jobs = self._git_cl.latest_try_jobs(BUILDERS.keys())
        buildbot = self._host.buildbot
        for build in sorted(jobs.keys()):
            self._host.print_(
                '-- %s: %s/results.html' %
                (BUILDERS[build.builder_name].version,
                 buildbot.results_url(build.builder_name, build.build_number)))
            results = buildbot.fetch_results(build, full=True)
            results.for_each_test(
                lambda result, b=build: self._process_result(b, result))

        # TODO: Write to flag expectations file. For now, stdout. :)
        unexpected_failures = []
        unexpected_passes = []
        for line in self._expectations_model.all_lines():
            if TestExpectations.EXPECTATIONS[
                    'pass'] in line.parsed_expectations:
                unexpected_passes.append(line)
            else:
                unexpected_failures.append(line)

        self._print_all(unexpected_passes, 'unexpected passes')
        self._print_all(unexpected_failures, 'unexpected failures')

    def _print_all(self, lines, description):
        self._host.print_('\n### %s %s:\n' % (len(lines), description))
        for line in lines:
            self._host.print_(
                line.to_string(self._test_configuration_converter))

    def run(self):
        action = self._args.action
        if action == 'trigger':
            self.trigger()
        elif action == 'update':
            self.update()
        else:
            self._host.stderr.write('specify "trigger" or "update"\n')
            return 1
        return 0