def test_remove_expectation(self):
     port = MockHost().port_factory.get('test-win-win7')
     raw_expectations_1 = ('# tags: [ Mac Win ]\n'
                         '# results: [ Failure Pass ]\n'
                         '\n'
                         'crbug.com/2432 [ Win ] test1 [ Failure ]\n')
     raw_expectations_2 = ('# tags: [ Mac Win ]\n'
                         '# results: [ Failure Pass ]\n'
                         '\n'
                         'crbug.com/2432 [ Win ] test1 [ Pass ]\n')
     raw_expectations_3 = ('# tags: [ Mac Win ]\n'
                         '# results: [ Failure Pass ]\n'
                         '# conflict_resolution: Override \n'
                         '\n'
                         'crbug.com/2432 [ Win ] test1 [ Pass ]\n')
     expectations_dict = OrderedDict()
     expectations_dict['/tmp/TestExpectations'] = raw_expectations_1
     expectations_dict['/tmp/TestExpectations2'] = raw_expectations_2
     test_expectations = TestExpectations(port, expectations_dict)
     self.assertEqual(test_expectations.get_expectations('test1'),
                      Expectation(
                          test='test1', results=set([ResultType.Pass, ResultType.Failure]),
                          is_slow_test=False, reason='crbug.com/2432'
                      ))
     expectations_dict = OrderedDict()
     expectations_dict['/tmp/TestExpectations'] = raw_expectations_1
     expectations_dict['/tmp/TestExpectations2'] = raw_expectations_3
     test_expectations = TestExpectations(port, expectations_dict)
     self.assertEqual(test_expectations.get_expectations('test1'),
                      Expectation(
                          test='test1', results=set([ResultType.Pass]),
                          is_slow_test=False, reason='crbug.com/2432'
                      ))
 def test_add_expectation_at_line(self):
     port = MockHost().port_factory.get('test-win-win7')
     raw_expectations = ('# tags: [ Mac Win ]\n'
                         '# results: [ Failure Crash ]\n'
                         '\n'
                         '# add expectations after this line\n'
                         'test1 [ Failure ]\n'
                         '\n')
     expectations_dict = OrderedDict()
     expectations_dict['/tmp/TestExpectations'] = raw_expectations
     test_expectations = TestExpectations(port, expectations_dict)
     test_expectations.add_expectations('/tmp/TestExpectations', [
         Expectation(test='test2',
                     results=set([ResultType.Crash]),
                     tags=set(['win']))
     ], 4)
     test_expectations.remove_expectations('/tmp/TestExpectations', [
         Expectation(
             test='test1', results=set([ResultType.Failure]), lineno=5)
     ])
     test_expectations.commit_changes()
     content = port.host.filesystem.read_text_file('/tmp/TestExpectations')
     self.assertEqual(content, ('# tags: [ Mac Win ]\n'
                                '# results: [ Failure Crash ]\n'
                                '\n'
                                '# add expectations after this line\n'
                                '[ Win ] test2 [ Crash ]\n'
                                '\n'))
 def test_remove_after_add(self):
     port = MockHost().port_factory.get('test-win-win7')
     raw_expectations = ('# tags: [ Mac Win ]\n'
                         '# results: [ Failure Crash ]\n'
                         '\n'
                         '# This comment will not be deleted\n'
                         '[ mac ] test1 [ Failure ]\n')
     expectations_dict = OrderedDict()
     expectations_dict['/tmp/TestExpectations'] = ''
     expectations_dict['/tmp/TestExpectations2'] = raw_expectations
     test_expectations = TestExpectations(port, expectations_dict)
     test_to_exps = test_expectations._expectations[1].individual_exps
     test_expectations.add_expectations('/tmp/TestExpectations2', [
         Expectation(test='test2', results=set([ResultType.Failure])),
         Expectation(test='test3',
                     results=set([ResultType.Crash]),
                     tags=set(['mac']))
     ], 5)
     test_expectations.remove_expectations('/tmp/TestExpectations2',
                                           [test_to_exps['test1'][0]])
     test_expectations.commit_changes()
     content = port.host.filesystem.read_text_file('/tmp/TestExpectations2')
     self.assertEqual(content, ('# tags: [ Mac Win ]\n'
                                '# results: [ Failure Crash ]\n'
                                '\n'
                                '# This comment will not be deleted\n'
                                'test2 [ Failure ]\n'
                                '[ Mac ] test3 [ Crash ]\n'))
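The first test above illustrates how results for the same test are merged across multiple expectation files: by default the entries are unioned, whereas a "# conflict_resolution: Override" header makes the later file's entry replace the earlier one. A minimal sketch of the same behaviour outside the unittest harness, assuming the same MockHost/TestExpectations imports and the raw_expectations_1 / raw_expectations_3 strings defined in the test above:

from collections import OrderedDict

# Build one TestExpectations object from two raw files; the second file
# declares "# conflict_resolution: Override", so its [ Pass ] entry for
# test1 replaces the [ Failure ] entry from the first file instead of
# being merged with it.
port = MockHost().port_factory.get('test-win-win7')
expectations = TestExpectations(port, OrderedDict([
    ('/tmp/TestExpectations', raw_expectations_1),
    ('/tmp/TestExpectations2', raw_expectations_3),
]))
# Expected to print only the Pass result, as asserted in the test above.
print(expectations.get_expectations('test1').results)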
Example 4
    def _collect_expectation_lines(self, builder_names, factory):
        exps = []
        for builder_name in builder_names:

            expectations = factory.expectations_for_builder(builder_name)

            # TODO(ojan): We should also skip bots that haven't uploaded recently,
            # e.g. if they're >24h stale.
            if not expectations:
                _log.error("Can't load flakiness data for builder: %s",
                           builder_name)
                continue

            for line in expectations.expectation_lines(
                    only_ignore_very_flaky=True):
                # TODO(ojan): Find a way to merge specifiers instead of removing build types.
                # We can't just union because some specifiers will change the meaning of others.
                # For example, it's not clear how to merge [ Mac Release ] with [ Linux Debug ].
                # But, in theory we should be able to merge [ Mac Release ] and [ Mac Debug ].
                tags = self._filter_build_type_specifiers(line.tags)
                exps.append(
                    Expectation(tags=tags,
                                results=line.results,
                                test=line.test))
        return exps
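    # _filter_build_type_specifiers is referenced above but not shown in this
    # snippet. A hypothetical sketch of what it plausibly does: drop the build
    # type tags so that, e.g., [ Mac Release ] and [ Mac Debug ] both collapse
    # to [ Mac ]. The exact tag names here are an assumption.
    def _filter_build_type_specifiers(self, tags):
        build_types = {'release', 'debug'}
        return {tag for tag in tags if tag.lower() not in build_types}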
    def write_to_test_expectations(self, test_to_results):
        """Each expectations file is browser specific, and currently only
        runs on pie. Therefore we do not need any configuration specifiers
        to anotate expectations for certain builds.

        Args:
            test_to_results: A dictionary that maps test names to another
            dictionary which maps a tuple of build configurations and to
            a test result.
        Returns:
            Dictionary mapping test names to lists of expectation strings.
        """
        browser_to_exp_path = {
            browser: PRODUCTS_TO_EXPECTATION_FILE_PATHS[product]
            for product, browser in PRODUCTS_TO_BROWSER_TAGS.items()}
        untriaged_exps = defaultdict(dict)

        for path in self._test_expectations.expectations_dict:
            marker_lineno = self._get_marker_line_number(path)
            exp_lines = self._test_expectations.get_updated_lines(path)
            for i in range(marker_lineno, len(exp_lines)):
                if (not exp_lines[i].to_string().strip() or
                        exp_lines[i].to_string().startswith('#')):
                    break
                untriaged_exps[path][exp_lines[i].test] = exp_lines[i]

        for path, test_exps in untriaged_exps.items():
            self._test_expectations.remove_expectations(
                path, test_exps.values())

        for results_test_name, platform_results in test_to_results.items():
            exps_test_name = 'external/wpt/%s' % results_test_name
            for configs, test_results in platform_results.items():
                for config in configs:
                    path = browser_to_exp_path[config.browser]
                    # No system specifiers are necessary because we are
                    # writing to browser-specific expectations files for
                    # only one Android version.
                    unexpected_results = {r for r in test_results.actual.split()
                                          if r not in test_results.expected.split()}

                    if exps_test_name not in untriaged_exps[path]:
                        untriaged_exps[path][exps_test_name] = Expectation(
                            test=exps_test_name, reason=self.UMBRELLA_BUG,
                            results=unexpected_results)
                    else:
                        untriaged_exps[path][exps_test_name].add_expectations(
                            unexpected_results, reason=self.UMBRELLA_BUG)

        for path in untriaged_exps:
            marker_lineno = self._get_marker_line_number(path)
            self._test_expectations.add_expectations(
                path,
                sorted(untriaged_exps[path].values(), key=lambda e: e.test),
                marker_lineno)

        self._test_expectations.commit_changes()
        # TODO(rmhasan): Return dictionary mapping test names to lists of
        # test expectation strings.
        return {}
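The docstring only hints at the shape of test_to_results. Below is a hypothetical illustration of the input this method iterates over; the config and result types are assumptions inferred purely from the attribute accesses in the loop (config.browser, test_results.actual, test_results.expected), and the browser value must be one of the browser tags in PRODUCTS_TO_BROWSER_TAGS:

import collections

# Stand-in types for the sketch; the real objects come from the results
# fetching code and are not shown in this example.
SimpleConfig = collections.namedtuple('SimpleConfig', ['browser'])
SimpleResult = collections.namedtuple('SimpleResult', ['actual', 'expected'])

test_to_results = {
    'foo/bar.html': {  # test name relative to external/wpt/
        (SimpleConfig(browser='some-browser-tag'),): SimpleResult(
            actual='FAIL CRASH', expected='PASS'),
    },
}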
Example 6
 def _create_expectation_line(self, result, test_configuration):
     expected_results = set([res for res in result.actual_results().split()])
     tag = test_configuration.version
     reason = ''
     if self._args.bug:
         reason = 'crbug.com/' + self._args.bug
     return Expectation(
         test=result.test_name(), results=expected_results, tags=set([tag]), reason=reason)
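For context, a hypothetical example of the line such an Expectation serializes to; the version tag and bug number are made up, and to_string() is the serialization method used in the later examples:

line = Expectation(test='external/wpt/foo.html',
                   results={ResultType.Failure},
                   tags={'win7'},
                   reason='crbug.com/1234')
# The rendered expectation line should look roughly like:
#   crbug.com/1234 [ Win7 ] external/wpt/foo.html [ Failure ]
print(line.to_string())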
Example 7
 def _maybe_create_never_fix_expectation(
         self, path, test, test_skipped, tags):
     if test_skipped:
         exps = self._test_expectations.get_expectations_from_file(
             path, test)
         wontfix = self._never_fix_expectations.matches_an_expected_result(
             test, ResultType.Skip)
         temporary_skip = any(ResultType.Skip in exp.results for exp in exps)
         if not (wontfix or temporary_skip):
             return Expectation(
                 test=test, reason=self.UMBRELLA_BUG,
                 results={ResultType.Skip}, tags=tags, raw_tags=tags)
 def test_add_expectations_to_end_of_file(self):
     port = MockHost().port_factory.get('test-win-win7')
     raw_expectations = ('# tags: [ Mac Win ]\n'
                         '# tags: [ release ]\n'
                         '# results: [ Failure ]\n'
                         '\n'
                         '# this is a block of expectations\n'
                         'test [ failure ]\n')
     expectations_dict = OrderedDict()
     expectations_dict['/tmp/TestExpectations'] = ''
     expectations_dict['/tmp/TestExpectations2'] = raw_expectations
     test_expectations = TestExpectations(port, expectations_dict)
     test_expectations.add_expectations(
         '/tmp/TestExpectations2',
         [Expectation(test='test3', results=set([ResultType.Failure]))],
         append_to_end_of_file=True)
     test_expectations.add_expectations('/tmp/TestExpectations2', [
         Expectation(test='test2',
                     tags={'mac', 'release'},
                     results={ResultType.Crash, ResultType.Failure})
     ],
                                        append_to_end_of_file=True)
     test_expectations.add_expectations(
         '/tmp/TestExpectations2',
         [Expectation(test='test1', results=set([ResultType.Pass]))],
         append_to_end_of_file=True)
     test_expectations.commit_changes()
     content = port.host.filesystem.read_text_file('/tmp/TestExpectations2')
     self.assertEqual(content, ('# tags: [ Mac Win ]\n'
                                '# tags: [ release ]\n'
                                '# results: [ Failure ]\n'
                                '\n'
                                '# this is a block of expectations\n'
                                'test [ failure ]\n'
                                '\n'
                                'test1 [ Pass ]\n'
                                '[ Release Mac ] test2 [ Failure Crash ]\n'
                                'test3 [ Failure ]\n'))
 def test_add_expectation(self):
     port = MockHost().port_factory.get('test-win-win7')
     raw_expectations = ('# tags: [ Mac Win ]\n' '# results: [ Failure ]\n')
     expectations_dict = OrderedDict()
     expectations_dict['/tmp/TestExpectations'] = ''
     expectations_dict['/tmp/TestExpectations2'] = raw_expectations
     test_expectations = TestExpectations(port, expectations_dict)
     test_expectations.add_expectations(
         '/tmp/TestExpectations2',
         [Expectation(test='test1', results=set([ResultType.Failure]))])
     test_expectations.commit_changes()
     content = port.host.filesystem.read_text_file('/tmp/TestExpectations2')
     self.assertEqual(content, ('# tags: [ Mac Win ]\n'
                                '# results: [ Failure ]\n'
                                '\n'
                                'test1 [ Failure ]\n'))
    def test_add_expectation_outside_file_size_range(self):
        port = MockHost().port_factory.get('test-win-win7')
        raw_expectations = ('# tags: [ Mac Win ]\n'
                            '# tags: [ release ]\n'
                            '# results: [ Failure ]\n'
                            '\n'
                            '# this is a block of expectations\n'
                            'test [ failure ]\n')
        expectations_dict = OrderedDict()
        expectations_dict['/tmp/TestExpectations'] = ''
        expectations_dict['/tmp/TestExpectations2'] = raw_expectations
        test_expectations = TestExpectations(port, expectations_dict)

        with self.assertRaises(ValueError) as ctx:
            test_expectations.add_expectations(
                '/tmp/TestExpectations2',
                [Expectation(test='test3', results=set([ResultType.Failure]))],
                lineno=100)
            test_expectations.commit_changes()
        self.assertIn('greater than the total line count', str(ctx.exception))
    def test_add_expectation_with_negative_lineno(self):
        port = MockHost().port_factory.get('test-win-win7')
        raw_expectations = ('# tags: [ Mac Win ]\n'
                            '# tags: [ release ]\n'
                            '# results: [ Failure ]\n'
                            '\n'
                            '# this is a block of expectations\n'
                            'test [ failure ]\n')
        expectations_dict = OrderedDict()
        expectations_dict['/tmp/TestExpectations'] = ''
        expectations_dict['/tmp/TestExpectations2'] = raw_expectations
        test_expectations = TestExpectations(port, expectations_dict)

        with self.assertRaises(ValueError) as ctx:
            test_expectations.add_expectations(
                '/tmp/TestExpectations2',
                [Expectation(test='test3', results=set([ResultType.Failure]))],
                lineno=-1)
            test_expectations.commit_changes()
        self.assertIn('cannot be negative', str(ctx.exception))
Example 12
    def test_use_append_to_end_flag_non_zero_lineno(self):
        # Using append_to_end_of_file=True together with a non-zero lineno
        # should raise an exception.
        port = MockHost().port_factory.get('test-win-win7')
        raw_expectations = ('# tags: [ Mac Win ]\n'
                            '# tags: [ release ]\n'
                            '# results: [ Failure ]\n'
                            '\n'
                            '# this is a block of expectations\n'
                            'test [ failure ]\n')
        expectations_dict = OrderedDict()
        expectations_dict['/tmp/TestExpectations'] = ''
        expectations_dict['/tmp/TestExpectations2'] = raw_expectations
        test_expectations = TestExpectations(port, expectations_dict)

        with self.assertRaises(ValueError) as ctx:
            test_expectations.add_expectations(
                '/tmp/TestExpectations2',
                [Expectation(test='test3',
                             results=set([ResultType.Failure]))],
                lineno=100, append_to_end_of_file=True)
            test_expectations.commit_changes()
        self.assertIn('append_to_end_of_file is set then lineno must be 0',
                      str(ctx.exception))
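Taken together, the three ValueError tests above pin down how add_expectations validates its lineno argument. A hypothetical reconstruction of that validation, based only on the error messages asserted in the tests (the real implementation and exact wording may differ):

def _validate_lineno(lineno, total_lines, append_to_end_of_file):
    # Hypothetical helper mirroring the checks exercised by the tests.
    if append_to_end_of_file and lineno:
        raise ValueError(
            'If append_to_end_of_file is set then lineno must be 0')
    if lineno < 0:
        raise ValueError('lineno cannot be negative')
    if lineno > total_lines:
        raise ValueError(
            'lineno is greater than the total line count of the file')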
Example 13
 def _line_from_test_and_flaky_types(self, test_name, flaky_types):
     return Expectation(tags=self.specifiers,
                        test=test_name,
                        results=flaky_types)
Example 14
    def write_to_test_expectations(self, test_to_results):
        """Each expectations file is browser specific, and currently only
        runs on pie. Therefore we do not need any configuration specifiers
        to anotate expectations for certain builds.

        Args:
            test_to_results: A dictionary that maps test names to another
            dictionary which maps a tuple of build configurations and to
            a test result.
        Returns:
            Dictionary mapping test names to lists of expectation strings.
        """
        browser_to_exp_path = {
            browser: PRODUCTS_TO_EXPECTATION_FILE_PATHS[product]
            for product, browser in PRODUCTS_TO_BROWSER_TAGS.items()
        }
        untriaged_exps = self._get_untriaged_test_expectations(
            self._test_expectations, self.MARKER_COMMENT)
        neverfix_tests = self._get_untriaged_test_expectations(
            self._never_fix_expectations,
            self.NEVER_FIX_MARKER_COMMENT)[ANDROID_DISABLED_TESTS]

        for path, test_exps in untriaged_exps.items():
            self._test_expectations.remove_expectations(
                path, reduce(lambda x, y: x + y, test_exps.values()))

        if neverfix_tests:
            self._never_fix_expectations.remove_expectations(
                ANDROID_DISABLED_TESTS,
                reduce(lambda x, y: x + y, neverfix_tests.values()))

        for results_test_name, platform_results in test_to_results.items():
            exps_test_name = 'external/wpt/%s' % results_test_name
            for configs, test_results in platform_results.items():
                for config in configs:
                    path = browser_to_exp_path[config.browser]
                    neverfix_exp = self._maybe_create_never_fix_expectation(
                        path, exps_test_name, ResultType.Skip
                        in test_results.actual, {config.browser.lower()})
                    if neverfix_exp:
                        neverfix_tests.setdefault(exps_test_name,
                                                  []).append(neverfix_exp)
                    else:
                        # No system specifiers are necessary because we are
                        # writing to browser-specific expectations files for
                        # only one Android version.
                        unexpected_results = {
                            r
                            for r in test_results.actual.split()
                            if r not in test_results.expected.split()
                        }

                        if exps_test_name not in untriaged_exps[path]:
                            untriaged_exps[path].setdefault(
                                exps_test_name, []).append(
                                    Expectation(test=exps_test_name,
                                                reason=self.UMBRELLA_BUG,
                                                results=unexpected_results))
                        else:
                            exp = untriaged_exps[path][exps_test_name][0]
                            exp.add_expectations(unexpected_results,
                                                 reason=self.UMBRELLA_BUG)

        for path in untriaged_exps:
            marker_lineno = self._get_marker_line_number(
                self._test_expectations, path, self.MARKER_COMMENT)
            self._test_expectations.add_expectations(
                path,
                sorted([exps[0] for exps in untriaged_exps[path].values()],
                       key=lambda e: e.test), marker_lineno)

        disabled_tests_marker_lineno = self._get_marker_line_number(
            self._never_fix_expectations, ANDROID_DISABLED_TESTS,
            self.NEVER_FIX_MARKER_COMMENT)

        if neverfix_tests:
            self._never_fix_expectations.add_expectations(
                ANDROID_DISABLED_TESTS,
                sorted(reduce(lambda x, y: x + y, neverfix_tests.values()),
                       key=lambda e: e.test), disabled_tests_marker_lineno)

        self._test_expectations.commit_changes()
        self._never_fix_expectations.commit_changes()

        # TODO(rmhasan): Return dictionary mapping test names to lists of
        # test expectation strings.
        return {}
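Both this example and the next call _get_untriaged_test_expectations, which is not shown in either snippet. Judging from how its result is used (indexed as untriaged_exps[path][test][0] and flattened with reduce), it plausibly mirrors the inline collection loop in Example 4 but stores a list of lines per test. A hypothetical sketch matching the two-argument form used in this example:

    def _get_untriaged_test_expectations(self, expectations, marker_comment):
        # Hypothetical reconstruction: gather every expectation line sitting
        # directly below the marker comment of each file, keyed by file path
        # and then by test name. Assumes defaultdict is imported as in the
        # surrounding code.
        untriaged_exps = defaultdict(dict)
        for path in expectations.expectations_dict:
            marker_lineno = self._get_marker_line_number(
                expectations, path, marker_comment)
            for line in expectations.get_updated_lines(path)[marker_lineno:]:
                content = line.to_string().strip()
                if not content or content.startswith('#'):
                    break
                untriaged_exps[path].setdefault(line.test, []).append(line)
        return untriaged_exps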
Example 15
    def write_to_test_expectations(self, test_to_results):
        """Each expectations file is browser specific, and currently only
        runs on pie. Therefore we do not need any configuration specifiers
        to anotate expectations for certain builds.

        Args:
            test_to_results: A dictionary that maps test names to another
            dictionary which maps a tuple of build configurations and to
            a test result.
        Returns:
            Dictionary mapping test names to lists of expectation strings.
        """
        browser_to_product = {
            browser: product
            for product, browser in PRODUCTS_TO_BROWSER_TAGS.items()
        }
        browser_to_exp_path = {
            browser: PRODUCTS_TO_EXPECTATION_FILE_PATHS[product]
            for product, browser in PRODUCTS_TO_BROWSER_TAGS.items()
        }
        product_exp_paths = {
            PRODUCTS_TO_EXPECTATION_FILE_PATHS[prod]
            for prod in self.options.android_product
        }
        untriaged_exps = self._get_untriaged_test_expectations(
            self._test_expectations, product_exp_paths, self.MARKER_COMMENT)
        neverfix_tests = self._get_untriaged_test_expectations(
            self._never_fix_expectations, [ANDROID_DISABLED_TESTS],
            self.NEVER_FIX_MARKER_COMMENT)[ANDROID_DISABLED_TESTS]

        for path, test_exps in untriaged_exps.items():
            self._test_expectations.remove_expectations(
                path, reduce(lambda x, y: x + y, list(test_exps.values())))

        if neverfix_tests:
            self._never_fix_expectations.remove_expectations(
                ANDROID_DISABLED_TESTS,
                reduce(lambda x, y: x + y, list(neverfix_tests.values())))

        exp_lines_dict_by_product = defaultdict(dict)
        for results_test_name, platform_results in test_to_results.items():
            exps_test_name = 'external/wpt/%s' % results_test_name
            for configs, test_results in platform_results.items():
                for config in configs:
                    path = browser_to_exp_path[config.browser]
                    neverfix_exp = self._maybe_create_never_fix_expectation(
                        path, exps_test_name, ResultType.Skip
                        in test_results.actual, {config.browser.lower()})
                    if neverfix_exp:
                        neverfix_tests.setdefault(exps_test_name,
                                                  []).append(neverfix_exp)
                    else:
                        # No system specifiers are necessary because we are
                        # writing to browser-specific expectations files for
                        # only one Android version.
                        unexpected_results = {
                            r
                            for r in test_results.actual.split()
                            if r not in test_results.expected.split()
                        }

                        # Since we use override expectations on the Android
                        # side, do not create an override expectation if it
                        # is a subset of the default expectations or baseline.
                        default_expectation = \
                            self._get_expectations_from_baseline(results_test_name)
                        if unexpected_results.issubset(default_expectation):
                            continue

                        # Test expectations for modified test cases have
                        # already been deleted, so every test here should be
                        # treated as a new test.
                        expectation = Expectation(test=exps_test_name,
                                                  reason=self.UMBRELLA_BUG,
                                                  results=unexpected_results)
                        product = browser_to_product[config.browser]
                        exp_lines_dict_by_product[product][exps_test_name] = \
                            expectation.to_string()
                        untriaged_exps[path].setdefault(exps_test_name,
                                                        []).append(expectation)

        for path in untriaged_exps:
            marker_lineno = self._get_marker_line_number(
                self._test_expectations, path, self.MARKER_COMMENT)
            self._test_expectations.add_expectations(
                path,
                sorted([exps[0] for exps in untriaged_exps[path].values()],
                       key=lambda e: e.test), marker_lineno)

        disabled_tests_marker_lineno = self._get_marker_line_number(
            self._never_fix_expectations, ANDROID_DISABLED_TESTS,
            self.NEVER_FIX_MARKER_COMMENT)

        if neverfix_tests:
            self._never_fix_expectations.add_expectations(
                ANDROID_DISABLED_TESTS,
                sorted(reduce(lambda x, y: x + y,
                              list(neverfix_tests.values())),
                       key=lambda e: e.test), disabled_tests_marker_lineno)

        self._test_expectations.commit_changes()
        self._never_fix_expectations.commit_changes()

        # Return a dictionary that maps each product to a dictionary mapping
        # test names to test expectation strings.
        return exp_lines_dict_by_product
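Unlike the earlier versions, this one actually returns the mapping described in the trailing comment. A hypothetical usage sketch; "updater" and "test_to_results" are placeholders for however the surrounding tooling constructs them:

exp_lines_dict_by_product = updater.write_to_test_expectations(test_to_results)
for product, lines_by_test in exp_lines_dict_by_product.items():
    for test_name, exp_line in lines_by_test.items():
        # Each value is a single serialized expectation line, roughly of the
        # form "<UMBRELLA_BUG> external/wpt/<test> [ Failure ]".
        print(product, test_name, exp_line)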