Example #1
    def test_basic(self):

        #   metric full name            | (build_a result, build_b result, expected result)
        # expected result: True -> the metric has been fixed, False -> the metric has regressed,
        # None -> neither a regression nor a fix
        test_cases = {
            'suite_a/improved-metric-higher-better': (1, 2, True),
            'suite_a/improved-metric-lower-better': (2, 1, True),
            'suite_a/regressing-metric-higher-better': (2, 1, False),
            'suite_a/regressing-metric-lower-better': (1, 2, False),
            'suite_a/stable-metric': (1, 1, None),
            'suite_a/thresholdless-metric': (1, 2, None),
            'suite_a/valueness-threshold-metric': (1, 2, None),
        }

        for metric_name, (build_a_result, build_b_result, expected) in test_cases.items():

            # Post build_a results
            self.receive_test_run(self.project, self.build_a.version,
                                  self.environment_a.slug, {
                                      metric_name: build_a_result,
                                  })

            # Post build_b results
            self.receive_test_run(self.project, self.build_b.version,
                                  self.environment_a.slug, {
                                      metric_name: build_b_result,
                                  })

            comparison = MetricComparison(self.build_a,
                                          self.build_b,
                                          regressions_and_fixes_only=True)
            if expected is True:
                self.assertIn(metric_name,
                              comparison.fixes[self.environment_a.slug])
            elif expected is False:
                self.assertIn(metric_name,
                              comparison.regressions[self.environment_a.slug])
            else:
                self.assertNotIn(
                    metric_name,
                    comparison.regressions[self.environment_a.slug])
                self.assertNotIn(metric_name,
                                 comparison.fixes[self.environment_a.slug])
Example #2
    def test_different_suites(self):
        metric_name = 'different-suite-metric'
        build_a_result = 1
        build_b_result = 2

        # Post build_a results
        self.receive_test_run(self.project, self.build_a.version,
                              self.environment_a.slug, {
                                  'suite_a/' + metric_name: build_a_result,
                              })

        # Post build_b results
        self.receive_test_run(self.project, self.build_b.version,
                              self.environment_a.slug, {
                                  'suite_b/' + metric_name: build_b_result,
                              })

        comparison = MetricComparison(self.build_a,
                                      self.build_b,
                                      regressions_and_fixes_only=True)
        self.assertEqual(0, len(comparison.regressions))
        self.assertEqual(0, len(comparison.fixes))
Example #3
    def test_empty_with_no_builds(self):
        new_project = self.group.projects.create(slug='new')
        comparison = MetricComparison.compare_projects(new_project)
        self.assertFalse(comparison.diff)
Example #4
    def test_no_data(self):
        new_project = self.group.projects.create(slug='new')
        comp = MetricComparison.compare_projects(new_project)
        self.assertFalse(comp.diff)
        self.assertEqual([], comp.builds)
Example #5
    def test_compare_projects(self):
        comp = MetricComparison.compare_projects(self.project1, self.project2)
        self.assertEqual([self.build1, self.build3], comp.builds)
Example #6
def compare(b1, b2):
    return MetricComparison.compare_builds(b1, b2)
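
A minimal usage sketch of the compare() helper from Example #6, under stated assumptions: the hypothetical report() function and its build1/build2 arguments stand in for Build objects like self.build_a/self.build_b in the earlier examples, and the object returned by MetricComparison.compare_builds is assumed to expose the same regressions and fixes mappings (keyed by environment slug, with collections of full metric names as values) that Examples #1 and #2 read. The import path for MetricComparison is not shown in these examples and is likewise omitted here.

def report(build1, build2):
    # Assumption: compare_builds returns a comparison object with dict-like
    # 'regressions' and 'fixes' attributes keyed by environment slug, as read
    # in Examples #1 and #2.
    comparison = compare(build1, build2)

    for env_slug, metric_names in comparison.regressions.items():
        print('%s: %d regressed metric(s)' % (env_slug, len(metric_names)))

    for env_slug, metric_names in comparison.fixes.items():
        print('%s: %d fixed metric(s)' % (env_slug, len(metric_names)))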