Example #1
0
    def test_update_build_summary(self):
        # env1 gains one extra metric (result=5) and one extra passing test,
        # so its summary must be recomputed over five values.
        env1_values = [1, 2, 3, 4, 5]
        env2_values = [2, 4, 6, 8]
        extra_run = self.build1.test_runs.create(environment=self.env1)
        extra_run.metrics.create(name='new_foo', suite=self.suite1, result=5)

        self.receive_testrun(self.build1.version, self.env1.slug, tests_file='{"suite1/new_foo": "pass"}')

        summary1 = BuildSummary.create_or_update(self.build1, self.env1)
        summary2 = BuildSummary.create_or_update(self.build1, self.env2)

        for summary, values in ((summary1, env1_values), (summary2, env2_values)):
            self.assertTrue(summary.has_metrics)
            self.assertTrue(eq(geomean(values), summary.metrics_summary))

        expectations = (
            # (summary, total, pass, fail, skip, xfail)
            (summary1, 5, 2, 1, 1, 1),
            (summary2, 3, 1, 1, 1, 0),
        )
        for summary, total, passed, failed, skipped, xfailed in expectations:
            self.assertEqual(total, summary.tests_total)
            self.assertEqual(passed, summary.tests_pass)
            self.assertEqual(failed, summary.tests_fail)
            self.assertEqual(skipped, summary.tests_skip)
            self.assertEqual(xfailed, summary.tests_xfail)
Example #2
0
    def test_environment_summary(self):
        """Each environment of a build gets its own metrics geomean."""
        expectations = (
            (self.env1, [1, 2, 3, 4]),
            (self.env2, [2, 4, 6, 8]),
        )
        for env, values in expectations:
            summary = MetricsSummary(self.build1, env)
            self.assertTrue(summary.has_metrics)
            self.assertTrue(eq(geomean(values), summary.value))
Example #3
0
    def __call__(testrun):
        """Tally per-suite and run-wide Status rows for a test run.

        Idempotent: a run whose status was already recorded is skipped.
        """
        if testrun.status_recorded:
            return

        status = defaultdict(lambda: Status(test_run=testrun))

        def bump(suite_id, field):
            # Every result counts twice: once in the run-wide bucket
            # (key None) and once in the suite's own bucket.
            for key in (None, suite_id):
                entry = status[key]
                setattr(entry, field, getattr(entry, field) + 1)

        for test in testrun.tests.all():
            if test.result is True:
                bump(test.suite_id, 'tests_pass')
            elif test.result is False:
                bump(test.suite_id, 'tests_fail')
            else:
                bump(test.suite_id, 'tests_skip')

        metrics = defaultdict(list)
        for metric in testrun.metrics.all():
            for measurement in metric.measurement_list:
                metrics[None].append(measurement)
                metrics[metric.suite_id].append(measurement)

        for suite_id, values in metrics.items():
            status[suite_id].metrics_summary = geomean(values)

        for suite_id, record in status.items():
            record.suite_id = suite_id
            record.save()

        testrun.status_recorded = True
        testrun.save()
def create_or_update_projectstatus(apps, schema_editor):
    """Backfill or refresh a ProjectStatus row for every build, oldest first."""
    ProjectStatus = apps.get_model('core', 'ProjectStatus')
    Build = apps.get_model('core', 'Build')
    Metric = apps.get_model('core', 'Metric')
    for build in Build.objects.order_by('datetime').prefetch_related('status'):
        try:
            status = build.status
        except ProjectStatus.DoesNotExist:
            # First status for this build: chain it to the most recent
            # status of an earlier build in the same project.
            previous = ProjectStatus.objects.filter(
                build__project=build.project,
                build__datetime__lt=build.datetime,
            ).last()
            status = ProjectStatus(build=build, previous=previous)

        build_metrics = Metric.objects.filter(test_run__build_id=build.id).all()
        summary = geomean([metric.result for metric in build_metrics])
        test_summary = TestSummary(build)

        status.tests_pass = test_summary.tests_pass
        status.tests_fail = test_summary.tests_fail
        status.tests_skip = test_summary.tests_skip
        status.metrics_summary = summary
        status.last_updated = timezone.now()
        status.save()
Example #5
0
    def __call__(testrun):
        """Aggregate a test run's results and metrics into Status rows:
        one per suite, plus a run-wide row keyed on suite=None."""
        if testrun.status_recorded:
            return

        status = defaultdict(lambda: Status(test_run=testrun))
        tests = testrun.tests

        def counts_per_suite(queryset, alias):
            # One row per suite_id; the trailing order_by() clears any
            # default model ordering so it cannot leak into the GROUP BY.
            return queryset.values('suite_id').annotate(
                **{alias: Count('suite_id')}).order_by()

        tallies = (
            ('tests_pass', 'pass_count',
             counts_per_suite(tests.filter(result=True), 'pass_count')),
            ('tests_xfail', 'xfail_count',
             counts_per_suite(tests.filter(result=False, has_known_issues=True),
                              'xfail_count')),
            ('tests_fail', 'fail_count',
             counts_per_suite(tests.filter(result=False).exclude(has_known_issues=True),
                              'fail_count')),
            ('tests_skip', 'skip_count',
             counts_per_suite(tests.filter(result__isnull=True), 'skip_count')),
        )

        for field, alias, rows in tallies:
            for row in rows:
                # Count both run-wide (None) and against the suite itself.
                for key in (None, row['suite_id']):
                    entry = status[key]
                    setattr(entry, field, getattr(entry, field) + row[alias])

        metrics = defaultdict(list)
        for metric in testrun.metrics.all():
            for measurement in metric.measurement_list:
                metrics[None].append(measurement)
                metrics[metric.suite_id].append(measurement)

        # The suite=None bucket summarizes the whole test run; every other
        # bucket gets its own geomean.  NOTE(review): reading metrics[None]
        # inserts the key into the defaultdict, so the run-wide bucket always
        # flows through the loop below even with no metrics — preserved as-is.
        if len(metrics[None]):
            status[None].has_metrics = True
        for suite_id, values in metrics.items():
            status[suite_id].metrics_summary = geomean(values)
            status[suite_id].has_metrics = True

        for suite_id, record in status.items():
            record.suite_id = suite_id
            record.suite_version = get_suite_version(testrun, record.suite)
            record.save()

        testrun.status_recorded = True
        testrun.save()
Example #6
0
 def __init__(self, build, environment=None):
     """Compute the geomean of all metric results for *build*.

     When *environment* is given, only metrics from test runs in that
     environment are considered.
     """
     filters = {'test_run__build_id': build.id}
     if environment:
         filters['test_run__environment_id'] = environment.id
     values = [metric.result for metric in Metric.objects.filter(**filters).all()]
     self.value = geomean(values)
     self.has_metrics = bool(values)
Example #7
0
    def test_basic_build_summary(self):
        """Each environment's BuildSummary carries its own counts and geomean."""
        expectations = (
            # (environment, metric values, total, pass, fail, skip, xfail)
            (self.env1, [1, 2, 3, 4], 4, 1, 1, 1, 1),
            (self.env2, [2, 4, 6, 8], 3, 1, 1, 1, 0),
        )
        for env, values, total, passed, failed, skipped, xfailed in expectations:
            summary = BuildSummary.create_or_update(self.build1, env)
            self.assertTrue(summary.has_metrics)
            self.assertTrue(eq(geomean(values), summary.metrics_summary))
            self.assertEqual(total, summary.tests_total)
            self.assertEqual(passed, summary.tests_pass)
            self.assertEqual(failed, summary.tests_fail)
            self.assertEqual(skipped, summary.tests_skip)
            self.assertEqual(xfailed, summary.tests_xfail)
Example #8
0
    def __call__(testrun):
        """Record Status rows for *testrun* and, when the build's project
        status is finished, schedule the notification background task."""
        if testrun.status_recorded:
            return

        status = defaultdict(lambda: Status(test_run=testrun))

        def count(suite_id, field):
            # Each result is tallied run-wide (key None) and per suite.
            for key in (None, suite_id):
                entry = status[key]
                setattr(entry, field, getattr(entry, field) + 1)

        for test in testrun.tests.all():
            if test.result is True:
                count(test.suite_id, 'tests_pass')
            elif test.result is False:
                count(test.suite_id, 'tests_fail')
            else:
                count(test.suite_id, 'tests_skip')

        metrics = defaultdict(list)
        for metric in testrun.metrics.all():
            for measurement in metric.measurement_list:
                metrics[None].append(measurement)
                metrics[metric.suite_id].append(measurement)

        for suite_id, values in metrics.items():
            status[suite_id].metrics_summary = geomean(values)

        for suite_id, record in status.items():
            record.suite_id = suite_id
            record.save()

        testrun.status_recorded = True
        testrun.save()

        status = ProjectStatus.create_or_update(testrun.build)
        if status.finished:
            try:
                notify_project_status.delay(status.id)
            except OSError as e:
                # Scheduling the task is best-effort: without a running AMQP
                # server this raises "OSError: [Errno 111] Connection refused"
                # (common in development environments, and in production
                # setups that skip the background job processes because they
                # need neither e-mail notifications nor CI integration).
                # Log the failure and carry on.
                logger.error("Cannot schedule notification: " + str(e) + "\n" +
                             traceback.format_exc())
Example #9
0
def get_dynamic_summary(project, environments, metrics, date_start, date_end):
    """Build per-environment metric chart data for *project*.

    Returns a dict mapping environment slug to a list of
    [timestamp, geomean, build version, annotation description] entries
    sorted by timestamp. Only metrics named in *metrics* (as
    "suite/metric" strings) recorded between *date_start* and *date_end*
    are considered; outlier measurements are excluded.
    """
    if not metrics:
        # Nothing selected: one empty series per requested environment.
        return {env: [] for env in environments}

    filters = []
    for m in metrics:
        suite, metric = parse_name(m)
        filters.append(Q(suite__slug=suite) & Q(name=metric))
    metric_filter = reduce(lambda x, y: x | y, filters)

    data = models.Metric.objects.filter(
        test_run__build__project=project,
        test_run__environment__slug__in=environments,
        test_run__created_at__range=(date_start, date_end),
    ).filter(metric_filter).prefetch_related(
        'test_run',
        'test_run__environment',
        'test_run__build',
        'test_run__build__annotation',
    ).order_by('test_run__environment__id', 'test_run__build__id')

    entry = {}
    # groupby relies on the queryset ordering above (environment, then build).
    for environment, metrics_by_environment in groupby(
            data, lambda m: m.test_run.environment):
        envdata = []
        metrics_by_build = groupby(metrics_by_environment,
                                   lambda m: m.test_run.build)
        for build, metric_list in metrics_by_build:
            values = []
            for metric in metric_list:
                if not metric.is_outlier:
                    # extend() instead of repeated list concatenation, which
                    # was accidentally O(n^2) in the number of measurements.
                    values.extend(metric.measurement_list)
            try:
                description = build.annotation.description
            except ObjectDoesNotExist:
                # Builds without an annotation get an empty description.
                description = ""
            envdata.append([
                build.datetime.timestamp(),
                geomean(values),
                build.version,
                description,
            ])
        entry[environment.slug] = sorted(envdata, key=(lambda e: e[0]))

    return entry
Example #10
0
    def __call__(testrun):
        """Tally test results (pass/fail/xfail/skip) and metric geomeans
        into Status rows, one per suite plus a run-wide row (suite=None)."""
        if testrun.status_recorded:
            return

        status = defaultdict(lambda: Status(test_run=testrun))

        def tally(suite_id, field):
            # A result counts against the run-wide entry (key None) and
            # against the suite it belongs to.
            for key in (None, suite_id):
                entry = status[key]
                setattr(entry, field, getattr(entry, field) + 1)

        for test in testrun.tests.all():
            if test.result is True:
                tally(test.suite_id, 'tests_pass')
            elif test.result is False:
                # A failure covered by a known issue is an expected failure.
                if test.known_issues.exists():
                    tally(test.suite_id, 'tests_xfail')
                else:
                    tally(test.suite_id, 'tests_fail')
            else:
                tally(test.suite_id, 'tests_skip')

        metrics = defaultdict(list)
        for metric in testrun.metrics.all():
            for measurement in metric.measurement_list:
                metrics[None].append(measurement)
                metrics[metric.suite_id].append(measurement)

        # The suite=None bucket summarizes the whole test run; every suite
        # also gets its own geomean.  NOTE(review): reading metrics[None]
        # inserts the key into the defaultdict, so the run-wide bucket always
        # goes through the loop below even with no metrics — preserved as-is.
        if len(metrics[None]):
            status[None].has_metrics = True
        for suite_id, values in metrics.items():
            status[suite_id].metrics_summary = geomean(values)
            status[suite_id].has_metrics = True

        for suite_id, record in status.items():
            record.suite_id = suite_id
            record.suite_version = get_suite_version(testrun, record.suite)
            record.save()

        testrun.status_recorded = True
        testrun.save()
Example #11
0
    def __call__(testrun):
        """Record per-suite and run-wide Status rows for a test run."""
        if testrun.status_recorded:
            return

        status = defaultdict(lambda: Status(test_run=testrun))

        for test in testrun.tests.all():
            # Pick the counter this result belongs to; a failure covered
            # by a known issue counts as an expected failure instead.
            if test.result is True:
                counter = 'tests_pass'
            elif test.result is False:
                counter = 'tests_xfail' if test.known_issues.exists() else 'tests_fail'
            else:
                counter = 'tests_skip'
            # Count run-wide (key None) and against the test's own suite.
            for key in (None, test.suite_id):
                record = status[key]
                setattr(record, counter, getattr(record, counter) + 1)

        metrics = defaultdict(list)
        for metric in testrun.metrics.all():
            for measurement in metric.measurement_list:
                metrics[None].append(measurement)
                metrics[metric.suite_id].append(measurement)

        for suite_id, values in metrics.items():
            bucket = status[suite_id]
            bucket.metrics_summary = geomean(values)
            bucket.has_metrics = True

        for suite_id, record in status.items():
            record.suite_id = suite_id
            record.suite_version = get_suite_version(testrun, record.suite)
            record.save()

        testrun.status_recorded = True
        testrun.save()
Example #12
0
    def __call__(testrun):
        """Summarize a test run into Status rows (one per suite, plus a
        run-wide row keyed on suite=None), then mark the run as recorded."""
        if testrun.status_recorded:
            return

        status = defaultdict(lambda: Status(test_run=testrun))

        def add_one(suite_id, field):
            # Tally once run-wide (None) and once for the owning suite.
            for key in (None, suite_id):
                row = status[key]
                setattr(row, field, getattr(row, field) + 1)

        for test in testrun.tests.all():
            if test.result is True:
                add_one(test.suite_id, 'tests_pass')
            elif test.result is False:
                # Known-issue failures are expected failures, not failures.
                if test.known_issues.exists():
                    add_one(test.suite_id, 'tests_xfail')
                else:
                    add_one(test.suite_id, 'tests_fail')
            else:
                add_one(test.suite_id, 'tests_skip')

        metrics = defaultdict(list)
        for metric in testrun.metrics.all():
            for value in metric.measurement_list:
                metrics[None].append(value)
                metrics[metric.suite_id].append(value)

        for suite_id, values in metrics.items():
            row = status[suite_id]
            row.metrics_summary = geomean(values)
            row.has_metrics = True

        for suite_id, row in status.items():
            row.suite_id = suite_id
            row.suite_version = get_suite_version(testrun, row.suite)
            row.save()

        testrun.status_recorded = True
        testrun.save()
 def test_empty_set(self):
     # An empty value set must not crash; it is summarized as 0.
     self.assertAlmostEqual(0, geomean(list()))
Example #14
0
 def __init__(self, build):
     """Summarize every metric of *build* with a single geomean."""
     results = [metric.result
                for metric in Metric.objects.filter(test_run__build_id=build.id)]
     self.value = geomean(results)
     self.has_metrics = bool(results)
 def test_set_with_only_invalid_values(self):
     # Zero is never a valid measurement; nothing remains to average.
     invalid_only = [0]
     self.assertAlmostEqual(0, geomean(invalid_only))
Example #16
0
    def test_basic_summary(self):
        """The build-wide summary pools metric values from all environments."""
        pooled_values = [1, 2, 3, 4, 2, 4, 6, 8]
        summary = MetricsSummary(self.build1)
        self.assertTrue(summary.has_metrics)
        self.assertTrue(eq(geomean(pooled_values), summary.value))
 def test_basic(self):
     """Geometric mean of 1 and 10 is sqrt(10), to 3 places."""
     result = geomean([1, 10])
     self.assertAlmostEqual(3.1622, result, 3)
Example #18
0
 def test_basic(self):
     # sqrt(1 * 10), checked to 3 decimal places
     self.assertAlmostEqual(3.1622, geomean([1, 10]), places=3)
Example #19
0
 def test_set_with_only_invalid_values(self):
     """A list holding only invalid (zero) values averages to 0."""
     zeroes = [0]
     self.assertAlmostEqual(0, geomean(zeroes))
Example #20
0
 def test_empty_set(self):
     """With no measurements at all, geomean falls back to 0."""
     no_values = []
     self.assertAlmostEqual(0, geomean(no_values))
Example #21
0
 def test_exclude_negative_numbers(self):
     """Negative measurements are dropped before averaging."""
     with_negative = [4, -1, 4]
     self.assertAlmostEqual(4, geomean(with_negative))
Example #22
0
 def test_exclude_zeroes(self):
     """Zero measurements do not contribute to the geomean."""
     with_zero = [4, 0, 4]
     self.assertAlmostEqual(4, geomean(with_zero))
 def test_exclude_negative_numbers(self):
     # The -1 is discarded, so only the two 4s are averaged.
     samples = [4, -1, 4]
     self.assertAlmostEqual(4, geomean(samples))
 def test_exclude_zeroes(self):
     # The zero is discarded, so only the two 4s are averaged.
     samples = [4, 0, 4]
     self.assertAlmostEqual(4, geomean(samples))
Example #25
0
 def __init__(self, build):
     """Collapse all of *build*'s metric results into one geometric mean."""
     results = []
     for metric in Metric.objects.filter(test_run__build_id=build.id).all():
         results.append(metric.result)
     self.value = geomean(results)
     self.has_metrics = len(results) > 0