def __call__(testrun):
    if testrun.status_recorded:
        return
    status = defaultdict(lambda: Status(test_run=testrun))

    # Get the number of passing tests per suite
    passes = testrun.tests.filter(result=True).values('suite_id').annotate(
        pass_count=Count('suite_id')).order_by()
    xfails = testrun.tests.filter(
        result=False, has_known_issues=True).values('suite_id').annotate(
        xfail_count=Count('suite_id')).order_by()
    fails = testrun.tests.filter(result=False).exclude(
        has_known_issues=True).values('suite_id').annotate(
        fail_count=Count('suite_id')).order_by()
    skips = testrun.tests.filter(
        result__isnull=True).values('suite_id').annotate(
        skip_count=Count('suite_id')).order_by()

    for p in passes:
        status[None].tests_pass += p['pass_count']
        status[p['suite_id']].tests_pass += p['pass_count']
    for x in xfails:
        status[None].tests_xfail += x['xfail_count']
        status[x['suite_id']].tests_xfail += x['xfail_count']
    for f in fails:
        status[None].tests_fail += f['fail_count']
        status[f['suite_id']].tests_fail += f['fail_count']
    for s in skips:
        status[None].tests_skip += s['skip_count']
        status[s['suite_id']].tests_skip += s['skip_count']

    metrics = defaultdict(lambda: [])
    for metric in testrun.metrics.all():
        sid = metric.suite_id
        for v in metric.measurement_list:
            metrics[None].append(v)
            metrics[sid].append(v)

    # There is one Status per test suite, and each of them has its own
    # summary (i.e. geomean). The Status with no test suite (suite=None)
    # represents the TestRun's overall summary.
    if len(metrics[None]):
        status[None].has_metrics = True
    for sid, values in metrics.items():
        status[sid].metrics_summary = geomean(values)
        status[sid].has_metrics = True

    for sid, s in status.items():
        s.suite_id = sid
        s.suite_version = get_suite_version(testrun, s.suite)
        s.save()

    testrun.status_recorded = True
    testrun.save()
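
# A self-contained, framework-free sketch of the aggregation pattern used
# above: a defaultdict keyed by suite_id, where the None key accumulates the
# whole-run totals alongside the per-suite ones. StatusSketch is a
# hypothetical stand-in for the Django Status model, for illustration only.
from collections import defaultdict
from dataclasses import dataclass


@dataclass
class StatusSketch:
    tests_pass: int = 0
    tests_fail: int = 0


status = defaultdict(StatusSketch)
results = [('suite-a', True), ('suite-a', False), ('suite-b', True)]
for suite_id, passed in results:
    field = 'tests_pass' if passed else 'tests_fail'
    for key in (None, suite_id):  # None aggregates the whole test run
        setattr(status[key], field, getattr(status[key], field) + 1)

assert status[None].tests_pass == 2 and status['suite-a'].tests_fail == 1
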
def __call__(testrun):
    if testrun.status_recorded:
        return
    status = defaultdict(lambda: Status(test_run=testrun))

    for test in testrun.tests.all():
        sid = test.suite_id
        if test.result is True:
            status[None].tests_pass += 1
            status[sid].tests_pass += 1
        elif test.result is False:
            if test.known_issues.exists():
                status[None].tests_xfail += 1
                status[sid].tests_xfail += 1
            else:
                status[None].tests_fail += 1
                status[sid].tests_fail += 1
        else:
            status[None].tests_skip += 1
            status[sid].tests_skip += 1

    metrics = defaultdict(lambda: [])
    for metric in testrun.metrics.all():
        sid = metric.suite_id
        for v in metric.measurement_list:
            metrics[None].append(v)
            metrics[sid].append(v)

    # There is one Status per test suite, and each of them has its own
    # summary (i.e. geomean). The Status with no test suite (suite=None)
    # represents the TestRun's overall summary.
    if len(metrics[None]):
        status[None].has_metrics = True
    for sid, values in metrics.items():
        status[sid].metrics_summary = geomean(values)
        status[sid].has_metrics = True

    for sid, s in status.items():
        s.suite_id = sid
        s.suite_version = get_suite_version(testrun, s.suite)
        s.save()

    testrun.status_recorded = True
    testrun.save()
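
# A tiny, framework-free restatement of the classification used above, for
# illustration only: True -> pass; False with a known issue -> xfail; False
# without one -> fail; None (test did not run) -> skip. classify() is a
# hypothetical helper, not part of the original code.
def classify(result, has_known_issues):
    if result is True:
        return 'pass'
    if result is False:
        return 'xfail' if has_known_issues else 'fail'
    return 'skip'


assert classify(True, False) == 'pass'
assert classify(False, True) == 'xfail'
assert classify(None, False) == 'skip'
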
def __call__(testrun):
    if testrun.status_recorded:
        return
    status = defaultdict(lambda: Status(test_run=testrun))

    for test in testrun.tests.all():
        sid = test.suite_id
        if test.result is True:
            status[None].tests_pass += 1
            status[sid].tests_pass += 1
        elif test.result is False:
            if test.known_issues.exists():
                status[None].tests_xfail += 1
                status[sid].tests_xfail += 1
            else:
                status[None].tests_fail += 1
                status[sid].tests_fail += 1
        else:
            status[None].tests_skip += 1
            status[sid].tests_skip += 1

    metrics = defaultdict(lambda: [])
    for metric in testrun.metrics.all():
        sid = metric.suite_id
        for v in metric.measurement_list:
            metrics[None].append(v)
            metrics[sid].append(v)

    for sid, values in metrics.items():
        status[sid].metrics_summary = geomean(values)
        status[sid].has_metrics = True

    for sid, s in status.items():
        s.suite_id = sid
        s.suite_version = get_suite_version(testrun, s.suite)
        s.save()

    testrun.status_recorded = True
    testrun.save()
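
# A minimal sketch of the geomean() helper the code above relies on, assuming
# it computes the geometric mean of the collected measurement values. This is
# an assumption based on the name; the real helper may treat zero or negative
# measurements differently. geomean_sketch() is a hypothetical stand-in.
from math import exp, log


def geomean_sketch(values):
    # Drop non-positive values so log() stays defined (hypothetical choice).
    positives = [v for v in values if v > 0]
    if not positives:
        return 0
    return exp(sum(log(v) for v in positives) / len(positives))


assert round(geomean_sketch([2, 8]), 6) == 4.0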