def __rearrange_test_results__(results_layout, test_results):
    """Regroup test_results for rendering.

    'envbox' attaches per-environment suite lists and accumulated Status
    counters to each environment; 'suitebox' attaches per-suite environment
    lists (sorted by environment slug) and accumulated Status counters to
    each suite. Mutates test_results in place.
    """
    if results_layout == 'envbox':
        # Reset per-environment accumulators before regrouping.
        for environment in test_results.environments:
            environment.suites = []
            environment.status = Status()
        for suite, cells in test_results.data.items():
            for env in cells.keys():
                # Only statuses belonging to this environment are counted.
                matching = [
                    s for s in cells[env].statuses
                    if s.environment.id == env.id
                ]
                for st in matching:
                    env.status.tests_pass += st.tests_pass
                    env.status.tests_skip += st.tests_skip
                    env.status.tests_fail += st.tests_fail
                    env.status.tests_xfail += st.tests_xfail
                env.suites.append((suite, matching))
    if results_layout == 'suitebox':
        test_results.suites = test_results.data.keys()
        for suite in test_results.suites:
            suite.status = Status()
            env_cells = []
            for environment, cell in test_results.data[suite].items():
                env_cells.append((environment, cell.statuses))
                for st in cell.statuses:
                    suite.status.tests_pass += st.tests_pass
                    suite.status.tests_skip += st.tests_skip
                    suite.status.tests_fail += st.tests_fail
                    suite.status.tests_xfail += st.tests_xfail
            # Stable presentation order for environments within a suite.
            suite.environments = sorted(env_cells, key=lambda pair: pair[0].slug)
def __call__(testrun):
    """Compute and persist per-suite and overall Status for a test run.

    status[None] accumulates totals for the whole run; status[suite_id]
    accumulates per-suite counters. A run with result True counts as pass,
    False as fail, and anything else (presumably None) as skip.
    Idempotent: guarded by testrun.status_recorded.
    """
    if testrun.status_recorded:
        return
    # Default factory binds each new Status to this test run.
    status = defaultdict(lambda: Status(test_run=testrun))
    for test in testrun.tests.all():
        sid = test.suite_id
        # Idiom fix: use augmented assignment instead of `x = x + 1`,
        # consistent with the sibling implementations in this file.
        if test.result is True:
            status[None].tests_pass += 1
            status[sid].tests_pass += 1
        elif test.result is False:
            status[None].tests_fail += 1
            status[sid].tests_fail += 1
        else:
            status[None].tests_skip += 1
            status[sid].tests_skip += 1
    # defaultdict(list) is the idiomatic form of defaultdict(lambda: []).
    metrics = defaultdict(list)
    for metric in testrun.metrics.all():
        sid = metric.suite_id
        for v in metric.measurement_list:
            metrics[None].append(v)
            metrics[sid].append(v)
    for sid, values in metrics.items():
        status[sid].metrics_summary = geomean(values)
    for sid, s in status.items():
        s.suite_id = sid
        s.save()
    testrun.status_recorded = True
    testrun.save()
def __call__(testrun):
    """Compute and persist per-suite and overall Status for a test run,
    using aggregate queries instead of iterating every test.

    status[None] is the run-wide summary; status[suite_id] holds per-suite
    counters. Idempotent: guarded by testrun.status_recorded.
    """
    if testrun.status_recorded:
        return
    status = defaultdict(lambda: Status(test_run=testrun))
    # Get number of passing tests per suite. The empty .order_by() clears
    # any default ordering so the GROUP BY is purely on suite_id.
    passes = testrun.tests.filter(result=True).values('suite_id').annotate(
        pass_count=Count('suite_id')).order_by()
    # Failures with known issues count as expected failures (xfail)...
    xfails = testrun.tests.filter(
        result=False, has_known_issues=True).values('suite_id').annotate(
        xfail_count=Count('suite_id')).order_by()
    # ...and the remaining failures as real failures.
    fails = testrun.tests.filter(result=False).exclude(
        has_known_issues=True).values('suite_id').annotate(
        fail_count=Count('suite_id')).order_by()
    skips = testrun.tests.filter(
        result__isnull=True).values('suite_id').annotate(
        skip_count=Count('suite_id')).order_by()
    for p in passes:
        status[None].tests_pass += p['pass_count']
        status[p['suite_id']].tests_pass += p['pass_count']
    for x in xfails:
        status[None].tests_xfail += x['xfail_count']
        status[x['suite_id']].tests_xfail += x['xfail_count']
    for f in fails:
        status[None].tests_fail += f['fail_count']
        status[f['suite_id']].tests_fail += f['fail_count']
    for s in skips:
        status[None].tests_skip += s['skip_count']
        status[s['suite_id']].tests_skip += s['skip_count']
    # defaultdict(list) is the idiomatic form of defaultdict(lambda: []).
    metrics = defaultdict(list)
    for metric in testrun.metrics.all():
        sid = metric.suite_id
        for v in metric.measurement_list:
            metrics[None].append(v)
            metrics[sid].append(v)
    # One Status has many test suites and each of one of them
    # has their own summary (i.e. geomean).
    # The status having no test suite (suite=None) represent
    # the TestRun's summary.
    # BUG FIX: probing `metrics[None]` on a defaultdict *creates* the None
    # entry when the run has no metrics, making the loop below compute
    # geomean([]) and wrongly flag has_metrics. Use .get() instead.
    if metrics.get(None):
        status[None].has_metrics = True
    for sid, values in metrics.items():
        status[sid].metrics_summary = geomean(values)
        status[sid].has_metrics = True
    for sid, s in status.items():
        s.suite_id = sid
        s.suite_version = get_suite_version(testrun, s.suite)
        s.save()
    testrun.status_recorded = True
    testrun.save()
def __call__(testrun):
    """Compute and persist per-suite and overall Status for a test run,
    then refresh the build's ProjectStatus and schedule a notification
    when the build is finished.

    status[None] is the run-wide summary; status[suite_id] holds per-suite
    counters. Idempotent: guarded by testrun.status_recorded.
    """
    if testrun.status_recorded:
        return
    status = defaultdict(lambda: Status(test_run=testrun))
    for test in testrun.tests.all():
        sid = test.suite_id
        # Idiom fix: augmented assignment instead of `x = x + 1`,
        # consistent with the sibling implementations in this file.
        if test.result is True:
            status[None].tests_pass += 1
            status[sid].tests_pass += 1
        elif test.result is False:
            status[None].tests_fail += 1
            status[sid].tests_fail += 1
        else:
            status[None].tests_skip += 1
            status[sid].tests_skip += 1
    # defaultdict(list) is the idiomatic form of defaultdict(lambda: []).
    metrics = defaultdict(list)
    for metric in testrun.metrics.all():
        sid = metric.suite_id
        for v in metric.measurement_list:
            metrics[None].append(v)
            metrics[sid].append(v)
    for sid, values in metrics.items():
        status[sid].metrics_summary = geomean(values)
    for sid, s in status.items():
        s.suite_id = sid
        s.save()
    testrun.status_recorded = True
    testrun.save()
    status = ProjectStatus.create_or_update(testrun.build)
    if status.finished:
        try:
            notify_project_status.delay(status.id)
        except OSError as e:
            # can't request background task for some reason; log the error
            # and continue.
            #
            # This will happen as "OSError: [Errno 111] Connection refused"
            # in development environments without a running AMQP server,
            # but also on production setups that are not running the
            # background job processes because they don't need email
            # notifications or CI integration
            # Lazy %-style args defer string building to the logging layer.
            logger.error("Cannot schedule notification: %s\n%s",
                         e, traceback.format_exc())
def __call__(testrun):
    """Compute and persist per-suite and overall Status for a test run,
    counting failures with known issues as expected failures (xfail).

    status[None] is the run-wide summary; status[suite_id] holds per-suite
    counters. Idempotent: guarded by testrun.status_recorded.
    """
    if testrun.status_recorded:
        return
    status = defaultdict(lambda: Status(test_run=testrun))
    for test in testrun.tests.all():
        sid = test.suite_id
        if test.result is True:
            status[None].tests_pass += 1
            status[sid].tests_pass += 1
        elif test.result is False:
            # A failing test with known issues is an expected failure.
            if test.known_issues.exists():
                status[None].tests_xfail += 1
                status[sid].tests_xfail += 1
            else:
                status[None].tests_fail += 1
                status[sid].tests_fail += 1
        else:
            status[None].tests_skip += 1
            status[sid].tests_skip += 1
    # defaultdict(list) is the idiomatic form of defaultdict(lambda: []).
    metrics = defaultdict(list)
    for metric in testrun.metrics.all():
        sid = metric.suite_id
        for v in metric.measurement_list:
            metrics[None].append(v)
            metrics[sid].append(v)
    # One Status has many test suites and each of one of them
    # has their own summary (i.e. geomean).
    # The status having no test suite (suite=None) represent
    # the TestRun's summary.
    # BUG FIX: probing `metrics[None]` on a defaultdict *creates* the None
    # entry when the run has no metrics, making the loop below compute
    # geomean([]) and wrongly flag has_metrics. Use .get() instead.
    if metrics.get(None):
        status[None].has_metrics = True
    for sid, values in metrics.items():
        status[sid].metrics_summary = geomean(values)
        status[sid].has_metrics = True
    for sid, s in status.items():
        s.suite_id = sid
        s.suite_version = get_suite_version(testrun, s.suite)
        s.save()
    testrun.status_recorded = True
    testrun.save()
def __call__(testrun):
    """Tally test outcomes and metric summaries for a test run.

    Counters are accumulated twice per test: into status[None] (run-wide
    summary) and status[suite_id] (per-suite). Failing tests with known
    issues count as expected failures. Idempotent: guarded by
    testrun.status_recorded.
    """
    if testrun.status_recorded:
        return
    status = defaultdict(lambda: Status(test_run=testrun))

    def bump(counter, suite_id):
        # Increment the named counter on both the run-wide (None) and
        # per-suite Status objects, in that order.
        for key in (None, suite_id):
            setattr(status[key], counter, getattr(status[key], counter) + 1)

    for test in testrun.tests.all():
        if test.result is True:
            counter = 'tests_pass'
        elif test.result is False:
            counter = 'tests_xfail' if test.known_issues.exists() else 'tests_fail'
        else:
            counter = 'tests_skip'
        bump(counter, test.suite_id)

    # Collect every measurement both globally (None) and per suite.
    metrics = defaultdict(list)
    for metric in testrun.metrics.all():
        for value in metric.measurement_list:
            metrics[None].append(value)
            metrics[metric.suite_id].append(value)
    for sid, values in metrics.items():
        st = status[sid]
        st.metrics_summary = geomean(values)
        st.has_metrics = True

    for sid, st in status.items():
        st.suite_id = sid
        st.suite_version = get_suite_version(testrun, st.suite)
        st.save()
    testrun.status_recorded = True
    testrun.save()