Example #1
def get(request, group_slug, project_slug):
    group = get_object_or_404(models.Group, slug=group_slug)
    project = get_object_or_404(group.projects, slug=project_slug)

    metrics = request.GET.getlist('metric')
    # If the metrics parameter is not present, return data for all metrics.
    if not metrics:
        metric_set = models.Metric.objects.filter(
            test_run__environment__project=project).values(
                'suite__slug', 'name').order_by('suite__slug',
                                                'name').distinct()

        metrics = [":tests:"]
        metrics += [join_name(m['suite__slug'], m['name']) for m in metric_set]

    environments = request.GET.getlist('environment')

    results = get_metric_data(project, metrics, environments)

    fmt = request.GET.get('format', 'json')
    if fmt == 'json':
        return HttpResponse(json.dumps(results),
                            content_type='application/json; charset=utf-8')
    elif fmt == 'csv':
        return HttpResponse(export_csv(results),
                            content_type='text/csv; charset=utf-8')
    else:
        return HttpResponseBadRequest("Invalid format: %s" % fmt)
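A view like this is easy to exercise with Django's test client. The sketch below is a usage example only: the URL path and the group/project slugs are hypothetical, not taken from the snippet above.

from django.test import Client

client = Client()
# 'format' defaults to json; anything other than json/csv returns HTTP 400.
response = client.get('/api/data/mygroup/myproject/',
                      {'metric': ':tests:', 'format': 'json'})
assert response.status_code == 200
assert response['Content-Type'].startswith('application/json')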
Example #2
def metrics(request, group_slug, project_slug):
    group = Group.objects.get(slug=group_slug)
    project = group.projects.get(slug=project_slug)

    environments = [{
        "name": e.slug
    } for e in project.environments.order_by('id').all()]

    metric_set = Metric.objects.filter(
        test_run__environment__project=project).values(
            'suite__slug', 'name').order_by('suite__slug', 'name').distinct()

    metrics = [{
        "name": ":tests:",
        "label": "Test pass %",
        "max": 100,
        "min": 0
    }]
    metrics += [{
        "name": join_name(m['suite__slug'], m['name'])
    } for m in metric_set]

    data = get_metric_data(project, request.GET.getlist('metric'),
                           request.GET.getlist('environment'))

    context = {
        "project": project,
        "environments": environments,
        "metrics": metrics,
        "data": data,
    }
    return render(request, 'squad/metrics.html', context)
Example #3
def get_metrics_list(project):

    metric_set = Metric.objects.filter(
        test_run__environment__project=project).values(
            'suite__slug', 'name').order_by('suite__slug', 'name').distinct()

    metrics = [{
        "name": ":summary:",
        "label": _("Summary of all metrics per build")
    }]
    metrics += [{
        "name": ":dynamic_summary:",
        "label": _("Summary of selected metrics"),
        "dynamic": "yes"
    }]
    metrics += [{
        "name": ":tests:",
        "label": _("Test pass %"),
        "max": 100,
        "min": 0
    }]
    metrics += [{
        "name": join_name(m['suite__slug'], m['name'])
    } for m in metric_set]
    return metrics
Example #4
def get_metrics_list(project):
    unique_names = set()

    testruns = TestRun.objects.filter(
        environment__project=project).values('id').order_by('id')
    test_runs_ids = [tr['id'] for tr in testruns]
    for chunk in split_list(test_runs_ids, chunk_size=100):
        metric_set = Metric.objects.filter(test_run_id__in=chunk).values(
            'suite__slug', 'name')
        for m in metric_set:
            unique_names.add(join_name(m['suite__slug'], m['name']))

    metric_names = [{"name": name} for name in sorted(unique_names)]

    metrics = [{
        "name": ":summary:",
        "label": _("Summary of all metrics per build")
    }]
    metrics += [{
        "name": ":dynamic_summary:",
        "label": _("Summary of selected metrics"),
        "dynamic": "yes"
    }]
    metrics += [{
        "name": ":tests:",
        "label": _("Test pass %"),
        "max": 100,
        "min": 0
    }]
    metrics += metric_names
    return metrics
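Example #4 feeds the test-run IDs to split_list(..., chunk_size=100) so that each IN clause stays small. The helper itself is not shown in any of these examples; a minimal sketch consistent with how it is called might be:

def split_list(items, chunk_size=1):
    # Yield successive chunk_size-sized slices of the input list.
    for i in range(0, len(items), chunk_size):
        yield items[i:i + chunk_size]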
Example #5
 def __extract_stats__(self, query):
     stats = []
     for environment_slug, builds in groupby(
             query, lambda x: x['test_run__environment__slug']):
         for build_id, suites in groupby(builds,
                                         lambda x: x['test_run__build_id']):
             for suite_slug, metrics in groupby(suites,
                                                lambda x: x['suite__slug']):
                 for metric_name, measurements in groupby(
                         metrics, lambda x: x['name']):
                     values = []
                     for m in measurements:
                         values += [
                             float(v) for v in m['measurements'].split(',')
                         ]
                     stat = {
                         'environment_slug': environment_slug,
                         'build_id': build_id,
                         'full_name': join_name(suite_slug, metric_name),
                         'mean': statistics.mean(values),
                         'stddev': statistics.pstdev(values),
                         'count': len(values)
                     }
                     stats.append(stat)
     return stats
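The nested itertools.groupby calls above only work because the query is ordered by the same keys, in the same precedence, as the grouping: groupby groups consecutive items only and never sorts. A standalone illustration of the pitfall:

from itertools import groupby

rows = [('env-a', 1), ('env-a', 2), ('env-b', 3), ('env-a', 4)]
# 'env-a' appears twice because groupby does not sort its input.
print([key for key, group in groupby(rows, key=lambda r: r[0])])
# ['env-a', 'env-b', 'env-a']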
Example #6
    def __extract_test_results__(self, test_runs_ids):
        self.__failures__ = OrderedDict()

        tests = models.Test.objects.filter(
            test_run_id__in=test_runs_ids.keys()).annotate(suite_slug=F(
                'suite__slug'), ).prefetch_related('metadata').defer('log')

        for test in tests:
            build, env = test_runs_ids.get(test.test_run_id)

            full_name = join_name(test.suite_slug, test.name)
            if full_name not in self.results:
                self.results[full_name] = OrderedDict()

            key = (build, env)
            if key in self.results[full_name]:  # Duplicate found.
                if not isinstance(self.results[full_name][key], tuple):
                    # Test confidence has NOT been calculated yet.
                    self.results[full_name][key] = test_confidence(test)
            else:
                self.results[full_name][key] = test.status

            if test.has_known_issues:
                self.tests_with_issues[test.id] = (full_name, env)

            if test.status == 'fail' and build.id == self.builds[-1].id:
                if env not in self.__failures__:
                    self.__failures__[env] = []
                self.__failures__[env].append(test)
Example #7
def __get_metrics_list__(project):

    metric_set = Metric.objects.filter(
        test_run__environment__project=project
    ).values('suite__slug', 'name').order_by('suite__slug', 'name').distinct()

    metrics = [{"name": ":tests:", "label": "Test pass %", "max": 100, "min": 0}]
    metrics += [{"name": join_name(m['suite__slug'], m['name'])} for m in metric_set]
    return metrics
Example #8
File: queries.py (project: Linaro/squad)
def get_metrics_list(project):

    metric_set = Metric.objects.filter(
        test_run__environment__project=project
    ).values('suite__slug', 'name').order_by('suite__slug', 'name').distinct()

    metrics = [{"name": ":tests:", "label": "Test pass %", "max": 100, "min": 0}]
    metrics += [{"name": join_name(m['suite__slug'], m['name'])} for m in metric_set]
    return metrics
Example #9
    def __call__(test_run):
        if test_run.data_processed:
            return

        issues = {}
        for issue in KnownIssue.active_by_environment(test_run.environment):
            issues.setdefault(issue.test_name, [])
            issues[issue.test_name].append(issue)

        for test in test_parser()(test_run.tests_file):
            # TODO: remove check below when test_name size changes in the schema
            if len(test['test_name']) > 256:
                continue
            suite = get_suite(
                test_run,
                test['group_name']
            )
            metadata, _ = SuiteMetadata.objects.get_or_create(suite=suite.slug, name=test['test_name'], kind='test')
            full_name = join_name(suite.slug, test['test_name'])
            test_issues = issues.get(full_name, [])
            test_obj = Test.objects.create(
                test_run=test_run,
                suite=suite,
                metadata=metadata,
                result=test['pass'],
                log=test['log'],
                has_known_issues=bool(test_issues),
                build=test_run.build,
                environment=test_run.environment,
            )
            for issue in test_issues:
                test_obj.known_issues.add(issue)

        for metric in metric_parser()(test_run.metrics_file):
            # TODO: remove check below when test_name size changes in the schema
            if len(metric['name']) > 256:
                continue
            suite = get_suite(
                test_run,
                metric['group_name']
            )
            metadata, _ = SuiteMetadata.objects.get_or_create(suite=suite.slug, name=metric['name'], kind='metric')
            Metric.objects.create(
                test_run=test_run,
                suite=suite,
                metadata=metadata,
                name=metric['name'],
                result=metric['result'],
                measurements=','.join([str(m) for m in metric['measurements']]),
                unit=metric['unit'],
            )

        test_run.data_processed = True
        test_run.save()
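The issues mapping built at the top of this example (setdefault plus append) is a plain name-to-list index. collections.defaultdict(list) expresses the same thing slightly more directly; which form to use is a style choice, not a behavioral one:

from collections import defaultdict

issues = defaultdict(list)
for issue in KnownIssue.active_by_environment(test_run.environment):
    issues[issue.test_name].append(issue)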
Example #10
 def __all_tests__(self):
     data = Test.objects.filter(
         test_run__build__project__in=[b.project for b in self.builds]
     ).order_by(
         'suite__slug',
         'name',
     ).values(
         'suite__slug',
         'name',
     ).distinct()
     return sorted([join_name(item['suite__slug'], item['name']) for item in data])
Example #11
 def __extract_test_results__(self, test_run):
     tests = test_run.tests.annotate(suite_slug=F('suite__slug'))
     for test in tests.iterator():
         key = (test_run.build, str(test_run.environment))
         test_full_name = join_name(test.suite_slug, test.name)
         if test_full_name not in self.results:
             self.results[test_full_name] = OrderedDict()
         self.results[test_full_name][key] = test.status
         if test.has_known_issues:
             for issue in test.known_issues.all():
                 if issue.intermittent:
                     env = str(test_run.environment)
                     self.__intermittent__[(test_full_name, env)] = True
Example #12
def get(request, group_slug, project_slug):
    project = request.project

    metrics = request.GET.getlist('metric')
    date_start = request.GET.get('date_start', None)
    date_end = request.GET.get('date_end', None)

    if date_start:
        try:
            date_start = datetime.strptime(date_start, "%m-%d-%Y")
        except ValueError:
            return HttpResponseBadRequest(
                "Invalid date_start format: %s. Try using %s notation." %
                (date_start, "%m-%d-%Y"))
    if date_end:
        try:
            date_end = datetime.strptime(date_end, "%m-%d-%Y")
        except ValueError:
            return HttpResponseBadRequest(
                "Invalid date_end format: %s. Try using %s notation." %
                (date_end, "%m-%d-%Y"))

    # If the metrics parameter is not present, return data for all metrics.
    if not metrics:
        metric_set = models.Metric.objects.filter(
            environment__project=project).values(
                'suite__slug',
                'metadata__name').order_by('suite__slug',
                                           'metadata__name').distinct()

        metrics = [":tests:"]
        metrics += [
            join_name(m['suite__slug'], m['metadata__name'])
            for m in metric_set
        ]

    environments = project.environments.filter(
        slug__in=request.GET.getlist('environment'))

    results = get_metric_data(project, metrics, environments, date_start,
                              date_end)

    fmt = request.GET.get('format', 'json')
    if fmt == 'json':
        return HttpResponse(json.dumps(results),
                            content_type='application/json; charset=utf-8')
    elif fmt == 'csv':
        return HttpResponse(export_csv(results),
                            content_type='text/csv; charset=utf-8')
    else:
        return HttpResponseBadRequest("Invalid format: %s" % fmt)
Example #13
def get_metrics_list(project):
    unique_names = set()

    envs_ids = project.environments.values_list('id', flat=True)
    metric_set = Metric.objects.filter(environment_id__in=list(envs_ids)).only('metadata_id').distinct().values_list('metadata_id', flat=True)
    names = SuiteMetadata.objects.filter(id__in=list(metric_set)).only('suite', 'name')
    for n in names:
        unique_names.add(join_name(n.suite, n.name))

    metric_names = [{"name": name} for name in sorted(unique_names)]

    metrics = [{"name": ":summary:", "label": _("Summary of all metrics per build")}]
    metrics += [{"name": ":dynamic_summary:", "label": _("Summary of selected metrics"), "dynamic": "yes"}]
    metrics += [{"name": ":tests:", "label": _("Test pass %"), "max": 100, "min": 0}]
    metrics += metric_names
    return metrics
Example #14
File: data.py (project: Linaro/squad)
def get(request, group_slug, project_slug):
    project = request.project

    metrics = request.GET.getlist('metric')
    date_start = request.GET.get('date_start', None)
    date_end = request.GET.get('date_end', None)

    if date_start:
        try:
            date_start = datetime.strptime(date_start, "%m-%d-%Y")
        except ValueError:
            return HttpResponseBadRequest("Invalid date_start format: %s. Try using %s notation." % (date_start, "%m-%d-%Y"))
    if date_end:
        try:
            date_end = datetime.strptime(date_end, "%m-%d-%Y")
        except ValueError:
            return HttpResponseBadRequest(
                "Invalid date_end format: %s. Try using %s notation." % (date_end, "%m-%d-%Y"))

    # If the metrics parameter is not present, return data for all metrics.
    if not metrics:
        metric_set = models.Metric.objects.filter(
            test_run__environment__project=project
        ).values('suite__slug', 'name').order_by('suite__slug', 'name').distinct()

        metrics = [":tests:"]
        metrics += [join_name(m['suite__slug'], m['name']) for m in metric_set]

    environments = request.GET.getlist('environment')

    results = get_metric_data(project, metrics, environments,
                              date_start, date_end)

    fmt = request.GET.get('format', 'json')
    if fmt == 'json':
        return HttpResponse(
            json.dumps(results),
            content_type='application/json; charset=utf-8'
        )
    elif fmt == 'csv':
        return HttpResponse(
            export_csv(results),
            content_type='text/csv; charset=utf-8'
        )
    else:
        return HttpResponseBadRequest("Invalid format: %s" % fmt)
Example #15
File: __init__.py (project: Linaro/squad)
    def __call__(test_run):
        if test_run.data_processed:
            return

        issues = {}
        for issue in KnownIssue.active_by_environment(test_run.environment):
            issues.setdefault(issue.test_name, [])
            issues[issue.test_name].append(issue)

        for test in test_parser()(test_run.tests_file):
            suite = get_suite(
                test_run,
                test['group_name']
            )
            metadata, _ = SuiteMetadata.objects.get_or_create(suite=suite.slug, name=test['test_name'], kind='test')
            full_name = join_name(suite.slug, test['test_name'])
            test_issues = issues.get(full_name, [])
            test = Test.objects.create(
                test_run=test_run,
                suite=suite,
                metadata=metadata,
                name=test['test_name'],
                result=test['pass'],
                has_known_issues=bool(test_issues),
            )
            for issue in test_issues:
                test.known_issues.add(issue)

        for metric in metric_parser()(test_run.metrics_file):
            suite = get_suite(
                test_run,
                metric['group_name']
            )
            metadata, _ = SuiteMetadata.objects.get_or_create(suite=suite.slug, name=metric['name'], kind='metric')
            Metric.objects.create(
                test_run=test_run,
                suite=suite,
                metadata=metadata,
                name=metric['name'],
                result=metric['result'],
                measurements=','.join([str(m) for m in metric['measurements']]),
            )

        test_run.data_processed = True
        test_run.save()
Example #16
File: __init__.py (project: weblate/squad)
    def __call__(test_run):
        if test_run.data_processed:
            return

        issues = {}
        for issue in KnownIssue.active_by_environment(test_run.environment):
            issues.setdefault(issue.test_name, [])
            issues[issue.test_name].append(issue)

        for test in test_parser()(test_run.tests_file):
            suite = get_suite(test_run, test['group_name'])
            metadata, _ = SuiteMetadata.objects.get_or_create(
                suite=suite.slug, name=test['test_name'], kind='test')
            full_name = join_name(suite.slug, test['test_name'])
            test_issues = issues.get(full_name, [])
            test = Test.objects.create(
                test_run=test_run,
                suite=suite,
                metadata=metadata,
                name=test['test_name'],
                result=test['pass'],
                has_known_issues=bool(test_issues),
            )
            for issue in test_issues:
                test.known_issues.add(issue)

        for metric in metric_parser()(test_run.metrics_file):
            suite = get_suite(test_run, metric['group_name'])
            metadata, _ = SuiteMetadata.objects.get_or_create(
                suite=suite.slug, name=metric['name'], kind='metric')
            Metric.objects.create(
                test_run=test_run,
                suite=suite,
                metadata=metadata,
                name=metric['name'],
                result=metric['result'],
                measurements=','.join([str(m)
                                       for m in metric['measurements']]),
            )

        test_run.data_processed = True
        test_run.save()
Example #17
    def __extract_test_results__(self, test_runs_ids):
        tests = models.Test.objects.filter(
            test_run_id__in=test_runs_ids.keys()).annotate(
                suite_slug=F('suite__slug'), ).defer('log', 'metadata')

        for test in tests:
            build, env = test_runs_ids.get(test.test_run_id)

            full_name = join_name(test.suite_slug, test.name)
            if full_name not in self.results:
                self.results[full_name] = OrderedDict()

            key = (build, env)
            self.results[full_name][key] = test.status

            if test.has_known_issues:
                self.tests_with_issues[test.id] = (full_name, env)

            if test.status == 'fail' and build.id == self.builds[-1].id:
                if env not in self.__failures__:
                    self.__failures__[env] = []
                self.__failures__[env].append(test)
Example #18
 def test_join_ungrouped(self):
     self.assertEqual('foo', join_name('/', 'foo'))
Example #19
    def __call__(test_run):
        if test_run.data_processed:
            return

        issues = {}
        for issue in KnownIssue.active_by_environment(test_run.environment):
            issues.setdefault(issue.test_name, [])
            issues[issue.test_name].append(issue)

        # Known-issue test_name values are glob-like patterns, so compile
        # them into regexes before matching against full test names.
        # Each * is rewritten as .*?, a non-greedy match for any characters.
        issues_regex = {}
        for test_name_regex in issues.keys():
            pattern = re.escape(test_name_regex).replace('\\*', '.*?')
            regex = re.compile(pattern)
            issues_regex[regex] = issues[test_name_regex]

        for test in test_parser()(test_run.tests_file):
            # TODO: remove check below when test_name size changes in the schema
            if len(test['test_name']) > 256:
                continue
            suite = get_suite(test_run, test['group_name'])
            metadata, _ = SuiteMetadata.objects.get_or_create(
                suite=suite.slug, name=test['test_name'], kind='test')
            full_name = join_name(suite.slug, test['test_name'])

            test_issues = list(
                itertools.chain(*[
                    issue for regex, issue in issues_regex.items()
                    if regex.match(full_name)
                ]))

            test_obj = Test.objects.create(
                test_run=test_run,
                suite=suite,
                metadata=metadata,
                result=test['pass'],
                log=test['log'],
                has_known_issues=bool(test_issues),
                build=test_run.build,
                environment=test_run.environment,
            )
            for issue in test_issues:
                test_obj.known_issues.add(issue)

        for metric in metric_parser()(test_run.metrics_file):
            # TODO: remove check below when test_name size changes in the schema
            if len(metric['name']) > 256:
                continue
            suite = get_suite(test_run, metric['group_name'])
            metadata, _ = SuiteMetadata.objects.get_or_create(
                suite=suite.slug, name=metric['name'], kind='metric')
            Metric.objects.create(
                test_run=test_run,
                suite=suite,
                metadata=metadata,
                result=metric['result'],
                measurements=','.join([str(m)
                                       for m in metric['measurements']]),
                unit=metric['unit'],
                build=test_run.build,
                environment=test_run.environment,
            )

        test_run.data_processed = True
        test_run.save()
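The re.escape(...).replace('\*', '.*?') step above converts a glob-like known-issue pattern into a safe regex: every metacharacter in the stored name is escaped first, and only the escaped asterisk is re-expanded into a non-greedy wildcard. In isolation (the pattern string here is made up for illustration):

import re

raw = 'kselftest/timers_*'
pattern = re.escape(raw).replace('\\*', '.*?')
assert re.compile(pattern).match('kselftest/timers_nanosleep')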
Example #20
File: models.py (project: Linaro/squad)
 def full_name(self):
     return join_name(self.suite.slug, self.name)
Example #21
File: models.py (project: Linaro/squad)
 def __str__(self):
     if self.name == '-':
         return self.suite
     else:
         return join_name(self.suite, self.name)
Example #22
def create_testcase_tests(test_case_string_storage_id, atomic_test_suite_name,
                          testrun_id, suite_id):
    test_case_string = None
    scratch_object = None
    try:
        scratch_object = PluginScratch.objects.get(
            pk=test_case_string_storage_id)
        test_case_string = scratch_object.storage
    except PluginScratch.DoesNotExist:
        logger.warning("PluginScratch with ID: %s doesn't exist" %
                       test_case_string_storage_id)
        return

    test_case = ET.fromstring(test_case_string)
    testrun = TestRun.objects.get(pk=testrun_id)
    suite = Suite.objects.get(pk=suite_id)
    local_status = {
        'tests_pass': 0,
        'tests_xfail': 0,
        'tests_fail': 0,
        'tests_skip': 0
    }
    issues = {}
    for issue in KnownIssue.active_by_environment(testrun.environment):
        issues.setdefault(issue.test_name, [])
        issues[issue.test_name].append(issue)

    test_case_name = test_case.get("name")
    tests = test_case.findall('.//Test')
    logger.debug("Extracting TestCase: {test_case_name}".format(
        test_case_name=test_case_name))
    logger.debug("Adding {} testcases".format(len(tests)))
    test_list = []
    for atomic_test in tests:
        atomic_test_result = atomic_test.get("result")
        decoded_test_result = atomic_test_result == 'pass'
        if atomic_test_result == 'skip' or atomic_test.get(
                "skipped") == "true":
            decoded_test_result = None
        atomic_test_name = "{test_case_name}.{test_name}".format(
            test_case_name=test_case_name, test_name=atomic_test.get("name"))
        atomic_test_log = ""
        trace_node = atomic_test.find('.//StackTrace')
        if trace_node is not None:
            atomic_test_log = trace_node.text

        metadata, _ = SuiteMetadata.objects.get_or_create(
            suite=atomic_test_suite_name, name=atomic_test_name, kind='test')
        full_name = join_name(suite.slug, atomic_test_name)
        test_issues = issues.get(full_name, [])
        test_list.append(
            Test(
                test_run=testrun,
                suite=suite,
                metadata=metadata,
                result=decoded_test_result,
                log=atomic_test_log,
                has_known_issues=bool(test_issues),
            ))
        if decoded_test_result is True:
            local_status['tests_pass'] += 1
        elif decoded_test_result is False:
            if test_issues:
                local_status['tests_xfail'] += 1
            else:
                local_status['tests_fail'] += 1
        else:
            local_status['tests_skip'] += 1
    created_tests = Test.objects.bulk_create(test_list)
    for test in created_tests:
        if test.name in issues.keys():
            # Each value in issues is a list; unpack it for the M2M add().
            test.known_issues.add(*issues[test.name])

    with transaction.atomic():
        tr_status = testrun.status.select_for_update().get(suite=None)
        tr_status.tests_pass += local_status['tests_pass']
        tr_status.tests_xfail += local_status['tests_xfail']
        tr_status.tests_fail += local_status['tests_fail']
        tr_status.tests_skip += local_status['tests_skip']
        tr_status.save()
    suite_status, _ = Status.objects.get_or_create(test_run=testrun,
                                                   suite=suite)
    with transaction.atomic():
        suite_status_for_update = Status.objects.select_for_update().get(
            pk=suite_status.pk)
        suite_status_for_update.tests_pass += local_status['tests_pass']
        suite_status_for_update.tests_xfail += local_status['tests_xfail']
        suite_status_for_update.tests_fail += local_status['tests_fail']
        suite_status_for_update.tests_skip += local_status['tests_skip']
        suite_status_for_update.save()
    logger.info("Deleting PluginScratch with ID: %s" % scratch_object.pk)
    scratch_object.delete()
    return 0
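Both counter updates in this example wrap select_for_update() in transaction.atomic(), which is required: the row lock only exists inside an open transaction, and holding it for the whole read-modify-write serializes concurrent workers updating the same Status row. Reduced to its core, the pattern is:

from django.db import transaction

with transaction.atomic():
    # The lock acquired by select_for_update() is held until the
    # transaction block exits, so the increment cannot be lost to a
    # concurrent writer.
    status = Status.objects.select_for_update().get(pk=suite_status.pk)
    status.tests_pass += local_status['tests_pass']
    status.save()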
Example #23
 def test_join_group(self):
     self.assertEqual('foo/bar', join_name('foo', 'bar'))
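Taken together, the two join_name tests (Examples #18 and #23) pin down its contract: an ordinary suite slug is joined to the test name with '/', and the special ungrouped slug '/' is dropped entirely. A minimal sketch that satisfies both tests; the real implementation in squad may differ:

def join_name(group, name):
    # The ungrouped pseudo-suite '/' contributes nothing to the full name.
    if group == '/':
        return name
    return '/'.join([group, name])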