Example #1
    def __extract_test_results__(self, test_runs_ids):
        self.__failures__ = OrderedDict()

        tests = models.Test.objects.filter(
            test_run_id__in=test_runs_ids.keys()
        ).annotate(
            suite_slug=F('suite__slug'),
        ).prefetch_related('metadata').defer('log')

        for test in tests:
            build, env = test_runs_ids.get(test.test_run_id)

            full_name = join_name(test.suite_slug, test.name)
            if full_name not in self.results:
                self.results[full_name] = OrderedDict()

            key = (build, env)
            if key in self.results[full_name]:  # Duplicate found.
                if not isinstance(self.results[full_name][key], tuple):
                    # Test confidence is NOT already calculated.
                    self.results[full_name][key] = test_confidence(test)
            else:
                self.results[full_name][key] = test.status

            if test.has_known_issues:
                self.tests_with_issues[test.id] = (full_name, env)

            if test.status == 'fail' and build.id == self.builds[-1].id:
                if env not in self.__failures__:
                    self.__failures__[env] = []
                self.__failures__[env].append(test)
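
Every example on this page revolves around test_confidence(), which collapses duplicate executions of the same test into a single (status, confidence_score) pair. The sketch below is only inferred from the call sites shown here (test_confidence(test) and test_confidence(None, list_of_duplicates=...)); squad's actual implementation may compute the score differently, and the import path and fallback query are assumptions.

from collections import Counter

from squad.core.models import Test  # assumed import path


def test_confidence(test, list_of_duplicates=None):
    # Sketch only: mirrors how the call sites on this page use the function.
    if list_of_duplicates is None:
        # Assumption: duplicates are re-runs of the same test within the same
        # build and environment, matching the filter used in Example #4.
        list_of_duplicates = Test.objects.filter(
            metadata=test.metadata,
            build=test.build,
            environment=test.environment,
        )
    statuses = [t.status for t in list_of_duplicates]
    status, count = Counter(statuses).most_common(1)[0]
    # Confidence here is the percentage of duplicates agreeing on the most
    # common status; squad's real scoring may differ.
    return status, 100 * count / len(statuses)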
Example #2
    def __init__(self,
                 test,
                 suite,
                 metadata,
                 known_issues,
                 is_duplicate=False):
        self.test = test
        self.suite = suite
        self.known_issues = known_issues
        if is_duplicate:
            self.status, self.confidence_score = test_confidence(test)
        else:
            self.status, self.confidence_score = (test.status, None)
        self.test_run = test.test_run
        self.test_run_status = self.TestRunStatus(self.test_run, self.suite)
        self.info = {
            "test_description": metadata.description if metadata else '',
            "test_instructions": metadata.instructions_to_reproduce if metadata else '',
            "suite_instructions": self.suite.metadata.instructions_to_reproduce if self.suite.metadata else '',
            "test_log": test.log,
        }
Example #3
    @classmethod
    def get(cls, build, page, search, per_page=50):
        table = cls()
        table.__get_all_tests__(build, search)
        table.number = page
        table.__count_pages__(per_page)

        table.environments = {
            t.environment
            for t in build.test_runs.prefetch_related('environment').all()
        }

        tests = Test.objects.filter(
            id__in=table.__get_page_filter__(page, per_page)
        ).prefetch_related(
            Prefetch('test_run', queryset=TestRun.objects.only('environment')),
            'suite__metadata',
            'metadata',
        )

        memo = {}
        for test in tests:
            by_env = memo.setdefault(test.full_name, {})
            by_env.setdefault(test.environment_id, []).append(test)

        # handle duplicates
        for full_name in memo.keys():
            env_ids = memo[full_name].keys()
            for env_id in env_ids:

                test = memo[full_name][env_id][0]

                if len(memo[full_name][env_id]) == 1:
                    memo[full_name][env_id] = [test.status, None]
                else:
                    duplicates = memo[full_name][env_id]
                    memo[full_name][env_id] = list(test_confidence(None, list_of_duplicates=duplicates))

                error_info = {
                    "test_description": test.metadata.description if test.metadata else '',
                    "suite_instructions": test.suite.metadata.instructions_to_reproduce if test.suite.metadata else '',
                    "test_instructions": test.metadata.instructions_to_reproduce if test.metadata else '',
                    "test_log": test.log or '',
                }
                info = json.dumps(error_info) if any(error_info.values()) else None

                memo[full_name][env_id].append(info)

            if 'test_metadata' not in memo[full_name].keys():
                memo[full_name]['test_metadata'] = (test.test_run, test.suite, test.name)

        for test_full_name, results in memo.items():
            test_result = TestResult(test_full_name)
            test_result.test_run, test_result.suite, test_result.short_name = results.get('test_metadata', None)
            for env in table.environments:
                test_result.append(results.get(env.id, ["n/a", None]))
            table.append(test_result)

        table.sort()

        return table
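
For readability, this is roughly the shape memo takes after the duplicate-handling loop, assuming one test that ran more than once on environment id 7 and a second run that ran cleanly on environment id 9; every concrete value below is invented for illustration.

# Illustrative shape only; all values are made up.
memo = {
    'linux-log-parser-test/check-kernel-oops': {
        7: ['pass', 66.7, None],  # duplicates: [status, confidence_score, info JSON or None]
        9: ['fail', None, '{"test_log": "..."}'],  # single run: raw status, no score, optional info
        'test_metadata': ('<TestRun>', '<Suite>', 'check-kernel-oops'),  # (test_run, suite, short name)
    },
}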
Example #4
File: views.py Project: chaws/squad
def test_run_suite_test_details(request, group_slug, project_slug,
                                build_version, testrun, suite_slug, test_name):
    context = __test_run_suite_context__(request, group_slug, project_slug,
                                         build_version, testrun, suite_slug)
    test_name = test_name.replace("$", "/")
    suite_slug = suite_slug.replace("$", "/")
    metadata = get_object_or_404(SuiteMetadata,
                                 kind='test',
                                 suite=suite_slug,
                                 name=test_name)
    tests = Test.objects.filter(suite=context['suite'],
                                metadata=metadata,
                                build=context['build'],
                                environment=context['test_run'].environment)
    if len(tests) == 0:
        raise Http404()

    # There's more than one test that meets the criteria, which usually
    # means a resubmitted job.
    if len(tests) > 1:
        # Calculate the most common status and confidence score.
        status, confidence_score = test_confidence(tests.first())
        # Take the first test with the most common status as the relevant one.
        test = None
        for t in tests:
            if t.status == status:
                test = t
                break
        if not test:
            # Something went wrong, we're supposed to find a test by now.
            raise Http404()
        test.confidence_score = confidence_score
    else:
        test = tests.first()

    attachments = [
        (f['filename'], file_type(f['filename']), f['length'])
        for f in context['test_run'].attachments.values('filename', 'length')
    ]

    context.update({'test': test, 'attachments': attachments})
    return render(request, 'squad/test_run_suite_test_details.jinja2', context)
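
The .replace("$", "/") calls above undo a URL escaping: suite and test names can themselves contain slashes, so they are apparently encoded with '$' when placed in a single URL path segment and decoded back inside the view. A minimal round-trip illustration (escape_name is a hypothetical helper, not part of squad):

def escape_name(name):
    # Hypothetical inverse of the view's .replace("$", "/"): swap slashes in
    # suite/test names for '$' so they fit in one URL path segment.
    return name.replace("/", "$")


assert escape_name("ltp-syscalls/abort01") == "ltp-syscalls$abort01"
assert "ltp-syscalls$abort01".replace("$", "/") == "ltp-syscalls/abort01"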