Example #1
    def test_inactive_by_project(self):
        build = self.project.builds.create(
            datetime=self.date,
            version=self.date.strftime("%Y%m%d"),
        )
        test_run = build.test_runs.create(environment=self.env1)
        # create failed test
        foo_metadata, _ = SuiteMetadata.objects.get_or_create(
            suite=self.suite1.slug, name='test_foo', kind='test')
        test = test_run.tests.create(build=test_run.build,
                                     environment=test_run.environment,
                                     suite=self.suite1,
                                     metadata=foo_metadata,
                                     result=False)
        known_issue = KnownIssue.objects.create(title="foo",
                                                test_name=test.full_name)
        known_issue.environments.add(test_run.environment)
        # deactivate the issue so the query below should not return it
        known_issue.active = False
        known_issue.save()

        self.assertEqual(
            0,
            len(
                KnownIssue.active_by_project_and_test(self.project,
                                                      test.full_name)))
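
For reference, a minimal sketch of the classmethod these tests exercise, assuming KnownIssue carries a boolean active field and a many-to-many environments relation whose Environment rows point at a Project; the real queryset logic in the codebase may differ:

    @classmethod
    def active_by_project_and_test(cls, project, test_name=None):
        # Hypothetical sketch: keep active issues whose environments belong
        # to the given project. test_name is optional, matching the
        # single-argument call in the known_issues() property further below.
        queryset = cls.objects.filter(active=True, environments__project=project)
        if test_name is not None:
            queryset = queryset.filter(test_name=test_name)
        return queryset.distinct()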
Example #2
    def __init__(self, project, full_test_name, top=None, page=1, per_page=20):
        suite_slug, test_name = parse_name(full_test_name)
        self.test = full_test_name

        self.paginator = Paginator(
            project.builds.prefetch_related(
                Prefetch('test_runs',
                         queryset=TestRun.objects.prefetch_related('environment').all())
            ).reverse(),
            per_page,
        )
        if top:
            self.number = 0
            builds = project.builds.filter(datetime__lte=top.datetime).reverse()[0:per_page - 1]
        else:
            self.number = page
            builds = self.paginator.page(page)

        self.top = builds[0]

        environments = OrderedDict()
        results = OrderedDict()
        for build in builds:
            results[build] = {}

        issues_by_env = {}
        for issue in KnownIssue.active_by_project_and_test(project, full_test_name).all():
            for env in issue.environments.all():
                if env.id not in issues_by_env:
                    issues_by_env[env.id] = []
                issues_by_env[env.id].append(issue)

        suite = project.suites.prefetch_related('metadata').get(slug=suite_slug)
        metadata = SuiteMetadata.objects.get(kind='test', suite=suite_slug, name=test_name)
        tests = Test.objects.filter(
            build__in=builds,
            metadata_id=metadata.id,
        ).prefetch_related('build', 'environment', 'test_run', 'metadata').order_by()
        all_envs = set(project.environments.all())
        for test in tests:
            build = test.build
            environment = test.environment
            environments[environment] = True
            known_issues = issues_by_env.get(environment.id)
            is_duplicate = False
            if environment in results[build]:
                is_duplicate = True
            results[build][environment] = TestResult(test, suite, metadata, known_issues, is_duplicate)

        for build in results.keys():
            recorded_envs = set(results[build].keys())
            remaining_envs = all_envs - recorded_envs
            for env in remaining_envs:
                results[build][env] = None
                environments[env] = True
        # Ensure builds with no results for this test still map every
        # environment to None
        for b in builds:
            if not results[b]:
                for env in all_envs:
                    results[b][env] = None
                    environments[env] = True

        self.environments = sorted(environments.keys(), key=lambda env: env.slug)
        self.results = results
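
A usage sketch for the constructor above. The class name is not visible in the excerpt, so TestHistory here is a placeholder; what the code does guarantee is that self.results maps each build to a dict keyed by environment, holding either a TestResult or None:

    # Hypothetical usage; the class name TestHistory is an assumption.
    history = TestHistory(project, 'suite1/test_foo', page=1)
    for build, row in history.results.items():
        for environment in history.environments:
            result = row.get(environment)  # TestResult instance or None
            print(build.version, environment.slug, result)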
Example #3
    def __call__(test_run):
        if test_run.data_processed:
            return

        issues = {}
        for issue in KnownIssue.active_by_environment(test_run.environment):
            issues.setdefault(issue.test_name, [])
            issues[issue.test_name].append(issue)

        for test in test_parser()(test_run.tests_file):
            # TODO: remove check below when test_name size changes in the schema
            if len(test['test_name']) > 256:
                continue
            suite = get_suite(
                test_run,
                test['group_name']
            )
            metadata, _ = SuiteMetadata.objects.get_or_create(suite=suite.slug, name=test['test_name'], kind='test')
            full_name = join_name(suite.slug, test['test_name'])
            test_issues = issues.get(full_name, [])
            test_obj = Test.objects.create(
                test_run=test_run,
                suite=suite,
                metadata=metadata,
                result=test['pass'],
                log=test['log'],
                has_known_issues=bool(test_issues),
                build=test_run.build,
                environment=test_run.environment,
            )
            for issue in test_issues:
                test_obj.known_issues.add(issue)

        for metric in metric_parser()(test_run.metrics_file):
            # TODO: remove check below when test_name size changes in the schema
            if len(metric['name']) > 256:
                continue
            suite = get_suite(
                test_run,
                metric['group_name']
            )
            metadata, _ = SuiteMetadata.objects.get_or_create(suite=suite.slug, name=metric['name'], kind='metric')
            Metric.objects.create(
                test_run=test_run,
                suite=suite,
                metadata=metadata,
                name=metric['name'],
                result=metric['result'],
                measurements=','.join([str(m) for m in metric['measurements']]),
                unit=metric['unit'],
            )

        test_run.data_processed = True
        test_run.save()
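
The join_name/parse_name helpers used above pair a suite slug with a test name into a single full name. A plausible sketch, assuming '/' as the separator and splitting on its last occurrence, so suite slugs that themselves contain '/' (as in the tradefed examples below) still round-trip:

    def join_name(suite_slug, test_name):
        # e.g. join_name('suite1', 'test_foo') -> 'suite1/test_foo'
        return '%s/%s' % (suite_slug, test_name)

    def parse_name(full_name):
        # Split on the last '/'; the inverse of join_name above
        suite_slug, _, test_name = full_name.rpartition('/')
        return suite_slug, test_name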
Example #4
    def _extract_cts_results(self, buf, testrun, suite_prefix):
        tradefed_tree = self.__parse_xml_results(buf)
        if tradefed_tree is None:
            return

        issues = {}
        for issue in KnownIssue.active_by_environment(testrun.environment):
            issues.setdefault(issue.test_name, [])
            issues[issue.test_name].append(issue)

        test_elems = tradefed_tree.findall(".//Test")
        logger.debug("Tests: {}".format(len(test_elems)))
        elems = tradefed_tree.findall('Module')
        logger.debug("Modules: {}".format(len(elems)))
        task_list = []
        for elem in elems:
            # Naming: Module Name + Test Case Name + Test Name
            if 'abi' in elem.attrib.keys():
                module_name = '.'.join(
                    [elem.attrib['abi'], elem.attrib['name']])
            else:
                module_name = elem.attrib['name']
            logger.debug("Extracting tests for module: {}".format(module_name))
            test_cases = elem.findall('.//TestCase')
            suite_metadata_list = []
            suite_list = []
            logger.debug("Extracting suite names")
            atomic_test_suite_name = "{suite_prefix}/{module_name}".format(
                suite_prefix=suite_prefix, module_name=module_name)
            logger.debug(
                "creating suite metadata: {}".format(atomic_test_suite_name))
            suite_metadata, _ = SuiteMetadata.objects.get_or_create(
                suite=atomic_test_suite_name, kind='suite')
            suite, _ = Suite.objects.get_or_create(
                slug=atomic_test_suite_name,
                project=testrun.build.project,
                defaults={"metadata": suite_metadata})
            logger.debug(
                "Adding status with suite: {suite_prefix}/{module_name}".
                format(suite_prefix=suite_prefix, module_name=module_name))
            logger.debug("Creating subtasks for extracting results")
            for test_case in test_cases:
                plugin_scratch = PluginScratch.objects.create(
                    build=testrun.build,
                    storage=ET.tostring(test_case).decode('utf-8'))
                logger.debug("Created plugin scratch with ID: %s" %
                             plugin_scratch.pk)
                task = create_testcase_tests.s(plugin_scratch.pk,
                                               atomic_test_suite_name,
                                               testrun.pk, suite.pk)
                task_list.append(task)

        celery_chord(task_list)(update_build_status.s(testrun.pk))
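
The final celery_chord(task_list)(update_build_status.s(testrun.pk)) call is Celery's chord primitive: execute the header tasks in parallel, then fire the callback once every subtask has finished. A minimal, self-contained illustration of the same pattern, with made-up task names:

    from celery import Celery, chord

    app = Celery('demo')  # broker/result backend configuration omitted

    @app.task
    def add(x, y):
        return x + y

    @app.task
    def total(results):
        # The callback receives the list of results from all header tasks
        return sum(results)

    # Runs both add() tasks in parallel, then total([3, 7]) afterwards
    chord([add.s(1, 2), add.s(3, 4)])(total.s())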
Example #5
    def test_active_by_project(self):
        build = self.project.builds.create(
            datetime=self.date,
            version=self.date.strftime("%Y%m%d"),
        )
        test_run = build.test_runs.create(environment=self.env1)
        # create failed test
        test = test_run.tests.create(suite=self.suite1, name="test_foo", result=False)
        known_issue = KnownIssue.objects.create(
            title="foo",
            test_name=test.full_name
        )
        known_issue.environments.add(test_run.environment)
        self.assertEqual(
            1,
            len(KnownIssue.active_by_project_and_test(self.project, test.full_name)))
Example #7
    def __call__(test_run):
        if test_run.data_processed:
            return

        issues = {}
        for issue in KnownIssue.active_by_environment(test_run.environment):
            issues.setdefault(issue.test_name, [])
            issues[issue.test_name].append(issue)

        for test in test_parser()(test_run.tests_file):
            suite = get_suite(
                test_run,
                test['group_name']
            )
            metadata, _ = SuiteMetadata.objects.get_or_create(suite=suite.slug, name=test['test_name'], kind='test')
            full_name = join_name(suite.slug, test['test_name'])
            test_issues = issues.get(full_name, [])
            # Avoid shadowing the parser's `test` dict with the model instance
            test_obj = Test.objects.create(
                test_run=test_run,
                suite=suite,
                metadata=metadata,
                name=test['test_name'],
                result=test['pass'],
                has_known_issues=bool(test_issues),
            )
            for issue in test_issues:
                test_obj.known_issues.add(issue)

        for metric in metric_parser()(test_run.metrics_file):
            suite = get_suite(
                test_run,
                metric['group_name']
            )
            metadata, _ = SuiteMetadata.objects.get_or_create(suite=suite.slug, name=metric['name'], kind='metric')
            Metric.objects.create(
                test_run=test_run,
                suite=suite,
                metadata=metadata,
                name=metric['name'],
                result=metric['result'],
                measurements=','.join([str(m) for m in metric['measurements']]),
            )

        test_run.data_processed = True
        test_run.save()
Example #8
    def __call__(test_run):
        if test_run.data_processed:
            return

        issues = {}
        for issue in KnownIssue.active_by_environment(test_run.environment):
            issues.setdefault(issue.test_name, [])
            issues[issue.test_name].append(issue)

        for test in test_parser()(test_run.tests_file):
            suite = get_suite(test_run, test['group_name'])
            metadata, _ = SuiteMetadata.objects.get_or_create(
                suite=suite.slug, name=test['test_name'], kind='test')
            full_name = join_name(suite.slug, test['test_name'])
            test_issues = issues.get(full_name, [])
            # Avoid shadowing the parser's `test` dict with the model instance
            test_obj = Test.objects.create(
                test_run=test_run,
                suite=suite,
                metadata=metadata,
                name=test['test_name'],
                result=test['pass'],
                has_known_issues=bool(test_issues),
            )
            for issue in test_issues:
                test_obj.known_issues.add(issue)

        for metric in metric_parser()(test_run.metrics_file):
            suite = get_suite(test_run, metric['group_name'])
            metadata, _ = SuiteMetadata.objects.get_or_create(
                suite=suite.slug, name=metric['name'], kind='metric')
            Metric.objects.create(
                test_run=test_run,
                suite=suite,
                metadata=metadata,
                name=metric['name'],
                result=metric['result'],
                measurements=','.join([str(m)
                                       for m in metric['measurements']]),
            )

        test_run.data_processed = True
        test_run.save()
Example #9
    def __call__(test_run):
        if test_run.data_processed:
            return

        issues = {}
        for issue in KnownIssue.active_by_environment(test_run.environment):
            issues.setdefault(issue.test_name, [])
            issues[issue.test_name].append(issue)

        # Issue test_names should be interpreted as patterns, so compile them
        # before matching against test names. Each * is replaced by .*?, a
        # non-greedy regex match for any sequence of characters.
        issues_regex = {}
        for test_name_regex in issues.keys():
            pattern = re.escape(test_name_regex).replace('\\*', '.*?')
            regex = re.compile(pattern)
            issues_regex[regex] = issues[test_name_regex]

        for test in test_parser()(test_run.tests_file):
            # TODO: remove check below when test_name size changes in the schema
            if len(test['test_name']) > 256:
                continue
            suite = get_suite(test_run, test['group_name'])
            metadata, _ = SuiteMetadata.objects.get_or_create(
                suite=suite.slug, name=test['test_name'], kind='test')
            full_name = join_name(suite.slug, test['test_name'])

            test_issues = list(
                itertools.chain(*[
                    issue for regex, issue in issues_regex.items()
                    if regex.match(full_name)
                ]))

            test_obj = Test.objects.create(
                test_run=test_run,
                suite=suite,
                metadata=metadata,
                result=test['pass'],
                log=test['log'],
                has_known_issues=bool(test_issues),
                build=test_run.build,
                environment=test_run.environment,
            )
            for issue in test_issues:
                test_obj.known_issues.add(issue)

        for metric in metric_parser()(test_run.metrics_file):
            # TODO: remove check below when test_name size changes in the schema
            if len(metric['name']) > 256:
                continue
            suite = get_suite(test_run, metric['group_name'])
            metadata, _ = SuiteMetadata.objects.get_or_create(
                suite=suite.slug, name=metric['name'], kind='metric')
            Metric.objects.create(
                test_run=test_run,
                suite=suite,
                metadata=metadata,
                result=metric['result'],
                measurements=','.join([str(m)
                                       for m in metric['measurements']]),
                unit=metric['unit'],
                build=test_run.build,
                environment=test_run.environment,
            )

        test_run.data_processed = True
        test_run.save()
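
The escape-then-replace trick is easy to sanity-check in isolation: re.escape() neutralises every regex metacharacter in the issue's test_name, after which the escaped wildcard (\*) is converted back into a live pattern:

    import re

    # '*' in an issue's test_name acts as a wildcard; everything else is literal
    pattern = re.escape('suite1/test_*').replace('\\*', '.*?')
    regex = re.compile(pattern)

    assert regex.match('suite1/test_foo')
    assert regex.match('suite1/test_bar_baz')
    assert not regex.match('suite2/test_foo')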
Example #10
    def known_issues(self):
        return KnownIssue.active_by_project_and_test(self.project)
Example #11
def create_testcase_tests(test_case_string_storage_id, atomic_test_suite_name,
                          testrun_id, suite_id):
    test_case_string = None
    scratch_object = None
    try:
        scratch_object = PluginScratch.objects.get(
            pk=test_case_string_storage_id)
        test_case_string = scratch_object.storage
    except PluginScratch.DoesNotExist:
        logger.warning("PluginScratch with ID: %s doesn't exist" %
                       test_case_string_storage_id)
        return

    test_case = ET.fromstring(test_case_string)
    testrun = TestRun.objects.get(pk=testrun_id)
    suite = Suite.objects.get(pk=suite_id)
    local_status = {
        'tests_pass': 0,
        'tests_xfail': 0,
        'tests_fail': 0,
        'tests_skip': 0
    }
    issues = {}
    for issue in KnownIssue.active_by_environment(testrun.environment):
        issues.setdefault(issue.test_name, [])
        issues[issue.test_name].append(issue)

    test_case_name = test_case.get("name")
    tests = test_case.findall('.//Test')
    logger.debug("Extracting TestCase: {test_case_name}".format(
        test_case_name=test_case_name))
    logger.debug("Adding {} testcases".format(len(tests)))
    test_list = []
    for atomic_test in tests:
        atomic_test_result = atomic_test.get("result")
        decoded_test_result = atomic_test_result == 'pass'
        if atomic_test_result == 'skip' or atomic_test.get(
                "skipped") == "true":
            decoded_test_result = None
        atomic_test_name = "{test_case_name}.{test_name}".format(
            test_case_name=test_case_name, test_name=atomic_test.get("name"))
        atomic_test_log = ""
        trace_node = atomic_test.find('.//StackTrace')
        if trace_node is not None:
            atomic_test_log = trace_node.text

        metadata, _ = SuiteMetadata.objects.get_or_create(
            suite=atomic_test_suite_name, name=atomic_test_name, kind='test')
        full_name = join_name(suite.slug, atomic_test_name)
        test_issues = issues.get(full_name, [])
        test_list.append(
            Test(
                test_run=testrun,
                suite=suite,
                metadata=metadata,
                result=decoded_test_result,
                log=atomic_test_log,
                has_known_issues=bool(test_issues),
            ))
        if decoded_test_result is True:
            local_status['tests_pass'] += 1
        elif decoded_test_result is False:
            if test_issues:
                local_status['tests_xfail'] += 1
            else:
                local_status['tests_fail'] += 1
        else:
            local_status['tests_skip'] += 1
    created_tests = Test.objects.bulk_create(test_list)
    for test in created_tests:
        if test.name in issues:
            # add() takes individual instances, so unpack the list of issues
            test.known_issues.add(*issues[test.name])

    with transaction.atomic():
        tr_status = testrun.status.select_for_update().get(suite=None)
        tr_status.tests_pass += local_status['tests_pass']
        tr_status.tests_xfail += local_status['tests_xfail']
        tr_status.tests_fail += local_status['tests_fail']
        tr_status.tests_skip += local_status['tests_skip']
        tr_status.save()
    suite_status, _ = Status.objects.get_or_create(test_run=testrun,
                                                   suite=suite)
    with transaction.atomic():
        suite_status_for_update = Status.objects.select_for_update().get(
            pk=suite_status.pk)
        suite_status_for_update.tests_pass += local_status['tests_pass']
        suite_status_for_update.tests_xfail += local_status['tests_xfail']
        suite_status_for_update.tests_fail += local_status['tests_fail']
        suite_status_for_update.tests_skip += local_status['tests_skip']
        suite_status_for_update.save()
    logger.info("Deleting PluginScratch with ID: %s" % scratch_object.pk)
    scratch_object.delete()
    return 0
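
One detail in the corrected loop above: Django's many-to-many add() has the signature add(*objs), taking model instances (or primary keys) as separate positional arguments, so a list of issues must be star-unpacked:

    # Correct: unpack the list of KnownIssue instances
    test.known_issues.add(*issues[test.name])

    # Incorrect: passing the list itself fails, since a list is neither
    # a model instance nor a primary key
    # test.known_issues.add(issues[test.name])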