Example #1
    def __call__(test_run):
        if test_run.data_processed:
            return

        issues = {}
        for issue in KnownIssue.active_by_environment(test_run.environment):
            issues.setdefault(issue.test_name, [])
            issues[issue.test_name].append(issue)

        for test in test_parser()(test_run.tests_file):
            # TODO: remove check below when test_name size changes in the schema
            if len(test['test_name']) > 256:
                continue
            suite = get_suite(
                test_run,
                test['group_name']
            )
            metadata, _ = SuiteMetadata.objects.get_or_create(suite=suite.slug, name=test['test_name'], kind='test')
            full_name = join_name(suite.slug, test['test_name'])
            test_issues = issues.get(full_name, [])
            test_obj = Test.objects.create(
                test_run=test_run,
                suite=suite,
                metadata=metadata,
                result=test['pass'],
                log=test['log'],
                has_known_issues=bool(test_issues),
                build=test_run.build,
                environment=test_run.environment,
            )
            for issue in test_issues:
                test_obj.known_issues.add(issue)

        for metric in metric_parser()(test_run.metrics_file):
            # TODO: remove check below when test_name size changes in the schema
            if len(metric['name']) > 256:
                continue
            suite = get_suite(
                test_run,
                metric['group_name']
            )
            metadata, _ = SuiteMetadata.objects.get_or_create(suite=suite.slug, name=metric['name'], kind='metric')
            Metric.objects.create(
                test_run=test_run,
                suite=suite,
                metadata=metadata,
                name=metric['name'],
                result=metric['result'],
                measurements=','.join([str(m) for m in metric['measurements']]),
                unit=metric['unit'],
            )

        test_run.data_processed = True
        test_run.save()
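
Every variant of this loader shares the same lookup pattern: known issues are grouped by test_name, then each parsed test is matched against that map by its full name. A minimal self-contained sketch of the pattern, assuming join_name joins the suite slug and test name with a "/" (an assumption; the real helper is not shown in these snippets):

    from collections import defaultdict

    # Hypothetical stand-in for the issue objects used above; only the
    # test_name attribute matters for the lookup.
    class FakeIssue:
        def __init__(self, test_name):
            self.test_name = test_name

    def join_name(suite_slug, test_name):
        # assumption: full names have the form "<suite>/<test>"
        return '{}/{}'.format(suite_slug, test_name)

    known = [FakeIssue('mysuite/test_foo'), FakeIssue('mysuite/test_foo')]

    # group issues by test name, equivalent to the setdefault loop above
    issues = defaultdict(list)
    for issue in known:
        issues[issue.test_name].append(issue)

    # look up the issues for a parsed test by its full name
    full_name = join_name('mysuite', 'test_foo')
    test_issues = issues.get(full_name, [])
    assert len(test_issues) == 2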
Example #2
    def _extract_cts_results(self, buf, testrun, suite_prefix):
        tradefed_tree = self.__parse_xml_results(buf)
        if tradefed_tree is None:
            return

        issues = {}
        for issue in KnownIssue.active_by_environment(testrun.environment):
            issues.setdefault(issue.test_name, [])
            issues[issue.test_name].append(issue)

        test_elems = tradefed_tree.findall(".//Test")
        logger.debug("Tests: {}".format(len(test_elems)))
        elems = tradefed_tree.findall('Module')
        logger.debug("Modules: {}".format(len(elems)))
        task_list = []
        for elem in elems:
            # Naming: Module Name + Test Case Name + Test Name
            if 'abi' in elem.attrib.keys():
                module_name = '.'.join(
                    [elem.attrib['abi'], elem.attrib['name']])
            else:
                module_name = elem.attrib['name']
            logger.debug("Extracting tests for module: {}".format(module_name))
            test_cases = elem.findall('.//TestCase')
            logger.debug("Extracting suite names")
            atomic_test_suite_name = "{suite_prefix}/{module_name}".format(
                suite_prefix=suite_prefix, module_name=module_name)
            logger.debug(
                "creating suite metadata: {}".format(atomic_test_suite_name))
            suite_metadata, _ = SuiteMetadata.objects.get_or_create(
                suite=atomic_test_suite_name, kind='suite')
            suite, _ = Suite.objects.get_or_create(
                slug=atomic_test_suite_name,
                project=testrun.build.project,
                defaults={"metadata": suite_metadata})
            logger.debug(
                "Adding status with suite: {suite_prefix}/{module_name}".
                format(suite_prefix=suite_prefix, module_name=module_name))
            logger.debug("Creating subtasks for extracting results")
            for test_case in test_cases:
                plugin_scratch = PluginScratch.objects.create(
                    build=testrun.build,
                    storage=ET.tostring(test_case).decode('utf-8'))
                logger.debug("Created plugin scratch with ID: %s" %
                             plugin_scratch.pk)
                task = create_testcase_tests.s(plugin_scratch.pk,
                                               atomic_test_suite_name,
                                               testrun.pk, suite.pk)
                task_list.append(task)

        celery_chord(task_list)(update_build_status.s(testrun.pk))
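
Example #2 fans each TestCase out to a create_testcase_tests subtask and then uses a Celery chord so that update_build_status runs only after every subtask has finished. A minimal sketch of the chord pattern with hypothetical task names (a real deployment needs a running worker; the in-memory broker and backend here only let the sketch enqueue):

    from celery import Celery, chord

    app = Celery('sketch', broker='memory://', backend='cache+memory://')

    @app.task
    def parse_chunk(chunk_id):
        # stand-in for create_testcase_tests
        return chunk_id

    @app.task
    def finalize(results, build_id):
        # stand-in for update_build_status; the chord prepends the list
        # of header results to the signature's own arguments
        return (build_id, results)

    # the header tasks run in parallel; the callback fires exactly once,
    # after all of them have completed
    header = [parse_chunk.s(i) for i in range(3)]
    chord(header)(finalize.s(42))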
Example #3
 def test_active_known_issue(self):
     build = self.project.builds.create(
         datetime=self.date,
         version=self.date.strftime("%Y%m%d"),
     )
     test_run = build.test_runs.create(environment=self.env1)
     # create failed test
     test = test_run.tests.create(suite=self.suite1, name="test_foo", result=False)
     known_issue = KnownIssue.objects.create(
         title="foo",
         test_name=test.full_name
     )
     known_issue.save()
     known_issue.environments.add(test_run.environment)
     self.assertEqual(1, len(KnownIssue.active_by_environment(test_run.environment)))
Example #4
 def test_active_known_issue(self):
     build = self.project.builds.create(
         datetime=self.date,
         version=self.date.strftime("%Y%m%d"),
     )
     test_run = build.test_runs.create(environment=self.env1)
     # create failed test
     test = test_run.tests.create(suite=self.suite1, name="test_foo", result=False)
     known_issue = KnownIssue.objects.create(
         title="foo",
         test_name=test.full_name
     )
     known_issue.save()
     known_issue.environments.add(test_run.environment)
     self.assertEqual(1, len(KnownIssue.active_by_environment(test_run.environment)))
Example #5
 def test_active_known_issue(self):
     build = self.project.builds.create(
         datetime=self.date,
         version=self.date.strftime("%Y%m%d"),
     )
     test_run = build.test_runs.create(environment=self.env1)
     # create failed test
     foo_metadata, _ = SuiteMetadata.objects.get_or_create(suite=self.suite1.slug, name='test_foo', kind='test')
     test = test_run.tests.create(build=test_run.build, environment=test_run.environment, suite=self.suite1, metadata=foo_metadata, result=False)
     known_issue = KnownIssue.objects.create(
         title="foo",
         test_name=test.full_name
     )
     known_issue.save()
     known_issue.environments.add(test_run.environment)
     self.assertEqual(1, len(KnownIssue.active_by_environment(test_run.environment)))
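
The tests in Examples #3 to #5 all hinge on KnownIssue.active_by_environment returning the issues whose environments include the given one. Its implementation is not shown in these snippets; a plausible sketch, assuming the model has an active flag and an environments many-to-many field (both names are guesses based on the usage above):

    from django.db import models

    class Environment(models.Model):
        slug = models.SlugField()

    class KnownIssue(models.Model):
        title = models.CharField(max_length=1024)
        test_name = models.CharField(max_length=1024)
        active = models.BooleanField(default=True)
        environments = models.ManyToManyField(Environment)

        @classmethod
        def active_by_environment(cls, environment):
            # known issues attached to this environment and still active
            return cls.objects.filter(active=True, environments=environment)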
Example #6
    def __call__(test_run):
        if test_run.data_processed:
            return

        issues = {}
        for issue in KnownIssue.active_by_environment(test_run.environment):
            issues.setdefault(issue.test_name, [])
            issues[issue.test_name].append(issue)

        for test in test_parser()(test_run.tests_file):
            suite = get_suite(
                test_run,
                test['group_name']
            )
            metadata, _ = SuiteMetadata.objects.get_or_create(suite=suite.slug, name=test['test_name'], kind='test')
            full_name = join_name(suite.slug, test['test_name'])
            test_issues = issues.get(full_name, [])
            test_obj = Test.objects.create(
                test_run=test_run,
                suite=suite,
                metadata=metadata,
                name=test['test_name'],
                result=test['pass'],
                has_known_issues=bool(test_issues),
            )
            for issue in test_issues:
                test_obj.known_issues.add(issue)

        for metric in metric_parser()(test_run.metrics_file):
            suite = get_suite(
                test_run,
                metric['group_name']
            )
            metadata, _ = SuiteMetadata.objects.get_or_create(suite=suite.slug, name=metric['name'], kind='metric')
            Metric.objects.create(
                test_run=test_run,
                suite=suite,
                metadata=metadata,
                name=metric['name'],
                result=metric['result'],
                measurements=','.join([str(m) for m in metric['measurements']]),
            )

        test_run.data_processed = True
        test_run.save()
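
Both metric loops persist measurements as a comma-separated string. The inverse operation is implied but never shown; a round-trip sketch, assuming the measurements are numeric:

    measurements = [1.5, 2.0, 3.25]

    # serialize, as the Metric.objects.create(...) calls above do
    stored = ','.join(str(m) for m in measurements)
    assert stored == '1.5,2.0,3.25'

    # deserialize (assumption: floats, empty string means no measurements)
    restored = [float(m) for m in stored.split(',')] if stored else []
    assert restored == measurements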
Example #7
    def __call__(test_run):
        if test_run.data_processed:
            return

        issues = {}
        for issue in KnownIssue.active_by_environment(test_run.environment):
            issues.setdefault(issue.test_name, [])
            issues[issue.test_name].append(issue)

        for test in test_parser()(test_run.tests_file):
            suite = get_suite(test_run, test['group_name'])
            metadata, _ = SuiteMetadata.objects.get_or_create(
                suite=suite.slug, name=test['test_name'], kind='test')
            full_name = join_name(suite.slug, test['test_name'])
            test_issues = issues.get(full_name, [])
            test_obj = Test.objects.create(
                test_run=test_run,
                suite=suite,
                metadata=metadata,
                name=test['test_name'],
                result=test['pass'],
                has_known_issues=bool(test_issues),
            )
            for issue in test_issues:
                test_obj.known_issues.add(issue)

        for metric in metric_parser()(test_run.metrics_file):
            suite = get_suite(test_run, metric['group_name'])
            metadata, _ = SuiteMetadata.objects.get_or_create(
                suite=suite.slug, name=metric['name'], kind='metric')
            Metric.objects.create(
                test_run=test_run,
                suite=suite,
                metadata=metadata,
                name=metric['name'],
                result=metric['result'],
                measurements=','.join([str(m)
                                       for m in metric['measurements']]),
            )

        test_run.data_processed = True
        test_run.save()
Example #8
    def __call__(test_run):
        if test_run.data_processed:
            return

        issues = {}
        for issue in KnownIssue.active_by_environment(test_run.environment):
            issues.setdefault(issue.test_name, [])
            issues[issue.test_name].append(issue)

        # An issue's test_name is interpreted as a wildcard pattern, so
        # compile it to a regex before matching against test names. Every
        # character is escaped literally, then the escaped '*' is replaced
        # by '.*?', a non-greedy match for any sequence of characters.
        issues_regex = {}
        for test_name_regex in issues.keys():
            pattern = re.escape(test_name_regex).replace('\\*', '.*?')
            regex = re.compile(pattern)
            issues_regex[regex] = issues[test_name_regex]

        for test in test_parser()(test_run.tests_file):
            # TODO: remove check below when test_name size changes in the schema
            if len(test['test_name']) > 256:
                continue
            suite = get_suite(test_run, test['group_name'])
            metadata, _ = SuiteMetadata.objects.get_or_create(
                suite=suite.slug, name=test['test_name'], kind='test')
            full_name = join_name(suite.slug, test['test_name'])

            test_issues = list(
                itertools.chain(*[
                    issue for regex, issue in issues_regex.items()
                    if regex.match(full_name)
                ]))

            test_obj = Test.objects.create(
                test_run=test_run,
                suite=suite,
                metadata=metadata,
                result=test['pass'],
                log=test['log'],
                has_known_issues=bool(test_issues),
                build=test_run.build,
                environment=test_run.environment,
            )
            for issue in test_issues:
                test_obj.known_issues.add(issue)

        for metric in metric_parser()(test_run.metrics_file):
            # TODO: remove check below when test_name size changes in the schema
            if len(metric['name']) > 256:
                continue
            suite = get_suite(test_run, metric['group_name'])
            metadata, _ = SuiteMetadata.objects.get_or_create(
                suite=suite.slug, name=metric['name'], kind='metric')
            Metric.objects.create(
                test_run=test_run,
                suite=suite,
                metadata=metadata,
                result=metric['result'],
                measurements=','.join([str(m)
                                       for m in metric['measurements']]),
                unit=metric['unit'],
                build=test_run.build,
                environment=test_run.environment,
            )

        test_run.data_processed = True
        test_run.save()
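
Example #8 is the only variant that treats an issue's test_name as a wildcard pattern rather than a literal name. The transformation is worth isolating: re.escape neutralizes every regex metacharacter, then the escaped '\*' is widened back into '.*?'. A standalone sketch:

    import re

    def compile_wildcard(name):
        # '*' is the only wildcard; every other character matches literally
        pattern = re.escape(name).replace('\\*', '.*?')
        return re.compile(pattern)

    regex = compile_wildcard('mysuite/test_*')
    assert regex.match('mysuite/test_foo')
    assert not regex.match('othersuite/test_foo')
    # re.match only anchors at the start; append '$' if full-string
    # matches are required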
Example #9
def create_testcase_tests(test_case_string_storage_id, atomic_test_suite_name,
                          testrun_id, suite_id):
    test_case_string = None
    scratch_object = None
    try:
        scratch_object = PluginScratch.objects.get(
            pk=test_case_string_storage_id)
        test_case_string = scratch_object.storage
    except PluginScratch.DoesNotExist:
        logger.warning("PluginScratch with ID: %s doesn't exist" %
                       test_case_string_storage_id)
        return

    test_case = ET.fromstring(test_case_string)
    testrun = TestRun.objects.get(pk=testrun_id)
    suite = Suite.objects.get(pk=suite_id)
    local_status = {
        'tests_pass': 0,
        'tests_xfail': 0,
        'tests_fail': 0,
        'tests_skip': 0
    }
    issues = {}
    for issue in KnownIssue.active_by_environment(testrun.environment):
        issues.setdefault(issue.test_name, [])
        issues[issue.test_name].append(issue)

    test_case_name = test_case.get("name")
    tests = test_case.findall('.//Test')
    logger.debug("Extracting TestCase: {test_case_name}".format(
        test_case_name=test_case_name))
    logger.debug("Adding {} testcases".format(len(tests)))
    test_list = []
    for atomic_test in tests:
        atomic_test_result = atomic_test.get("result")
        decoded_test_result = atomic_test_result == 'pass'
        if atomic_test_result == 'skip' or atomic_test.get(
                "skipped") == "true":
            decoded_test_result = None
        atomic_test_name = "{test_case_name}.{test_name}".format(
            test_case_name=test_case_name, test_name=atomic_test.get("name"))
        atomic_test_log = ""
        trace_node = atomic_test.find('.//StackTrace')
        if trace_node is not None:
            atomic_test_log = trace_node.text

        metadata, _ = SuiteMetadata.objects.get_or_create(
            suite=atomic_test_suite_name, name=atomic_test_name, kind='test')
        full_name = join_name(suite.slug, atomic_test_name)
        test_issues = issues.get(full_name, [])
        test_list.append(
            Test(
                test_run=testrun,
                suite=suite,
                metadata=metadata,
                result=decoded_test_result,
                log=atomic_test_log,
                has_known_issues=bool(test_issues),
            ))
        if decoded_test_result is True:
            local_status['tests_pass'] += 1
        elif decoded_test_result is False:
            if test_issues:
                local_status['tests_xfail'] += 1
            else:
                local_status['tests_fail'] += 1
        else:
            local_status['tests_skip'] += 1
    created_tests = Test.objects.bulk_create(test_list)
    for test in created_tests:
        # issues is keyed by full names ("suite/test"); add each matching
        # issue individually, since .add() takes instances, not a list
        for issue in issues.get(test.full_name, []):
            test.known_issues.add(issue)

    with transaction.atomic():
        tr_status = testrun.status.select_for_update().get(suite=None)
        tr_status.tests_pass += local_status['tests_pass']
        tr_status.tests_xfail += local_status['tests_xfail']
        tr_status.tests_fail += local_status['tests_fail']
        tr_status.tests_skip += local_status['tests_skip']
        tr_status.save()
    suite_status, _ = Status.objects.get_or_create(test_run=testrun,
                                                   suite=suite)
    with transaction.atomic():
        suite_status_for_update = Status.objects.select_for_update().get(
            pk=suite_status.pk)
        suite_status_for_update.tests_pass += local_status['tests_pass']
        suite_status_for_update.tests_xfail += local_status['tests_xfail']
        suite_status_for_update.tests_fail += local_status['tests_fail']
        suite_status_for_update.tests_skip += local_status['tests_skip']
        suite_status_for_update.save()
    logger.info("Deleting PluginScratch with ID: %s" % scratch_object.pk)
    scratch_object.delete()
    return 0
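
Example #9 increments the counters under select_for_update inside transaction.atomic(), which serializes concurrent chord subtasks writing to the same Status row. An alternative sketch using Django F() expressions pushes the addition into a single UPDATE statement, so no explicit row lock is needed (a different technique than the code above, not the project's own):

    from django.db import transaction
    from django.db.models import F

    def bump_status(status_queryset, local_status):
        # issues one UPDATE ... SET tests_pass = tests_pass + %s, ...;
        # the database applies the increments atomically
        with transaction.atomic():
            status_queryset.update(
                tests_pass=F('tests_pass') + local_status['tests_pass'],
                tests_xfail=F('tests_xfail') + local_status['tests_xfail'],
                tests_fail=F('tests_fail') + local_status['tests_fail'],
                tests_skip=F('tests_skip') + local_status['tests_skip'],
            )

    # usage against the models in the snippet would look roughly like:
    # bump_status(testrun.status.filter(suite=None), local_status)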