def rewrite_tests_descriptions(testrail_project, tests):
    test_suite = TestRailSettings.tests_suite
    suite = testrail_project.get_suite_by_name(test_suite)

    # remove old sections and test cases
    old_sections = testrail_project.get_sections(suite_id=suite['id'])
    for section in old_sections:
        if section["parent_id"] is None:
            testrail_project.delete_section(section["id"])

    # create new groups
    for group in TEST_GROUPS:
        testrail_project.create_section(suite["id"], group)

    api_group = testrail_project.get_section_by_name(suite["id"], "API")
    for section in TEST_SECTIONS:
        testrail_project.create_section(suite["id"], section, api_group["id"])

    # add test cases to the test suite in 100 parallel jobs
    logger.info("Add cases")
    for test_section, test_list in tests.iteritems():
        section = testrail_project.get_section_by_name(
            suite_id=suite['id'], section_name=test_section)
        Parallel(n_jobs=100)(
            delayed(add_case)(testrail_project, suite, test_case, section)
            for test_case in test_list)
def get_testrail():
    """ Get test rail instance """
    logger.info('Initializing TestRail Project configuration...')
    return TestRailProject(url=TestRailSettings.url,
                           user=TestRailSettings.user,
                           password=TestRailSettings.password,
                           project=TestRailSettings.project)
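# Minimal usage sketch: get_testrail() is the entry point used throughout
# these examples; the suite name comes from the settings module:
project = get_testrail()
suite = project.get_suite_by_name(TestRailSettings.tests_suite)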
    def handle_blocked(self, test, result):
        if result['custom_launchpad_bug']:
            return False
        m = re.search(r'Blocked by "(\S+)" test.', result['comment'] or '')
        if m:
            blocked_test_group = m.group(1)
        else:
            logger.warning('Blocked result #{0} for test {1} does '
                           'not have upstream test name in its '
                           'comments!'.format(result['id'],
                                              test['custom_test_group']))
            return False

        if not result['version']:
            logger.debug('Blocked result #{0} for test {1} does '
                         'not have version, can\'t find upstream '
                         'test case!'.format(result['id'],
                                             test['custom_test_group']))
            return False

        bug_link = None
        blocked_test = self.get_test_by_group(blocked_test_group,
                                              result['version'])
        if not blocked_test:
            return False
        logger.debug('Test {0} was blocked by failed test {1}'.format(
            test['custom_test_group'], blocked_test_group))

        blocked_results = self.project.get_results_for_test(
            blocked_test['id'])

        # Since we manually add results to failed tests with statuses
        # ProdFailed, TestFailed, etc. and attach bugs links to them,
        # we could skip original version copying. So look for test
        # results with target version, but allow to copy links to bugs
        # from other results of the same test (newer are checked first)
        if not any(br['version'] == result['version'] and
                   br['status_id'] in self.failed_statuses
                   for br in blocked_results):
            logger.debug('Did not find result for test {0} with version '
                         '{1}!'.format(blocked_test_group, result['version']))
            return False

        for blocked_result in sorted(blocked_results,
                                     key=lambda x: x['id'],
                                     reverse=True):
            if blocked_result['status_id'] not in self.failed_statuses:
                continue

            if blocked_result['custom_launchpad_bug']:
                bug_link = blocked_result['custom_launchpad_bug']
                break

        if bug_link is not None:
            result['custom_launchpad_bug'] = bug_link
            self.project.add_raw_results_for_test(test['id'], result)
            logger.info('Added bug {0} to blocked result of {1} test.'.format(
                bug_link, test['custom_test_group']))
            return bug_link
        return False
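# A quick, self-contained illustration of the comment format that
# handle_blocked() parses above (the test group name is made up):
import re
comment = 'Blocked by "ha_neutron_destructive" test.'
match = re.search(r'Blocked by "(\S+)" test.', comment)
assert match and match.group(1) == 'ha_neutron_destructive'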
    @property  # accessed as an attribute in generate() below
    def bugs_statistics(self):
        if self._bugs_statistics != {}:
            return self._bugs_statistics
        logger.info(
            'Collecting stats for TestRun "{0}" on "{1}"...'.format(
                self.run["name"], self.run["config"] or "default config"
            )
        )

        for test in self.tests:
            logger.debug('Checking "{0}" test...'.format(test["title"]))
            test_results = sorted(
                self.project.get_results_for_test(test["id"], self.results), key=lambda x: x["id"], reverse=True
            )

            linked_bugs = []
            is_blocked = False

            for result in test_results:
                if result["status_id"] in self.blocked_statuses:
                    if self.check_blocked:
                        new_bug_link = self.handle_blocked(test, result)
                        if new_bug_link:
                            linked_bugs.append(new_bug_link)
                            is_blocked = True
                            break
                    if result["custom_launchpad_bug"]:
                        linked_bugs.append(result["custom_launchpad_bug"])
                        is_blocked = True
                        break
                if result["status_id"] in self.failed_statuses and result["custom_launchpad_bug"]:
                    linked_bugs.append(result["custom_launchpad_bug"])

            bug_ids = set(
                [
                    re.search(r".*bugs?/(\d+)/?", link).group(1)
                    for link in linked_bugs
                    if re.search(r".*bugs?/(\d+)/?", link)
                ]
            )

            for bug_id in bug_ids:
                if bug_id in self._bugs_statistics:
                    self._bugs_statistics[bug_id][test["id"]] = {
                        "group": test["custom_test_group"] or "manual",
                        "config": self.run["config"] or "default",
                        "blocked": is_blocked,
                    }

                else:
                    self._bugs_statistics[bug_id] = {
                        test["id"]: {
                            "group": test["custom_test_group"] or "manual",
                            "config": self.run["config"] or "default",
                            "blocked": is_blocked,
                        }
                    }
        return self._bugs_statistics
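# Illustrative shape of the mapping built above (all values are made up):
example_stats = {
    '1540567': {          # Launchpad bug id parsed from the linked URL
        9001: {           # TestRail test id
            'group': 'ha_neutron_destructive',
            'config': 'Ubuntu 16.04',
            'blocked': True,
        },
    },
}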
def get_tests_descriptions(milestone_id, testrail_default_test_priority,
                           testrail_project):
    # To get the Tempest tests list, need to execute the following commands:
    # git clone https://github.com/openstack/tempest && cd tempest && tox -evenv
    # .tox/venv/bin/pip install nose
    get_tempest_tests = ("cd tempest && .tox/venv/bin/nosetests "
                         "--collect-only tempest/{0} -v 2>&1 | grep 'id-.*'")
    get_commit = "cd tempest && git rev-parse HEAD"
    commit = subprocess.Popen(get_commit, shell=True, stdout=subprocess.PIPE)
    logger.info("Generate test suite for tempest"
                " commit:{}".format(commit.stdout.readline()))
    custom_cases_fields = _get_custom_cases_fields(
        case_fields=testrail_project.get_case_fields(),
        project_id=testrail_project.project['id'])
    tests = {}

    for group in TEST_GROUPS:
        p = subprocess.Popen(get_tempest_tests.format(group.lower()),
                             shell=True, stdout=subprocess.PIPE)

        for line in iter(p.stdout.readline, b''):
            section = generate_groups(line) if group == "API" else group

            test_class = []
            for r in line.split("."):
                if "id-" in r:
                    title = r.strip()
                    break
                else:
                    test_class.append(r)

            steps = [{"run this tempest test": "passed"}, ]

            test_case = {
                "title": title,
                "type_id": 1,
                "milestone_id": milestone_id,
                "priority_id": testrail_default_test_priority,
                "estimate": "1m",
                "refs": "",
                "custom_report_label": title.split('id-')[1][:-1],
                "custom_test_group": ".".join(test_class),
                "custom_test_case_description": title,
                "custom_test_case_steps": steps,
                "section": section
            }
            for case_field, default_value in custom_cases_fields.items():
                if case_field not in test_case:
                    test_case[case_field] = default_value
            if section not in tests:
                tests[section] = []
            tests[section].append(test_case)
    logger.debug(tests)
    logger.info("total test cases: "
                "{}".format(sum(map(lambda x: len(x), tests.values()))))
    return tests
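# Hedged driver sketch tying the helpers above together; the milestone id and
# priority id below are placeholders, not values from the original code:
testrail_project = get_testrail()
tests = get_tests_descriptions(
    milestone_id=1,                      # placeholder milestone id
    testrail_default_test_priority=4,    # placeholder priority id
    testrail_project=testrail_project)
rewrite_tests_descriptions(testrail_project, tests)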
    def generate(self):
        for test_run in self.test_runs_stats:
            test_run_stats = test_run.bugs_statistics
            self.bugs_statistics[test_run["id"]] = dict()
            for bug, tests in test_run_stats.items():
                if bug in self.bugs_statistics[test_run["id"]]:
                    self.bugs_statistics[test_run["id"]][bug].update(tests)
                else:
                    self.bugs_statistics[test_run["id"]][bug] = tests
            logger.info("Found {0} linked bug(s)".format(
                len(self.bugs_statistics[test_run["id"]])))
    def __init__(self, project, plan_id, run_ids=(), handle_blocked=False):
        self.project = project
        self.test_plan = self.project.get_plan(plan_id)
        logger.info('Found TestPlan "{0}"'.format(self.test_plan['name']))

        self.test_runs_stats = [
            TestRunStatistics(self.project, r['id'], handle_blocked)
            for e in self.test_plan['entries'] for r in e['runs']
            if r['id'] in run_ids or len(run_ids) == 0
        ]

        self.bugs_statistics = {}
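# Hedged usage sketch for StatisticsGenerator (plan id and run ids are
# placeholders, not values from the original code):
generator = StatisticsGenerator(get_testrail(), 100, run_ids=(2, 3),
                                handle_blocked=True)
generator.generate()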
def get_global_failure_group_list(sub_builds,
                                  threshold=FAILURE_GROUPING.get('threshold')):
    """ Filter out and grouping of all failure reasons across all tests

    :param sub_builds: list of dict per each subbuild
    :param threshold: float -threshold
    :return: (failure_group_dict, failure_reasons): tuple or () otherwise
              where:
              failure_group_dict(all failure groups and
              associated failed test info per each failure group) - dict
              failure_reasons(all failures across all subbuild) - list
    """
    # let's find all failures in all builds
    failure_reasons = []
    failure_group_dict = {}
    failure_group_list = []
    for build in sub_builds:
        if build.get('failure_reasons'):
            for failure in build.get('failure_reasons'):
                failure_reasons.append(failure)
                failure_group_list.append(failure.get('failure'))
    # let's truncate list
    failure_group_list = list(set(failure_group_list))
    # let's update failure_group_dict
    for failure in failure_reasons:
        if failure.get('failure') in failure_group_list:
            key = failure.get('failure')
            if not failure_group_dict.get(key):
                failure_group_dict[key] = []
            failure_group_dict[key].append(failure)
    # let's find Levenshtein distance and update failure_group_dict;
    # iterate over a snapshot of the keys, because merged groups are
    # deleted from failure_group_dict inside the loop
    keys = list(failure_group_dict)
    for num1, key1 in enumerate(keys):
        for key2 in keys[num1 + 1:]:
            # skip keys that were already merged into another group
            if key1 not in failure_group_dict \
                    or key2 not in failure_group_dict:
                continue
            # let's skip grouping if lengths differ too much (note the
            # float conversion: the original integer division truncated
            # this ratio)
            if abs(float(len(key1)) / len(key2)) >\
                    FAILURE_GROUPING.get('max_len_diff'):
                continue
            # let's group other failures together when the normalized
            # Levenshtein distance is below the threshold
            llen = distance(key1, key2)
            cal_threshold = float(llen) / max(len(key1), len(key2))
            if cal_threshold < threshold:
                # seems we shall combine those groups to one
                failure_group_dict[key1].extend(failure_group_dict[key2])
                logger.info("Those groups are going to be combined"
                            " due to Levenshtein distance\n"
                            " {}\n{}".format(key1, key2))
                del failure_group_dict[key2]
    return failure_group_dict, failure_reasons
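# A quick illustration of the normalized-distance rule used above, assuming
# distance() is the Levenshtein edit distance imported by this module (e.g.
# from the python-Levenshtein package); the failure strings are made up:
key1 = 'AssertionError: node-1 is offline'
key2 = 'AssertionError: node-2 is offline'
llen = distance(key1, key2)                     # one substituted character
print(float(llen) / max(len(key1), len(key2)))  # ~0.03: grouped if < threshold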
    def __init__(self, project, run_id, check_blocked=False):
        self.project = project
        self.run = self.project.get_run(run_id)
        self.tests = self.project.get_tests(run_id)
        self.results = self.get_results()
        logger.info('Found TestRun "{0}" on "{1}" with {2} tests and {3} '
                    'results'.format(self.run['name'],
                                     self.run['config'] or 'default config',
                                     len(self.tests), len(self.results)))
        # NB: 'stauses' (sic) is the attribute name as defined on
        # TestRailSettings in this codebase
        self.blocked_statuses = [self.project.get_status(s)['id']
                                 for s in TestRailSettings.stauses['blocked']]
        self.failed_statuses = [self.project.get_status(s)['id']
                                for s in TestRailSettings.stauses['failed']]
        self.check_blocked = check_blocked
        self._bugs_statistics = {}
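# Hedged usage sketch for TestRunStatistics (the run id is a placeholder):
run_stats = TestRunStatistics(get_testrail(), 12345, check_blocked=True)
stats = run_stats.bugs_statistics  # accessed as an attribute; cached after
                                   # the first computation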
def add_new_tests_description(testrail_project, tests):
    test_suite = TestRailSettings.tests_suite
    suite = testrail_project.get_suite_by_name(test_suite)
    logger.info("Update suite sections")
    for group in TEST_GROUPS:
        if not testrail_project.get_section_by_name(suite["id"], group):
            testrail_project.create_section(suite["id"], group)
    api_group = testrail_project.get_section_by_name(suite["id"], "API")
    for section in TEST_SECTIONS:
        if not testrail_project.get_section_by_name(suite["id"], section):
            testrail_project.create_section(suite["id"], section,
                                            api_group["id"])

    # add test cases to the test suite in 100 parallel jobs
    logger.info("Add cases")
    for test_section, test_list in tests.iteritems():
        section = testrail_project.get_section_by_name(
            suite_id=suite['id'], section_name=test_section)
        Parallel(n_jobs=100)(delayed(add_case)(testrail_project, suite,
                                               test_case, section, True)
                             for test_case in test_list)
def get_build_test_data(build_number, job_name,
                        jenkins_url=JENKINS.get('url')):
    """ Get build test data from Jenkins from nosetests.xml

    :param build_number: int - Jenkins build number
    :param job_name: str - Jenkins job_name
    :param jenkins_url: str - Jenkins http url
    :return: test_data: dict - build info or None otherwise
    """

    test_data = None
    logger.info('Getting subbuild {} {}'.format(job_name, build_number))
    runner_build = Build(job_name, build_number)
    buildinfo = runner_build.get_build_data(depth=0)
    if not buildinfo:
        logger.error('Getting subbuilds info is failed. '
                     'Job={} Build={}'.format(job_name, build_number))
        return test_data
    try:
        artifact_paths = [
            v for i in buildinfo.get('artifacts') for k, v in i.items()
            if k == 'relativePath'
            and v == JENKINS.get('xml_testresult_file_name')
        ][0]
        artifact_url = "/".join(
            [jenkins_url, 'job', job_name,
             str(build_number)])
        xdata = get_build_artifact(artifact_url, artifact_paths)
        test_data = xmltodict.parse(xdata, xml_attribs=True)
        test_data.update({
            'build_number': build_number,
            'job_name': job_name,
            'job_url': buildinfo.get('url'),
            'job_description': buildinfo.get('description'),
            'job_status': buildinfo.get('result')
        })
    except Exception:
        # artifact missing or XML unparsable; treat as "no data"
        test_data = None
    return test_data
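# Hedged usage sketch (job name and build number are placeholders):
data = get_build_test_data(1000, '9.0.swarm.runner')
if data:
    print(data['job_status'], data['build_number'])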
def main():
    parser = argparse.ArgumentParser(
        description="Generate statistics for bugs linked to TestRun. Publish "
                    "statistics to TestRail if necessary."
    )
    parser.add_argument("plan_id", type=int, nargs="?", default=None, help="Test plan ID in TestRail")
    parser.add_argument(
        "-j",
        "--job-name",
        dest="job_name",
        type=str,
        default=None,
        help="Name of Jenkins job which runs tests (runner). " "It will be used for TestPlan search instead ID",
    )
    parser.add_argument("-n", "--build-number", dest="build_number", default="latest", help="Jenkins job build number")
    parser.add_argument(
        "-r", "--run-id", dest="run_ids", type=str, default=None, help="(optional) IDs of TestRun to check (skip other)"
    )
    parser.add_argument(
        "-b",
        "--handle-blocked",
        action="store_true",
        dest="handle_blocked",
        default=False,
        help="Copy bugs links to downstream blocked results",
    )
    parser.add_argument(
        "-s",
        "--separate-runs",
        action="store_true",
        dest="separate_runs",
        default=False,
        help="Create separate statistics for each test run",
    )
    parser.add_argument("-p", "--publish", action="store_true", help="Publish statistics to TestPlan description")
    parser.add_argument(
        "-o",
        "--out-file",
        dest="output_file",
        default=None,
        type=str,
        help="Path to file to save statistics as JSON and/or " "HTML. Filename extension is added automatically",
    )
    parser.add_argument(
        "-H",
        "--html",
        action="store_true",
        help="Save statistics in HTML format to file " "(used with --out-file option)",
    )
    parser.add_argument(
        "-q",
        "--quiet",
        action="store_true",
        help="Be quiet (disable logging except critical) " 'Overrides "--verbose" option.',
    )
    parser.add_argument("-v", "--verbose", action="store_true", help="Enable debug logging.")

    args = parser.parse_args()

    if args.verbose:
        logger.setLevel(DEBUG)

    if args.quiet:
        logger.setLevel(CRITICAL)

    testrail_project = get_testrail()

    if args.job_name:
        logger.info(
            "Inspecting {0} build of {1} Jenkins job for TestPlan "
            "details...".format(args.build_number, args.job_name)
        )
        test_plan_name = generate_test_plan_name(args.job_name, args.build_number)
        test_plan = testrail_project.get_plan_by_name(test_plan_name)
        if test_plan:
            args.plan_id = test_plan["id"]
        else:
            logger.warning('TestPlan "{0}" not found!'.format(test_plan_name))

    if not args.plan_id:
        logger.error("There is no TestPlan to process, exiting...")
        return 1

    run_ids = () if not args.run_ids else tuple(int(arg) for arg in args.run_ids.split(","))

    generator = StatisticsGenerator(testrail_project, args.plan_id, run_ids, args.handle_blocked)
    generator.generate()
    stats = generator.dump()

    if args.publish:
        logger.debug("Publishing bugs statistics to TestRail..")
        generator.publish(stats)

    if args.output_file:
        html = generator.dump_html(stats) if args.html else args.html
        save_stats_to_file(stats, args.output_file, html)

        if args.separate_runs:
            for run in generator.test_runs_stats:
                file_name = "{0}_{1}".format(args.output_file, run["id"])
                stats = generator.dump(run_id=run["id"])
                html = generator.dump_html(stats, run["id"]) if args.html else args.html
                save_stats_to_file(stats, file_name, html)

    logger.info("Statistics generation complete!")
File: report.py  Project: avgoor/fuel-qa
def main():

    parser = OptionParser(
        description="Publish results of system tests from Jenkins build to "
                    "TestRail. See settings.py for configuration."
    )
    parser.add_option('-j', '--job-name', dest='job_name', default=None,
                      help='Jenkins swarm runner job name')
    parser.add_option('-N', '--build-number', dest='build_number',
                      default='latest',
                      help='Jenkins swarm runner build number')
    parser.add_option('-o', '--one-job', dest='one_job_name',
                      default=None,
                      help=('Process only one job name from the specified '
                            'parent job or view'))
    parser.add_option("-w", "--view", dest="jenkins_view", default=False,
                      help="Get system tests jobs from Jenkins view")
    parser.add_option("-l", "--live", dest="live_report", action="store_true",
                      help="Get tests results from running swarm")
    parser.add_option("-m", "--manual", dest="manual_run", action="store_true",
                      help="Manually add tests cases to TestRun (tested only)")
    parser.add_option('-c', '--create-plan-only', action="store_true",
                      dest="create_plan_only", default=False,
                      help='Jenkins swarm runner job name')
    parser.add_option("-v", "--verbose",
                      action="store_true", dest="verbose", default=False,
                      help="Enable debug output")

    (options, _) = parser.parse_args()

    if options.verbose:
        logger.setLevel(DEBUG)

    if options.live_report and options.build_number == 'latest':
        options.build_number = 'latest_started'

    # STEP #1
    # Initialize TestRail Project and define configuration
    logger.info('Initializing TestRail Project configuration...')
    project = TestRailProject(url=TestRailSettings.url,
                              user=TestRailSettings.user,
                              password=TestRailSettings.password,
                              project=TestRailSettings.project)

    tests_suite = project.get_suite_by_name(TestRailSettings.tests_suite)
    operation_systems = [{'name': config['name'], 'id': config['id'],
                         'distro': config['name'].split()[0].lower()}
                         for config in project.get_config_by_name(
                             'Operation System')['configs'] if
                         config['name'] in TestRailSettings.operation_systems]
    tests_results = {os['distro']: [] for os in operation_systems}

    # STEP #2
    # Get tests results from Jenkins
    logger.info('Getting tests results from Jenkins...')
    if options.jenkins_view:
        jobs = get_jobs_for_view(options.jenkins_view)
        tests_jobs = [{'name': j, 'number': 'latest'}
                      for j in jobs if 'system_test' in j] if \
            not options.create_plan_only else []
        runner_job = [j for j in jobs if 'runner' in j][0]
        runner_build = Build(runner_job, 'latest')
    elif options.job_name:
        runner_build = Build(options.job_name, options.build_number)
        tests_jobs = get_downstream_builds(runner_build.build_data) if \
            not options.create_plan_only else []
    else:
        logger.error("Please specify either Jenkins swarm runner job name (-j)"
                     " or Jenkins view with system tests jobs (-w). Exiting..")
        return

    for systest_build in tests_jobs:
        if (options.one_job_name and
                options.one_job_name != systest_build['name']):
            logger.debug("Skipping '{0}' because --one-job is specified"
                         .format(systest_build['name']))
            continue
        if options.job_name:
            if 'result' not in systest_build.keys():
                logger.debug("Skipping '{0}' job because it does't run tests "
                             "(build #{1} contains no results)".format(
                                 systest_build['name'],
                                 systest_build['number']))
                continue
            if systest_build['result'] is None:
                logger.debug("Skipping '{0}' job (build #{1}) because it's sti"
                             "ll running...".format(systest_build['name'],
                                                    systest_build['number'],))
                continue
        for os in tests_results.keys():
            if os in systest_build['name'].lower():
                tests_results[os].extend(get_tests_results(systest_build, os))

    # STEP #3
    # Create new TestPlan in TestRail (or get existing) and add TestRuns
    milestone, iso_number, prefix = get_version(runner_build.build_data)
    milestone = project.get_milestone_by_name(name=milestone)

    test_plan_name = ' '.join(
        filter(lambda x: bool(x),
               (milestone['name'], prefix, 'iso', '#' + str(iso_number))))

    test_plan = project.get_plan_by_name(test_plan_name)
    iso_link = '/'.join([JENKINS['url'], 'job',
                         '{0}.all'.format(milestone['name']), str(iso_number)])
    if not test_plan:
        test_plan = project.add_plan(test_plan_name,
                                     description=iso_link,
                                     milestone_id=milestone['id'],
                                     entries=[]
                                     )
        logger.info('Created new TestPlan "{0}".'.format(test_plan_name))
    else:
        logger.info('Found existing TestPlan "{0}".'.format(test_plan_name))

    if options.create_plan_only:
        return

    plan_entries = []
    all_cases = project.get_cases(suite_id=tests_suite['id'])
    for os in operation_systems:
        cases_ids = []
        if options.manual_run:
            all_results_groups = [r.group for r in tests_results[os['distro']]]
            for case in all_cases:
                if case['custom_test_group'] in all_results_groups:
                    cases_ids.append(case['id'])
        plan_entries.append(
            project.test_run_struct(
                name='{suite_name}'.format(suite_name=tests_suite['name']),
                suite_id=tests_suite['id'],
                milestone_id=milestone['id'],
                description='Results of system tests ({tests_suite}) on '
                            'iso #"{iso_number}"'.format(
                                tests_suite=tests_suite['name'],
                                iso_number=iso_number),
                config_ids=[os['id']],
                include_all=True,
                case_ids=cases_ids
            )
        )

    if not any(entry['suite_id'] == tests_suite['id']
               for entry in test_plan['entries']):
        if project.add_plan_entry(plan_id=test_plan['id'],
                                  suite_id=tests_suite['id'],
                                  config_ids=[os['id'] for os
                                              in operation_systems],
                                  runs=plan_entries):
            test_plan = project.get_plan(test_plan['id'])

    # STEP #4
    # Upload tests results to TestRail
    logger.info('Uploading tests results to TestRail...')
    for os in operation_systems:
        logger.info('Checking tests results for "{0}"...'.format(os['name']))
        results_to_publish = publish_results(
            project=project,
            milestone_id=milestone['id'],
            test_plan=test_plan,
            suite_id=tests_suite['id'],
            config_id=os['id'],
            results=tests_results[os['distro']]
        )
        logger.debug('Added new results for tests ({os}): {tests}'.format(
            os=os['name'], tests=[r.group for r in results_to_publish]
        ))

    logger.info('Report URL: {0}'.format(test_plan['url']))
def main():
    """
    :param argv: command line arguments
    :return: None
    """

    parser = argparse.ArgumentParser(description='Get downstream build info'
                                     ' for Jenkins swarm.runner build.'
                                     ' Generate matrix statistics:'
                                     ' (failure group -> builds & tests).'
                                     ' Publish matrix to Testrail'
                                     ' if necessary.')
    parser.add_argument('-n',
                        '--build-number',
                        type=int,
                        required=False,
                        dest='build_number',
                        help='Jenkins job build number')
    parser.add_argument('-j',
                        '--job-name',
                        type=str,
                        dest='job_name',
                        default='9.0.swarm.runner',
                        help='Name of Jenkins job which runs tests (runner)')
    parser.add_argument('-f',
                        '--format',
                        type=str,
                        dest='formatfile',
                        default='html',
                        help='format statistics: html,json,table')
    parser.add_argument('-o',
                        '--out',
                        type=str,
                        dest="fileoutput",
                        default='failure_groups_statistics',
                        help='Save statistics to file')
    parser.add_argument('-t',
                        '--track',
                        action="store_true",
                        help='Publish statistics to TestPlan description')
    parser.add_argument('-q',
                        '--quiet',
                        action="store_true",
                        help='Be quiet (disable logging except critical) '
                        'Overrides "--verbose" option.')
    parser.add_argument("-v",
                        "--verbose",
                        action="store_true",
                        help="Enable debug logging.")
    args = parser.parse_args()

    if args.verbose:
        logger.setLevel(DEBUG)
    if args.quiet:
        logger.setLevel(CRITICAL)
    if args.formatfile and\
       args.formatfile not in ['json', 'html', 'xls', 'xlsx', 'yaml', 'csv']:
        logger.info('Unsupported output format. Exit')
        return 2
    if not args.build_number:
        runner_build = Build(args.job_name, 'latest')
        logger.info('Latest build number is {}. Job is {}'.format(
            runner_build.number, args.job_name))
        args.build_number = runner_build.number

    logger.info('Getting subbuilds for {} {}'.format(args.job_name,
                                                     args.build_number))
    subbuilds, swarm_jenkins_info = get_sub_builds(args.build_number)
    if not subbuilds or not swarm_jenkins_info:
        logger.error('Necessary subbuild info is absent. Exit')
        return 3
    logger.info('{} Subbuilds have been found'.format(len(subbuilds)))

    logger.info('Calculating failure groups')
    failure_gd = get_global_failure_group_list(subbuilds)[0]
    if not failure_gd:
        logger.error('Necessary failure group info is absent. Exit')
        return 4
    logger.info('{} Failure groups have been found'.format(len(failure_gd)))

    logger.info('Getting TestRail data')
    testrail_testdata = get_testrail_testdata(args.job_name, args.build_number)
    if not testrail_testdata:
        logger.error('Necessary TestRail info is absent. Exit')
        return 5
    logger.info('TestRail data have been downloaded')

    logger.info('Getting TestRail bugs')
    testrail_bugs = get_bugs(subbuilds, testrail_testdata)
    if not testrail_bugs:
        logger.error('Necessary TestRail bugs info is absent. Exit')
        return 6
    logger.info('TestRail bugs have been fetched')

    logger.info('Update subbuilds data')
    update_subbuilds_failuregroup(subbuilds, failure_gd, testrail_testdata,
                                  testrail_bugs)
    logger.info('Subbuilds data have been updated')

    logger.info('Generating statistics across all failure groups')
    statistics = get_statistics(failure_gd, format_out=args.formatfile)
    if not statistics:
        logger.error('Necessary statistics info is absent. Exit')
        return 7
    logger.info('Statistics have been generated')

    if args.fileoutput and args.formatfile:
        logger.info('Save statistics')
        dump_statistics(statistics, args.build_number, args.job_name,
                        args.formatfile, args.fileoutput)
        logger.info('Statistics have been saved')
    if args.track:
        logger.info('Publish statistics to TestRail')
        if publish_statistics(statistics, args.build_number, args.job_name):
            logger.info('Statistics have been published')
        else:
            logger.info('Statistics have not been published '
                        'due to an internal issue')
def main():
    """
    :param argv: command line arguments
    :return: None
    """

    parser = argparse.ArgumentParser(description='Get downstream build info'
                                     ' for Jenkins swarm.runner build.'
                                     ' Generate matrix statistics:'
                                     ' (failure group -> builds & tests).'
                                     ' Publish matrix to Testrail'
                                     ' if necessary.')
    parser.add_argument('-n', '--build-number', type=int, required=False,
                        dest='build_number', help='Jenkins job build number')
    parser.add_argument('-j', '--job-name', type=str,
                        dest='job_name', default='9.0.swarm.runner',
                        help='Name of Jenkins job which runs tests (runner)')
    parser.add_argument('-f', '--format', type=str, dest='formatfile',
                        default='html',
                        help='format statistics: html,json,table')
    parser.add_argument('-o', '--out', type=str, dest="fileoutput",
                        default='failure_groups_statistics',
                        help='Save statistics to file')
    parser.add_argument('-t', '--track', action="store_true",
                        help='Publish statistics to TestPlan description')
    parser.add_argument('-q', '--quiet', action="store_true",
                        help='Be quiet (disable logging except critical) '
                             'Overrides "--verbose" option.')
    parser.add_argument("-v", "--verbose", action="store_true",
                        help="Enable debug logging.")
    args = parser.parse_args()

    if args.verbose:
        logger.setLevel(DEBUG)
    if args.quiet:
        logger.setLevel(CRITICAL)
    if args.formatfile and\
       args.formatfile not in ['json', 'html', 'xls', 'xlsx', 'yaml', 'csv']:
        logger.info('Not supported format output. Exit')
        return 2
    if not args.build_number:
        runner_build = Build(args.job_name, 'latest')
        logger.info('Latest build number is {}. Job is {}'.
                    format(runner_build.number, args.job_name))
        args.build_number = runner_build.number

    logger.info('Getting subbuilds for {} {}'.format(args.job_name,
                                                     args.build_number))
    subbuilds, swarm_jenkins_info = get_sub_builds(args.build_number)
    if not subbuilds or not swarm_jenkins_info:
        logger.error('Necessary subbuilds info are absent. Exit')
        return 3
    logger.info('{} Subbuilds have been found'.format(len(subbuilds)))

    logger.info('Calculating failure groups')
    failure_gd = get_global_failure_group_list(subbuilds)[0]
    if not failure_gd:
        logger.error('Necessary failure group info is absent. Exit')
        return 4
    logger.info('{} Failure groups have been found'.format(len(failure_gd)))

    logger.info('Getting TestRail data')
    testrail_testdata = get_testrail_testdata(args.job_name,
                                              args.build_number)
    if not testrail_testdata:
        logger.error('Necessary TestRail info is absent. Exit')
        return 5
    logger.info('TestRail data have been downloaded')

    logger.info('Getting TestRail bugs')
    testrail_bugs = get_bugs(subbuilds, testrail_testdata)
    if not testrail_bugs:
        logger.error('Necessary TestRail bugs info is absent. Exit')
        return 6
    logger.info('TestRail bugs have been retrieved')

    logger.info('Update subbuilds data')
    update_subbuilds_failuregroup(subbuilds, failure_gd,
                                  testrail_testdata,
                                  testrail_bugs)
    logger.info('Subbuilds data have been updated')

    logger.info('Generating statistics across all failure groups')
    statistics = get_statistics(failure_gd, format_out=args.formatfile)
    if not statistics:
        logger.error('Necessary statistics info is absent. Exit')
        return 7
    logger.info('Statistics have been generated')

    if args.fileoutput and args.formatfile:
        logger.info('Save statistics')
        dump_statistics(statistics, args.build_number, args.job_name,
                        args.formatfile, args.fileoutput)
        logger.info('Statistics have been saved')
    if args.track:
        logger.info('Publish statistics to TestRail')
        if publish_statistics(statistics, args.build_number, args.job_name):
            logger.info('Statistics have been published')
        else:
            logger.info('Statistics have not been published '
                        'due to internal issue')
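# A minimal sketch (not part of the original example, and the __main__ guard
# is an assumption) showing how main()'s numeric error codes could be
# surfaced as the process exit status:
if __name__ == '__main__':
    import sys
    sys.exit(main())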
Example #30
def main():
    parser = OptionParser(
        description="Publish results of system tests from Jenkins build to "
        "TestRail. See settings.py for configuration.")
    parser.add_option('-j',
                      '--job-name',
                      dest='job_name',
                      default=None,
                      help='Jenkins swarm runner job name')
    parser.add_option('-N',
                      '--build-number',
                      dest='build_number',
                      default='latest',
                      help='Jenkins swarm runner build number')
    parser.add_option("-l",
                      "--live",
                      dest="live_report",
                      action="store_true",
                      help="Get tests results from running swarm")
    parser.add_option("-v",
                      "--verbose",
                      action="store_true",
                      dest="verbose",
                      default=False,
                      help="Enable debug output")

    (options, _) = parser.parse_args()

    if options.verbose:
        logger.setLevel(DEBUG)

    if options.live_report and options.build_number == 'latest':
        build_number = 'latest_started'
    else:
        build_number = options.build_number

    # STEP #1
    # Initialize TestRail Project and define configuration
    logger.info('Initializing TestRail Project configuration...')
    project = TestRailProject(url=TestRailSettings.url,
                              user=TestRailSettings.user,
                              password=TestRailSettings.password,
                              project=TestRailSettings.project)
    logger.info('Initializing TestRail Project configuration... done')

    operation_systems = [{
        'name': config['name'],
        'id': config['id'],
        'distro': config['name'].split()[0].lower()
    } for config in project.get_config_by_name('Operation System')['configs']]
    os_mile = {
        '6.1': ['Centos 6.5', 'Ubuntu 14.04'],
        '6.0.1': ['Centos 6.5', 'Ubuntu 12.04']
    }

    tests_results = {}

    # STEP #2
    # Get tests results from Jenkins
    runner_build = Build(options.job_name, build_number)
    runs = runner_build.build_data['runs']

    # Analyze each test individually
    for run_one in runs:
        if '5.1' in run_one['url']:
            continue  # skip release 5.1
        tests_result = get_job_info(run_one['url'])
        if not tests_result['description']:
            continue  # skip builds without completed results
        if 'skipping' in tests_result['description']:
            continue  # skip builds whose tests were not performed
        tests_job = {
            'result': tests_result['result'],
            'name':
            (options.job_name + '/' + tests_result['url'].split('/')[-3]),
            'number': int(tests_result['url'].split('/')[-2]),
            'mile': (tests_result['description'].split()[0].split('-')[0]),
            'iso': (int(tests_result['description'].split()[0].split('-')[1]))
        }
        if tests_job['mile'] not in tests_results:
            tests_results[tests_job['mile']] = {}
        test_mile = tests_results[tests_job['mile']]
        if tests_job['iso'] not in test_mile:
            test_mile[tests_job['iso']] = {}
        test_iso = test_mile[tests_job['iso']]
        for os in operation_systems:
            if os['distro'] in tests_job['name'].lower() and\
                    os['name'] in os_mile[tests_job['mile']]:
                if os['id'] not in test_iso:
                    test_iso[os['id']] = []
                test_os_id = test_iso[os['id']]
                test_os_id.extend(get_tests_results(tests_job, os['distro']))

    # STEP #3
    # Create new TestPlan in TestRail (or get existing) and add TestRuns
    for mile in tests_results:
        mile_tests_suite = '{0}{1}'.format(TestRailSettings.tests_suite, mile)
        logger.info(mile_tests_suite)
        tests_suite = project.get_suite_by_name(mile_tests_suite)
        milestone = project.get_milestone_by_name(name=mile)
        for iso_number in tests_results.get(mile, {}):
            # Build the TestPlan name and check whether a plan
            # with the same name already exists in TestRail
            test_plan_name = '{milestone} iso #{iso_number}'.format(
                milestone=milestone['name'], iso_number=iso_number)
            test_plan = project.get_plan_by_name(test_plan_name)
            if not test_plan:
                test_plan = project.add_plan(test_plan_name,
                                             description='/'.join([
                                                 JENKINS['url'], 'job',
                                                 '{0}.all'.format(
                                                     milestone['name']),
                                                 str(iso_number)
                                             ]),
                                             milestone_id=milestone['id'],
                                             entries=[])
                logger.info(
                    'Created new TestPlan "{0}".'.format(test_plan_name))
            else:
                logger.info(
                    'Found existing TestPlan "{0}".'.format(test_plan_name))
            plan_entries = []
            # Create a test plan entry
            config_ids = []
            for os in operation_systems:
                if os['name'] in os_mile[mile]:
                    config_ids.append(os['id'])
                    cases_ids = []
                    plan_entries.append(
                        project.test_run_struct(
                            name=tests_suite['name'],
                            suite_id=tests_suite['id'],
                            milestone_id=milestone['id'],
                            description=('Results of system tests ({t_suite})'
                                         ' on iso #"{iso_number}"'.format(
                                             t_suite=tests_suite['name'],
                                             iso_number=iso_number)),
                            config_ids=[os['id']],
                            include_all=True,
                            case_ids=cases_ids))
            # Create a test plan entry with the test run
            run = find_run_by_name(test_plan, tests_suite['name'])
            if not run:
                logger.info('Adding a test plan entry with test run %s ...',
                            tests_suite['name'])
                entry = project.add_plan_entry(plan_id=test_plan['id'],
                                               suite_id=tests_suite['id'],
                                               config_ids=config_ids,
                                               runs=plan_entries)
                logger.info('The test plan entry has been added.')
                run = entry['runs'][0]
            test_plan = project.get_plan(test_plan['id'])

            # STEP #4
            # Upload tests results to TestRail
            logger.info('Uploading tests results to TestRail...')
            for os_id in tests_results.get(mile, {})\
                    .get(iso_number, {}):
                logger.info('Checking tests results for %s...',
                            project.get_config(os_id)['name'])
                tests_added = publish_results(
                    project=project,
                    milestone_id=milestone['id'],
                    test_plan=test_plan,
                    suite_id=tests_suite['id'],
                    config_id=os_id,
                    results=tests_results[mile][iso_number][os_id])
                logger.debug('Added new results for tests (%s): %s',
                             project.get_config(os_id)['name'],
                             [r.group for r in tests_added])

            logger.info('Report URL: %s', test_plan['url'])
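# Illustrative sketch of how 'mile' and 'iso' are derived from a build
# description in the loop above; the description string is a made-up example
# assuming the '<milestone>-<iso>' prefix format the code expects.
description = '6.1-521 on Ubuntu 14.04'
mile = description.split()[0].split('-')[0]        # '6.1'
iso = int(description.split()[0].split('-')[1])    # 521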
def get_tests_descriptions(milestone_id, testrail_default_test_priority,
                           testrail_project):
    # To get the Tempest tests list, the following commands need to be
    # executed first:
    # git clone https://github.com/openstack/tempest && cd tempest && tox -evenv
    # .tox/venv/bin/pip install nose
    get_tempest_tests = ("cd tempest && .tox/venv/bin/nosetests "
                         "--collect-only tempest/{0} -v 2>&1 | grep 'id-.*'")
    get_commit = "cd tempest && git rev-parse HEAD"
    commit = subprocess.Popen(get_commit, shell=True, stdout=subprocess.PIPE)
    logger.info("Generate test suite for tempest"
                " commit:{}".format(commit.stdout.readline()))
    custom_cases_fields = _get_custom_cases_fields(
        case_fields=testrail_project.get_case_fields(),
        project_id=testrail_project.project['id'])
    tests = {}

    for group in TEST_GROUPS:
        p = subprocess.Popen(get_tempest_tests.format(group.lower()),
                             shell=True,
                             stdout=subprocess.PIPE)

        for line in iter(p.stdout.readline, b''):
            section = generate_groups(line) if group == "API" else group

            test_class = []
            for r in line.split("."):
                if "id-" in r:
                    title = r.strip()
                    break
                else:
                    test_class.append(r)

            steps = [
                {
                    "run this tempest test": "passed"
                },
            ]

            test_case = {
                "title": title,
                "type_id": 1,
                "milestone_id": milestone_id,
                "priority_id": testrail_default_test_priority,
                "estimate": "1m",
                "refs": "",
                "custom_report_label": title.split('id-')[1][:-1],
                "custom_test_group": ".".join(test_class),
                "custom_test_case_description": title,
                "custom_test_case_steps": steps,
                "section": section
            }
            for case_field, default_value in custom_cases_fields.items():
                if case_field not in test_case:
                    test_case[case_field] = default_value
            if section not in tests:
                tests[section] = []
            tests[section].append(test_case)
    logger.debug(tests)
    logger.info("total test cases: "
                "{}".format(sum(map(lambda x: len(x), tests.values()))))
    return tests
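# A small, self-contained sketch of the line parsing done above; the sample
# line is an assumption about what the grepped nosetests --collect-only
# output looks like.
sample = ("tempest.api.compute.test_versions.TestVersions"
          ".test_list_versions[id-2fb9721d-9585-4e1a-8c9a-1e1b6c2b4b7a] ... ok")
test_class = []
for part in sample.split("."):
    if "id-" in part:
        title = part.strip()
        break
    test_class.append(part)
print(".".join(test_class))        # custom_test_group (dotted class path)
print(title.split('id-')[1][:-1])  # custom_report_label (test UUID)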
Example #32
def main():

    parser = OptionParser(
        description="Publish results of system tests from Jenkins build to "
        "TestRail. See settings.py for configuration.")
    parser.add_option('-j',
                      '--job-name',
                      dest='job_name',
                      default=None,
                      help='Jenkins swarm runner job name')
    parser.add_option('-N',
                      '--build-number',
                      dest='build_number',
                      default='latest',
                      help='Jenkins swarm runner build number')
    parser.add_option('-o',
                      '--one-job',
                      dest='one_job_name',
                      default=None,
                      help=('Process only one job name from the specified '
                            'parent job or view'))
    parser.add_option("-w",
                      "--view",
                      dest="jenkins_view",
                      default=False,
                      help="Get system tests jobs from Jenkins view")
    parser.add_option("-l",
                      "--live",
                      dest="live_report",
                      action="store_true",
                      help="Get tests results from running swarm")
    parser.add_option("-m",
                      "--manual",
                      dest="manual_run",
                      action="store_true",
                      help="Manually add tests cases to TestRun (tested only)")
    parser.add_option('-c',
                      '--create-plan-only',
                      action="store_true",
                      dest="create_plan_only",
                      default=False,
                      help='Only create a TestPlan in TestRail, '
                           'do not publish tests results')
    parser.add_option("-v",
                      "--verbose",
                      action="store_true",
                      dest="verbose",
                      default=False,
                      help="Enable debug output")

    (options, _) = parser.parse_args()

    if options.verbose:
        logger.setLevel(DEBUG)

    if options.live_report and options.build_number == 'latest':
        options.build_number = 'latest_started'

    # STEP #1
    # Initialize TestRail Project and define configuration
    logger.info('Initializing TestRail Project configuration...')
    project = TestRailProject(url=TestRailSettings.url,
                              user=TestRailSettings.user,
                              password=TestRailSettings.password,
                              project=TestRailSettings.project)

    tests_suite = project.get_suite_by_name(TestRailSettings.tests_suite)
    operation_systems = [
        {
            'name': config['name'],
            'id': config['id'],
            'distro': config['name'].split()[0].lower()
        }
        for config in project.get_config_by_name('Operation System')['configs']
        if config['name'] in TestRailSettings.operation_systems
    ]
    tests_results = {os['distro']: [] for os in operation_systems}

    # STEP #2
    # Get tests results from Jenkins
    logger.info('Getting tests results from Jenkins...')
    if options.jenkins_view:
        jobs = get_jobs_for_view(options.jenkins_view)
        tests_jobs = [{'name': j, 'number': 'latest'}
                      for j in jobs if 'system_test' in j] if \
            not options.create_plan_only else []
        runner_job = [j for j in jobs if 'runner' in j][0]
        runner_build = Build(runner_job, 'latest')
    elif options.job_name:
        runner_build = Build(options.job_name, options.build_number)
        tests_jobs = get_downstream_builds(runner_build.build_data) if \
            not options.create_plan_only else []
    else:
        logger.error("Please specify either Jenkins swarm runner job name (-j)"
                     " or Jenkins view with system tests jobs (-w). Exiting..")
        return

    for systest_build in tests_jobs:
        if (options.one_job_name
                and options.one_job_name != systest_build['name']):
            logger.debug(
                "Skipping '{0}' because --one-job is specified".format(
                    systest_build['name']))
            continue
        if options.job_name:
            if 'result' not in systest_build.keys():
                logger.debug("Skipping '{0}' job because it does't run tests "
                             "(build #{1} contains no results)".format(
                                 systest_build['name'],
                                 systest_build['number']))
                continue
            if systest_build['result'] is None:
                logger.debug("Skipping '{0}' job (build #{1}) because it's sti"
                             "ll running...".format(
                                 systest_build['name'],
                                 systest_build['number'],
                             ))
                continue
        for os in tests_results.keys():
            if os in systest_build['name'].lower():
                tests_results[os].extend(get_tests_results(systest_build, os))

    # STEP #3
    # Create new TestPlan in TestRail (or get existing) and add TestRuns
    milestone, iso_number, prefix = get_version(runner_build.build_data)
    milestone = project.get_milestone_by_name(name=milestone)

    test_plan_name = ' '.join(
        filter(lambda x: bool(x),
               (milestone['name'], prefix, 'iso', '#' + str(iso_number))))

    test_plan = project.get_plan_by_name(test_plan_name)
    iso_link = '/'.join([
        JENKINS['url'], 'job', '{0}.all'.format(milestone['name']),
        str(iso_number)
    ])
    if not test_plan:
        test_plan = project.add_plan(test_plan_name,
                                     description=iso_link,
                                     milestone_id=milestone['id'],
                                     entries=[])
        logger.info('Created new TestPlan "{0}".'.format(test_plan_name))
    else:
        logger.info('Found existing TestPlan "{0}".'.format(test_plan_name))

    if options.create_plan_only:
        return

    plan_entries = []
    all_cases = project.get_cases(suite_id=tests_suite['id'])
    for os in operation_systems:
        cases_ids = []
        if options.manual_run:
            all_results_groups = [r.group for r in tests_results[os['distro']]]
            for case in all_cases:
                if case['custom_test_group'] in all_results_groups:
                    cases_ids.append(case['id'])
        plan_entries.append(
            project.test_run_struct(
                name='{suite_name}'.format(suite_name=tests_suite['name']),
                suite_id=tests_suite['id'],
                milestone_id=milestone['id'],
                description='Results of system tests ({tests_suite}) on is'
                'o #"{iso_number}"'.format(tests_suite=tests_suite['name'],
                                           iso_number=iso_number),
                config_ids=[os['id']],
                include_all=True,
                case_ids=cases_ids))

    if not any(entry['suite_id'] == tests_suite['id']
               for entry in test_plan['entries']):
        if project.add_plan_entry(
                plan_id=test_plan['id'],
                suite_id=tests_suite['id'],
                config_ids=[os['id'] for os in operation_systems],
                runs=plan_entries):
            test_plan = project.get_plan(test_plan['id'])

    # STEP #4
    # Upload tests results to TestRail
    logger.info('Uploading tests results to TestRail...')
    for os in operation_systems:
        logger.info('Checking tests results for "{0}"...'.format(os['name']))
        results_to_publish = publish_results(
            project=project,
            milestone_id=milestone['id'],
            test_plan=test_plan,
            suite_id=tests_suite['id'],
            config_id=os['id'],
            results=tests_results[os['distro']])
        logger.debug('Added new results for tests ({os}): {tests}'.format(
            os=os['name'], tests=[r.group for r in results_to_publish]))

    logger.info('Report URL: {0}'.format(test_plan['url']))
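# Sketch of how the TestPlan name is assembled above; the milestone, prefix
# and iso values are made-up examples.
milestone_name, prefix, iso_number = '8.0', 'MU-1', 522
test_plan_name = ' '.join(
    filter(lambda x: bool(x),
           (milestone_name, prefix, 'iso', '#' + str(iso_number))))
print(test_plan_name)  # '8.0 MU-1 iso #522'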
def main():
    parser = argparse.ArgumentParser(
        description="Generate statistics for bugs linked to TestRun. Publish "
        "statistics to testrail if necessary.")
    parser.add_argument('plan_id',
                        type=int,
                        nargs='?',
                        default=None,
                        help='Test plan ID in TestRail')
    parser.add_argument('-j',
                        '--job-name',
                        dest='job_name',
                        type=str,
                        default=None,
                        help='Name of Jenkins job which runs tests (runner). '
                        'It will be used for TestPlan search instead ID')
    parser.add_argument('-n',
                        '--build-number',
                        dest='build_number',
                        default='latest',
                        help='Jenkins job build number')
    parser.add_argument('-r',
                        '--run-id',
                        dest='run_ids',
                        type=str,
                        default=None,
                        help='(optional) IDs of TestRun to check (skip other)')
    parser.add_argument('-b',
                        '--handle-blocked',
                        action="store_true",
                        dest='handle_blocked',
                        default=False,
                        help='Copy bugs links to downstream blocked results')
    parser.add_argument('-s',
                        '--separate-runs',
                        action="store_true",
                        dest='separate_runs',
                        default=False,
                        help='Create separate statistics for each test run')
    parser.add_argument('-p',
                        '--publish',
                        action="store_true",
                        help='Publish statistics to TestPlan description')
    parser.add_argument('-o',
                        '--out-file',
                        dest='output_file',
                        default=None,
                        type=str,
                        help='Path to file to save statistics as JSON and/or '
                        'HTML. Filename extension is added automatically')
    parser.add_argument('-H',
                        '--html',
                        action="store_true",
                        help='Save statistics in HTML format to file '
                        '(used with --out-file option)')
    parser.add_argument('-q',
                        '--quiet',
                        action="store_true",
                        help='Be quiet (disable logging except critical) '
                        'Overrides "--verbose" option.')
    parser.add_argument("-v",
                        "--verbose",
                        action="store_true",
                        help="Enable debug logging.")

    args = parser.parse_args()

    if args.verbose:
        logger.setLevel(DEBUG)

    if args.quiet:
        logger.setLevel(CRITICAL)

    testrail_project = get_testrail()

    if args.job_name:
        logger.info('Inspecting {0} build of {1} Jenkins job for TestPlan '
                    'details...'.format(args.build_number, args.job_name))
        test_plan_name = generate_test_plan_name(args.job_name,
                                                 args.build_number)
        test_plan = testrail_project.get_plan_by_name(test_plan_name)
        if test_plan:
            args.plan_id = test_plan['id']
        else:
            logger.warning('TestPlan "{0}" not found!'.format(test_plan_name))

    if not args.plan_id:
        logger.error('There is no TestPlan to process, exiting...')
        return 1

    run_ids = () if not args.run_ids else tuple(
        int(arg) for arg in args.run_ids.split(','))

    generator = StatisticsGenerator(testrail_project, args.plan_id, run_ids,
                                    args.handle_blocked)
    generator.generate()
    stats = generator.dump()

    if args.publish:
        logger.debug('Publishing bugs statistics to TestRail...')
        generator.publish(stats)

    if args.output_file:
        html = generator.dump_html(stats) if args.html else None
        save_stats_to_file(stats, args.output_file, html)

        if args.separate_runs:
            for run in generator.test_runs_stats:
                file_name = '{0}_{1}'.format(args.output_file, run['id'])
                stats = generator.dump(run_id=run['id'])
                html = (generator.dump_html(stats, run['id'])
                        if args.html else None)
                save_stats_to_file(stats, file_name, html)

    logger.info('Statistics generation complete!')
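# Illustrative: how the -r/--run-id CSV value is normalized above into a
# tuple of ints (the input string is a made-up example).
raw_run_ids = '118,119,240'
run_ids = () if not raw_run_ids else tuple(
    int(arg) for arg in raw_run_ids.split(','))
print(run_ids)  # (118, 119, 240)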
Example #34
def main():
    parser = OptionParser(
        description="Publish results of system tests from Jenkins build to "
                    "TestRail. See settings.py for configuration."
    )
    parser.add_option('-j', '--job-name', dest='job_name', default=None,
                      help='Jenkins swarm runner job name')
    parser.add_option('-N', '--build-number', dest='build_number',
                      default='latest',
                      help='Jenkins swarm runner build number')
    parser.add_option("-l", "--live", dest="live_report", action="store_true",
                      help="Get tests results from running swarm")
    parser.add_option("-v", "--verbose",
                      action="store_true", dest="verbose", default=False,
                      help="Enable debug output")

    (options, _) = parser.parse_args()

    if options.verbose:
        logger.setLevel(DEBUG)

    if options.live_report and options.build_number == 'latest':
        build_number = 'latest_started'
    else:
        build_number = options.build_number

    # STEP #1
    # Initialize TestRail Project and define configuration
    logger.info('Initializing TestRail Project configuration...')
    project = TestRailProject(url=TestRailSettings.url,
                              user=TestRailSettings.user,
                              password=TestRailSettings.password,
                              project=TestRailSettings.project)
    logger.info('Initializing TestRail Project configuration... done')

    operation_systems = [{'name': config['name'], 'id': config['id'],
                          'distro': config['name'].split()[0].lower()}
                         for config in project.get_config_by_name(
                             'Operation System')['configs']]
    os_mile = {'6.1': ['Centos 6.5', 'Ubuntu 14.04'],
               '6.0.1': ['Centos 6.5', 'Ubuntu 12.04']}

    tests_results = {}

    # STEP #2
    # Get tests results from Jenkins
    runner_build = Build(options.job_name, build_number)
    runs = runner_build.build_data['runs']

    # Analyze each test individually
    for run_one in runs:
        if '5.1' in run_one['url']:
            continue  # skip release 5.1
        tests_result = get_job_info(run_one['url'])
        if not tests_result['description']:
            continue  # skip builds without completed results
        if 'skipping' in tests_result['description']:
            continue  # skip builds whose tests were not performed
        tests_job = {'result': tests_result['result'],
                     'name': (options.job_name + '/' +
                              tests_result['url'].split('/')[-3]),
                     'number': int(tests_result['url'].split('/')[-2]),
                     'mile': (tests_result['description']
                              .split()[0].split('-')[0]),
                     'iso': int(tests_result['description']
                                .split()[0].split('-')[1])}
        if tests_job['mile'] not in tests_results:
            tests_results[tests_job['mile']] = {}
        test_mile = tests_results[tests_job['mile']]
        if tests_job['iso'] not in test_mile:
            test_mile[tests_job['iso']] = {}
        test_iso = test_mile[tests_job['iso']]
        for os in operation_systems:
            if os['distro'] in tests_job['name'].lower() and\
                    os['name'] in os_mile[tests_job['mile']]:
                if os['id'] not in test_iso:
                    test_iso[os['id']] = []
                test_os_id = test_iso[os['id']]
                test_os_id.extend(get_tests_results(tests_job, os['distro']))

    # STEP #3
    # Create new TestPlan in TestRail (or get existing) and add TestRuns
    for mile in tests_results:
        mile_tests_suite = '{0}{1}'.format(TestRailSettings.tests_suite, mile)
        logger.info(mile_tests_suite)
        tests_suite = project.get_suite_by_name(mile_tests_suite)
        milestone = project.get_milestone_by_name(name=mile)
        for iso_number in tests_results.get(mile, {}):
            # Build the TestPlan name and check whether a plan
            # with the same name already exists in TestRail
            test_plan_name = '{milestone} iso #{iso_number}'.format(
                milestone=milestone['name'],
                iso_number=iso_number)
            test_plan = project.get_plan_by_name(test_plan_name)
            if not test_plan:
                test_plan = project.add_plan(
                    test_plan_name,
                    description='/'.join([JENKINS['url'],
                                          'job',
                                          '{0}.all'.format(milestone['name']),
                                          str(iso_number)]),
                    milestone_id=milestone['id'],
                    entries=[])
                logger.info('Created new TestPlan "{0}".'
                            .format(test_plan_name))
            else:
                logger.info('Found existing TestPlan "{0}".'
                            .format(test_plan_name))
            plan_entries = []
            # Create a test plan entry
            config_ids = []
            for os in operation_systems:
                if os['name'] in os_mile[mile]:
                    config_ids.append(os['id'])
                    cases_ids = []
                    plan_entries.append(
                        project.test_run_struct(
                            name=tests_suite['name'],
                            suite_id=tests_suite['id'],
                            milestone_id=milestone['id'],
                            description=('Results of system tests ({t_suite})'
                                         ' on iso #"{iso_number}"'
                                         .format(t_suite=tests_suite['name'],
                                                 iso_number=iso_number)),
                            config_ids=[os['id']],
                            include_all=True,
                            case_ids=cases_ids))
            # Create a test plan entry with the test run
            run = find_run_by_name(test_plan, tests_suite['name'])
            if not run:
                logger.info('Adding a test plan entry with test run %s ...',
                            tests_suite['name'])
                entry = project.add_plan_entry(plan_id=test_plan['id'],
                                               suite_id=tests_suite['id'],
                                               config_ids=config_ids,
                                               runs=plan_entries)
                logger.info('The test plan entry has been added.')
                run = entry['runs'][0]
            test_plan = project.get_plan(test_plan['id'])

            # STEP #4
            # Upload tests results to TestRail
            logger.info('Uploading tests results to TestRail...')
            for os_id in tests_results.get(mile, {})\
                    .get(iso_number, {}):
                logger.info('Checking tests results for %s...',
                            project.get_config(os_id)['name'])
                tests_added = publish_results(
                    project=project,
                    milestone_id=milestone['id'],
                    test_plan=test_plan,
                    suite_id=tests_suite['id'],
                    config_id=os_id,
                    results=tests_results[mile][iso_number][os_id])
                logger.debug('Added new results for tests (%s): %s',
                             project.get_config(os_id)['name'],
                             [r.group for r in tests_added])

            logger.info('Report URL: %s', test_plan['url'])
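# Illustrative only: the nested shape tests_results takes after STEP #2 in
# the example above (milestone -> iso number -> TestRail config id -> list
# of results); every key and value below is a made-up placeholder.
tests_results_example = {
    '6.1': {
        521: {
            7: [],  # results returned by get_tests_results(...)
        },
    },
}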