예제 #1
0
File: builds.py  Project: dtsapikov/fuel-qa
 def get_build_data(self, depth=1):
     """Fetch the JSON data of this Jenkins build via the REST API.

     :param depth: int, value for the Jenkins API 'depth' query parameter
     :return: dict, parsed JSON response for the build
     """
     path_parts = [
         JENKINS["url"],
         'job',
         self.name,
         str(self.number),
         'api/json?depth={depth}'.format(depth=depth),
     ]
     url = "/".join(path_parts)
     logger.debug("Request build data from {}".format(url))
     return requests.get(url).json()
예제 #2
0
def get_test_build(build_name, build_number, check_rebuild=False,
                   force_rebuild_search=False):
    """Get test data from Jenkins job build

    :param build_name: string
    :param build_number: string or int, number of the build to inspect
    :param check_rebuild: bool, if True then look for newer job rebuild(s)
    :param force_rebuild_search: bool, if True then force rebuild(s) search
    :return: Build object for the found (re)build
    """
    test_build = Build(build_name, build_number)
    first_case = test_build.test_data()['suites'][0]['cases'].pop()['name']

    if (force_rebuild_search or first_case == 'jenkins') and check_rebuild:
        iso_magnet = get_job_parameter(test_build.build_data, 'MAGNET_LINK')
        if not iso_magnet:
            # Without an ISO magnet link there is nothing to match
            # rebuilds against.
            return test_build

        latest_build_number = Build(build_name, 'latest').number
        # build_number may be passed as a string (see docstring), so it
        # must be coerced to int before arithmetic — the original
        # `build_number + 1` raised TypeError for string input.
        builds_to_check = list(range(int(build_number) + 1,
                                     latest_build_number + 1))
        if force_rebuild_search:
            # Check the newest rebuilds first when a search is forced.
            builds_to_check.reverse()

        for n in builds_to_check:
            test_rebuild = Build(build_name, n)
            # A rebuild of the same job run shares the same ISO magnet link.
            if get_job_parameter(test_rebuild.build_data, 'MAGNET_LINK') \
                    == iso_magnet:
                logger.debug("Found test job rebuild: "
                             "{0}".format(test_rebuild.url))
                return test_rebuild
    return test_build
예제 #3
0
def _is_excluded(case_name, exclude):
    if exclude and exclude in case_name:
        logger.debug("Skipping '{0}' test because it contains"
                     " '{1}' in method name".format(case_name, exclude))
        return True
    else:
        return False
예제 #4
0
def _is_not_included(case_name, include):
    if include and include not in case_name:
        logger.debug("Skipping '{0}' test because it doesn't "
                     "contain '{1}' in method name".format(case_name, include))
        return True
    else:
        return False
예제 #5
0
def get_tests_groups_from_jenkins(runner_name, build_number, distros):
    """Collect test group names for every finished sub-build of a runner.

    :param runner_name: string, name of the Jenkins runner job
    :param build_number: int or string, runner build number
    :param distros: iterable of strings, distro markers used in job names
    :return: dict mapping job suffix -> test group name
    """
    runner_build = Build(runner_name, build_number)
    suffix_to_group = {}
    for sub_build in runner_build.build_data['subBuilds']:

        if sub_build['result'] is None:
            logger.debug("Skipping '{0}' job (build #{1}) because it's still "
                         "running...".format(sub_build['jobName'],
                                             sub_build['buildNumber'],))
            continue

        # Scan the job console for 'run_system_test.py ... --group=<name>'
        console = Build(sub_build['jobName'],
                        sub_build['buildNumber']).get_job_console()
        groups = []
        for line in console:
            if 'run_system_test.py' not in line:
                continue
            for token in line.split():
                if '--group=' in token:
                    groups.append(token.split('=')[1])
        if not groups:
            logger.error("No test group found in console of the job {0}/{1}"
                         .format(sub_build['jobName'],
                                 sub_build['buildNumber']))
            continue
        # Use the last group (there can be several groups in upgrade jobs)
        test_group = groups[-1]

        # Derive the job suffix: the part after '.<distro>.' when a known
        # distro occurs in the job name, otherwise the last dotted chunk.
        job_name = sub_build['jobName']
        for distro in distros:
            if distro in job_name:
                job_suffix = job_name.split('.' + distro + '.')[-1]
                break
        else:
            job_suffix = job_name.split('.')[-1]
        suffix_to_group[job_suffix] = test_group
    return suffix_to_group
예제 #6
0
File: builds.py  Project: dtsapikov/fuel-qa
def get_jobs_for_view(view):
    """Return the names of all jobs contained in a Jenkins view.

    :param view: string, name of the Jenkins view
    :return: list of job name strings
    """
    view_url = "/".join([JENKINS["url"], 'view', view, 'api/json'])
    logger.debug("Request view data from {}".format(view_url))
    view_data = requests.get(view_url).json()
    return [entry["name"] for entry in view_data["jobs"]]
예제 #7
0
def get_tests_groups_from_jenkins(runner_name, build_number, distros):
    """Map job name suffixes to test group info for a runner build.

    :param runner_name: string, name of the Jenkins runner job
    :param build_number: int or string, runner build number
    :param distros: iterable of strings, distro markers used in job names
    :return: dict {suffix: {'group': ..., 'job_info': ..., 'env_vars': ...}}
    """
    runner_build = Build(runner_name, build_number)
    res = {}
    # A single-job build has no 'subBuilds' key; in that case treat the
    # runner build itself as its only "sub build".
    sub_builds = \
        runner_build.build_data.get('subBuilds', [runner_build.build_data])
    for b in sub_builds:
        if b['result'] is None:
            # 'jobName'/'buildNumber' are absent when b is the runner build
            # itself, so fall back to the runner's own identifiers instead
            # of raising KeyError.
            logger.debug("Skipping '{0}' job (build #{1}) because it's still "
                         "running...".format(
                             b.get('jobName', runner_name),
                             b.get('buildNumber', build_number),
                         ))
            continue

        # Get the test group from the console of the job
        # Get the job suffix
        if b.get('jobName'):
            z = Build(b['jobName'], b['buildNumber'])
            console = z.get_job_console()
            job_name = b['jobName']
            job_info = z.job_info
            env_vars = z.injected_vars
        else:
            console = runner_build.get_job_console()
            job_name = runner_build.name
            job_info = runner_build.job_info
            env_vars = runner_build.injected_vars

        groups = re.findall(TEST_GROUP_PATTERN, console)

        if not groups:
            # maybe it's failed baremetal job?
            # because of a design baremetal tests run pre-setup job
            # and when it fails there are no test groups in common meaning:
            # groups which could be parsed by TEST_GROUP_PATTERN
            baremetal_pattern = re.compile(r'Jenkins Build.*jenkins-(.*)-\d+')
            baremetal_groups = re.findall(baremetal_pattern, console)
            if not baremetal_groups:
                # Use job_name here: b['jobName'] raised KeyError when the
                # runner build itself (fallback path above) was inspected.
                logger.error(
                    "No test group found in console of the job {0}/{1}".format(
                        job_name, b.get('buildNumber', build_number)))
                continue
            # we should get the group via jobName because the test group name
            # inside the log could be cut and some symbols will be changed to *
            groups = job_name.split('.')
        # Use the last group (there can be several groups in upgrade jobs)
        test_group = groups[-1]

        for distro in distros:
            if distro in job_name:
                sep = '.' + distro + '.'
                job_suffix = job_name.split(sep)[-1]
                break
        else:
            job_suffix = job_name.split('.')[-1]
        res[job_suffix] = \
            {'group': test_group, 'job_info': job_info, 'env_vars': env_vars}
    return res
    def bugs_statistics(self):
        """Collect Launchpad bug statistics for all tests of this TestRun.

        Returns a mapping of
        bug_id -> {test_id: {'group', 'config', 'blocked'}}.
        The result is cached in ``self._bugs_statistics``; subsequent calls
        return the cached value without re-scanning.
        """
        # Serve cached statistics if they were already collected.
        if self._bugs_statistics != {}:
            return self._bugs_statistics
        logger.info(
            'Collecting stats for TestRun "{0}" on "{1}"...'.format(
                self.run["name"], self.run["config"] or "default config"
            )
        )

        for test in self.tests:
            logger.debug('Checking "{0}" test...'.format(test["title"]))
            # Sort results by id descending — presumably newest first;
            # TODO confirm result ids grow monotonically over time.
            test_results = sorted(
                self.project.get_results_for_test(test["id"], self.results), key=lambda x: x["id"], reverse=True
            )

            linked_bugs = []
            is_blocked = False

            for result in test_results:
                if result["status_id"] in self.blocked_statuses:
                    if self.check_blocked:
                        # Try to inherit a bug link from the upstream test
                        # that blocked this one.
                        new_bug_link = self.handle_blocked(test, result)
                        if new_bug_link:
                            linked_bugs.append(new_bug_link)
                            is_blocked = True
                            break
                    # Stop at the first blocked result that already carries
                    # a bug link.
                    if result["custom_launchpad_bug"]:
                        linked_bugs.append(result["custom_launchpad_bug"])
                        is_blocked = True
                        break
                if result["status_id"] in self.failed_statuses and result["custom_launchpad_bug"]:
                    linked_bugs.append(result["custom_launchpad_bug"])

            # Extract numeric bug ids from links like '.../bug/12345' or
            # '.../bugs/12345'; the set deduplicates repeated links.
            bug_ids = set(
                [
                    re.search(r".*bugs?/(\d+)/?", link).group(1)
                    for link in linked_bugs
                    if re.search(r".*bugs?/(\d+)/?", link)
                ]
            )

            for bug_id in bug_ids:
                if bug_id in self._bugs_statistics:
                    self._bugs_statistics[bug_id][test["id"]] = {
                        "group": test["custom_test_group"] or "manual",
                        "config": self.run["config"] or "default",
                        "blocked": is_blocked,
                    }

                else:
                    self._bugs_statistics[bug_id] = {
                        test["id"]: {
                            "group": test["custom_test_group"] or "manual",
                            "config": self.run["config"] or "default",
                            "blocked": is_blocked,
                        }
                    }
        return self._bugs_statistics
예제 #9
0
def publish_results(project, milestone_id, test_plan, suite_id, config_id,
                    results):
    """Publish test results into the matching TestRail test run.

    Finds the run of *test_plan* that uses *suite_id* and *config_id*,
    skips results whose version was already published, attaches bug links
    found in previous runs to non-passed results, and uploads the rest.

    :param project: TestRail project wrapper
    :param milestone_id: TestRail milestone id
    :param test_plan: dict, TestRail test plan
    :param suite_id: TestRail suite id
    :param config_id: TestRail configuration id
    :param results: iterable of result objects (with .group/.version/...)
    :return: list of results that were submitted
    """
    test_run_ids = [
        run['id'] for entry in test_plan['entries'] for run in entry['runs']
        if suite_id == run['suite_id'] and config_id in run['config_ids']
    ]
    logger.debug('Looking for previous tests runs on "{0}" using tests suite '
                 '"{1}"...'.format(
                     project.get_config(config_id)['name'],
                     project.get_suite(suite_id)['name']))
    previous_tests_runs = project.get_previous_runs(
        milestone_id=milestone_id,
        suite_id=suite_id,
        config_id=config_id,
        limit=TestRailSettings.previous_results_depth)
    cases = project.get_cases(suite_id=suite_id)
    tests = project.get_tests(run_id=test_run_ids[0])
    results_to_publish = []

    for result in results:
        test = project.get_test_by_group(run_id=test_run_ids[0],
                                         group=result.group,
                                         tests=tests)
        if not test:
            logger.error("Test for '{0}' group not found: {1}".format(
                result.group, result.url))
            continue
        existing_results_versions = [
            r['version'] for r in project.get_results_for_test(test['id'])
        ]
        if result.version in existing_results_versions:
            # This version was already reported; avoid duplicates.
            continue
        if result.status not in ('passed', 'blocked'):
            case_id = project.get_case_by_group(suite_id=suite_id,
                                                group=result.group,
                                                cases=cases)['id']
            run_ids = [
                run['id']
                for run in previous_tests_runs[0:int(TestRailSettings.
                                                     previous_results_depth)]
            ]
            previous_results = project.get_all_results_for_case(
                run_ids=run_ids, case_id=case_id)
            # Re-use a bug link already attached to this case in a
            # previous run, if any.
            lp_bug = get_existing_bug_link(previous_results)
            if lp_bug:
                result.launchpad_bug = lp_bug['bug_link']
        results_to_publish.append(result)

    try:
        if len(results_to_publish) > 0:
            project.add_results_for_cases(run_id=test_run_ids[0],
                                          suite_id=suite_id,
                                          tests_results=results_to_publish)
    except Exception:
        # Was a bare 'except:'; narrowed so SystemExit/KeyboardInterrupt
        # pass through untouched (the error is still logged and re-raised).
        logger.error('Failed to add new results for tests: {0}'.format(
            [r.group for r in results_to_publish]))
        raise
    return results_to_publish
예제 #10
0
File: builds.py  Project: dtsapikov/fuel-qa
    def get_test_data(url, result_path=None):
        """Fetch the Jenkins testReport JSON located under *url*.

        :param url: string, base build URL
        :param result_path: optional list of extra path components pointing
            at a specific result inside the report
        :return: dict, parsed JSON test report
        """
        segments = [url.rstrip("/"), 'testReport']
        if result_path:
            segments += result_path
        segments.append('api/json')
        test_url = "/".join(segments)

        logger.debug("Request test data from {}".format(test_url))
        return requests.get(test_url).json()
예제 #11
0
def add_case(testrail_project, suite, test_case, section, do_check=False):
    """Add *test_case* into *section* of a TestRail suite.

    When *do_check* is True the case is only added if no case with the
    same title already exists in the suite.
    """
    if do_check:
        existing = testrail_project.get_case_by_name(suite['id'],
                                                     test_case['title'])
        if existing:
            logger.debug("test: {} is already on section: {}".format(
                test_case["title"], test_case["section"]))
            return
    logger.debug("try to add test: {} to section: {}".format(
        test_case["title"], test_case["section"]))
    testrail_project.add_case(section_id=section["id"], case=test_case)
def add_case(testrail_project, suite, test_case, section, do_check=False):
    """Add *test_case* into *section*, optionally skipping duplicates.

    With *do_check* set, a case whose title already exists in the suite
    is logged and not re-added.
    """
    messages = {
        True: "test: {} is already on section: {}",
        False: "try to add test: {} to section: {}",
    }
    already_there = bool(
        do_check and testrail_project.get_case_by_name(suite['id'],
                                                       test_case['title']))
    logger.debug(messages[already_there].format(test_case["title"],
                                                test_case["section"]))
    if already_there:
        return
    testrail_project.add_case(section_id=section["id"], case=test_case)
예제 #13
0
    def get_test_data(url, result_path=None):
        """Request and return the testReport JSON for a build URL.

        :param url: string, base build URL
        :param result_path: optional list of path components of a specific
            test result inside the report
        :return: dict, parsed JSON test report
        """
        base = url.rstrip("/")
        middle = result_path if result_path else []
        test_url = "/".join([base, 'testReport'] + middle + ['api/json'])

        logger.debug("Request test data from {}".format(test_url))
        return requests.get(test_url).json()
예제 #14
0
File: report.py  Project: avgoor/fuel-qa
def publish_results(project, milestone_id, test_plan,
                    suite_id, config_id, results):
    """Publish test results into the matching TestRail test run.

    Finds the run of *test_plan* that uses *suite_id* and *config_id*,
    skips results whose version was already published, attaches bug links
    found in previous runs to non-passed results, and uploads the rest.

    :return: list of results that were submitted
    """
    test_run_ids = [run['id'] for entry in test_plan['entries']
                    for run in entry['runs'] if suite_id == run['suite_id'] and
                    config_id in run['config_ids']]
    logger.debug('Looking for previous tests runs on "{0}" using tests suite '
                 '"{1}"...'.format(project.get_config(config_id)['name'],
                                   project.get_suite(suite_id)['name']))
    previous_tests_runs = project.get_previous_runs(
        milestone_id=milestone_id,
        suite_id=suite_id,
        config_id=config_id,
        limit=TestRailSettings.previous_results_depth)
    logger.debug('Found next test runs: {0}'.format(
        [test_run['description'] for test_run in previous_tests_runs]))
    cases = project.get_cases(suite_id=suite_id)
    tests = project.get_tests(run_id=test_run_ids[0])
    results_to_publish = []

    for result in results:
        test = project.get_test_by_group(run_id=test_run_ids[0],
                                         group=result.group,
                                         tests=tests)
        if not test:
            logger.error("Test for '{0}' group not found: {1}".format(
                result.group, result.url))
            continue
        existing_results_versions = [r['version'] for r in
                                     project.get_results_for_test(test['id'])]
        if result.version in existing_results_versions:
            # This version was already reported; avoid duplicates.
            continue
        if result.status not in ('passed', 'blocked'):
            case_id = project.get_case_by_group(suite_id=suite_id,
                                                group=result.group,
                                                cases=cases)['id']
            run_ids = [run['id'] for run in previous_tests_runs[0:
                       int(TestRailSettings.previous_results_depth)]]
            previous_results = project.get_all_results_for_case(
                run_ids=run_ids,
                case_id=case_id)
            # Re-use a bug link already attached to this case in a
            # previous run, if any.
            lp_bug = get_existing_bug_link(previous_results)
            if lp_bug:
                result.launchpad_bug = lp_bug['bug_link']
        results_to_publish.append(result)

    try:
        if len(results_to_publish) > 0:
            project.add_results_for_cases(run_id=test_run_ids[0],
                                          suite_id=suite_id,
                                          tests_results=results_to_publish)
    except Exception:
        # Was a bare 'except:'; narrowed so SystemExit/KeyboardInterrupt
        # pass through untouched (the error is still logged and re-raised).
        logger.error('Failed to add new results for tests: {0}'.format(
            [r.group for r in results_to_publish]
        ))
        raise
    return results_to_publish
def get_tests_descriptions(milestone_id, testrail_default_test_priority,
                           testrail_project):
    """Collect Tempest tests and build TestRail case descriptions for them.

    :param milestone_id: TestRail milestone id to attach new cases to
    :param testrail_default_test_priority: default priority id for new cases
    :param testrail_project: TestRail project wrapper
    :return: dict mapping section name -> list of test case dicts
    """
    # To get the Tempest tests list, need to execute the following commands:
    # git clone https://github.com/openstack/tempest & cd tempest & tox -evenv
    # .tox/venv/bin/pip install nose
    get_tempest_tests = ("cd tempest && .tox/venv/bin/nosetests "
                         "--collect-only tempest/{0} -v 2>&1 | grep 'id-.*'")
    get_commit = "cd tempest && git rev-parse HEAD"
    # NOTE(review): shell=True with fixed command strings — acceptable here
    # since no untrusted input is interpolated.
    commit = subprocess.Popen(get_commit, shell=True, stdout=subprocess.PIPE)
    logger.info("Generate test suite for tempest"
                " commit:{}".format(commit.stdout.readline()))
    custom_cases_fields = _get_custom_cases_fields(
        case_fields=testrail_project.get_case_fields(),
        project_id=testrail_project.project['id'])
    tests = {}

    for group in TEST_GROUPS:
        p = subprocess.Popen(get_tempest_tests.format(group.lower()),
                             shell=True, stdout=subprocess.PIPE)

        # Stream the collected test list line by line until EOF.
        for line in iter(p.stdout.readline, b''):
            section = generate_groups(line) if group == "API" else group

            # Everything before the 'id-...' token forms the dotted test
            # class path; the token itself becomes the case title.
            test_class = []
            for r in line.split("."):
                if "id-" in r:
                    title = r.strip()
                    break
                else:
                    test_class.append(r)
            # NOTE(review): if a line contains no 'id-' token, 'title' keeps
            # its value from the previous iteration (or is unbound on the
            # very first line) — confirm the grep filter above guarantees
            # the token is always present.

            steps = [{"run this tempest test": "passed"}, ]

            test_case = {
                "title": title,
                "type_id": 1,
                "milestone_id": milestone_id,
                "priority_id": testrail_default_test_priority,
                "estimate": "1m",
                "refs": "",
                # Strip the 'id-' prefix and the trailing bracket/character
                # from the title to form the report label.
                "custom_report_label": title.split('id-')[1][:-1],
                "custom_test_group": ".".join(test_class),
                "custom_test_case_description": title,
                "custom_test_case_steps": steps,
                "section": section
            }
            # Fill in required custom fields that the case doesn't set.
            for case_field, default_value in custom_cases_fields.items():
                if case_field not in test_case:
                    test_case[case_field] = default_value
            if section not in tests:
                tests[section] = []
            tests[section].append(test_case)
    logger.debug(tests)
    logger.info("total test cases: "
                "{}".format(sum(map(lambda x: len(x), tests.values()))))
    return tests
예제 #16
0
def get_build_artifact(url, artifact):
    """Return content of job build artifact

    :param url: string, base URL of the Jenkins build
    :param artifact: string, relative path of the artifact file
    :return: bytes, raw artifact content
    """
    url = "/".join([url, 'artifact', artifact])
    logger.debug("Request artifact content from {}".format(url))
    req = request.Request(url)
    opener = request.build_opener(request.HTTPHandler)
    try:
        response = opener.open(req)
        try:
            return response.read()
        finally:
            # The response object was previously left open; close it to
            # release the underlying connection.
            response.close()
    finally:
        opener.close()
예제 #17
0
    def get_test_data(url, result_path=None):
        """Fetch the testReport JSON for a build URL via urllib.

        :param url: string, base build URL
        :param result_path: optional list of path components of a specific
            test result inside the report
        :return: dict, parsed JSON test report
        """
        parts = [url.rstrip("/"), 'testReport']
        if result_path:
            parts.extend(result_path)
        parts.append('api/json')
        test_url = "/".join(parts)

        logger.debug("Request test data from {}".format(test_url))
        return json.load(request.urlopen(test_url))
    def bugs_statistics(self):
        """Collect Launchpad bug statistics for all tests of this TestRun.

        Returns a mapping of
        bug_id -> {test_id: {'group', 'config', 'blocked'}}.
        The result is cached in ``self._bugs_statistics``.
        """
        # Serve cached statistics if they were already collected.
        if self._bugs_statistics != {}:
            return self._bugs_statistics
        logger.info('Collecting stats for TestRun "{0}" on "{1}"...'.format(
            self.run['name'], self.run['config'] or 'default config'))

        for test in self.tests:
            logger.debug('Checking "{0}" test...'.format(test['title']))
            # Sort results by id descending — presumably newest first;
            # TODO confirm result ids grow monotonically over time.
            test_results = sorted(self.project.get_results_for_test(
                test['id'], self.results),
                                  key=lambda x: x['id'],
                                  reverse=True)

            linked_bugs = []
            is_blocked = False

            for result in test_results:
                if result['status_id'] in self.blocked_statuses:
                    if self.check_blocked:
                        # Try to inherit a bug link from the upstream test
                        # that blocked this one.
                        new_bug_link = self.handle_blocked(test, result)
                        if new_bug_link:
                            linked_bugs.append(new_bug_link)
                            is_blocked = True
                            break
                    # Stop at the first blocked result that already carries
                    # a bug link.
                    if result['custom_launchpad_bug']:
                        linked_bugs.append(result['custom_launchpad_bug'])
                        is_blocked = True
                        break
                if result['status_id'] in self.failed_statuses \
                        and result['custom_launchpad_bug']:
                    linked_bugs.append(result['custom_launchpad_bug'])

            # Extract numeric bug ids from '.../bug/<id>' links; the set
            # deduplicates repeated links. Note: unlike the 'bugs?' variant
            # elsewhere, this pattern does not match '/bugs/<id>'.
            bug_ids = set([
                re.search(r'.*bug/(\d+)/?', link).group(1)
                for link in linked_bugs if re.search(r'.*bug/(\d+)/?', link)
            ])

            for bug_id in bug_ids:
                if bug_id in self._bugs_statistics:
                    self._bugs_statistics[bug_id][test['id']] = {
                        'group': test['custom_test_group'] or 'manual',
                        'config': self.run['config'] or 'default',
                        'blocked': is_blocked
                    }

                else:
                    self._bugs_statistics[bug_id] = {
                        test['id']: {
                            'group': test['custom_test_group'] or 'manual',
                            'config': self.run['config'] or 'default',
                            'blocked': is_blocked
                        }
                    }
        return self._bugs_statistics
예제 #19
0
def get_jobs_for_view(view):
    """Return the names of all jobs that belong to a Jenkins view.

    :param view: string, name of the Jenkins view
    :return: list of job name strings
    """
    view_url = "/".join([JENKINS["url"], 'view', view, 'api/json'])
    logger.debug("Request view data from {}".format(view_url))
    opener = request.build_opener(request.HTTPHandler)
    raw = opener.open(request.Request(view_url)).read()
    opener.close()
    view_data = json.loads(raw)
    return [job["name"] for job in view_data["jobs"]]
예제 #20
0
    def bugs_statistics(self):
        """Collect Launchpad bug statistics for all tests of this TestRun.

        Returns a mapping of
        bug_id -> {test_id: {'group', 'config', 'blocked'}}.
        The result is cached in ``self._bugs_statistics``.
        """
        # Serve cached statistics if they were already collected.
        if self._bugs_statistics != {}:
            return self._bugs_statistics
        logger.info('Collecting stats for TestRun "{0}" on "{1}"...'.format(
            self.run['name'], self.run['config'] or 'default config'))

        for test in self.tests:
            logger.debug('Checking "{0}" test...'.format(test['title']))
            # Sort results by id descending — presumably newest first;
            # TODO confirm result ids grow monotonically over time.
            test_results = sorted(
                self.project.get_results_for_test(test['id'], self.results),
                key=lambda x: x['id'], reverse=True)

            linked_bugs = []
            is_blocked = False

            for result in test_results:
                if result['status_id'] in self.blocked_statuses:
                    if self.check_blocked:
                        # Try to inherit a bug link from the upstream test
                        # that blocked this one.
                        new_bug_link = self.handle_blocked(test, result)
                        if new_bug_link:
                            linked_bugs.append(new_bug_link)
                            is_blocked = True
                            break
                    # Stop at the first blocked result that already carries
                    # a bug link.
                    if result['custom_launchpad_bug']:
                        linked_bugs.append(result['custom_launchpad_bug'])
                        is_blocked = True
                        break
                if result['status_id'] in self.failed_statuses \
                        and result['custom_launchpad_bug']:
                    linked_bugs.append(result['custom_launchpad_bug'])

            # Extract numeric bug ids from '.../bug/<id>' links; the set
            # deduplicates repeated links.
            bug_ids = set([re.search(r'.*bug/(\d+)/?', link).group(1)
                           for link in linked_bugs
                           if re.search(r'.*bug/(\d+)/?', link)])

            for bug_id in bug_ids:
                if bug_id in self._bugs_statistics:
                    self._bugs_statistics[bug_id][test['id']] = {
                        'group': test['custom_test_group'] or 'manual',
                        'config': self.run['config'] or 'default',
                        'blocked': is_blocked
                    }

                else:
                    self._bugs_statistics[bug_id] = {
                        test['id']: {
                            'group': test['custom_test_group'] or 'manual',
                            'config': self.run['config'] or 'default',
                            'blocked': is_blocked
                        }
                    }
        return self._bugs_statistics
예제 #21
0
    def handle_blocked(self, test, result):
        """Try to inherit a Launchpad bug link for a blocked test result.

        Parses the 'Blocked by "<group>" test.' comment of *result*, looks
        up the blocking test for the same version and copies the newest bug
        link from its failed results onto *result*.

        :param test: dict, the blocked test entry
        :param result: dict, the blocked result of that test
        :return: the bug link string on success, False otherwise
        """
        # Nothing to do when the result already carries a bug link.
        if result['custom_launchpad_bug']:
            return False
        m = re.search(r'Blocked by "(\S+)" test.', result['comment'])
        if m:
            blocked_test_group = m.group(1)
        else:
            logger.debug('Blocked result #{0} for test {1} does '
                         'not have upstream test name in its '
                         'comments!'.format(result['id'],
                                            test['custom_test_group']))
            return False

        if not result['version']:
            logger.debug('Blocked result #{0} for test {1} does '
                         'not have version, can\'t find upstream '
                         'test case!'.format(result['id'],
                                             test['custom_test_group']))
            return False

        bug_link = None
        blocked_test = self.get_test_by_group(blocked_test_group,
                                              result['version'])
        if not blocked_test:
            return False
        logger.debug('Test {0} was blocked by failed test {1}'.format(
            test['custom_test_group'], blocked_test_group))

        blocked_results = self.project.get_results_for_test(
            blocked_test['id'])

        # Since we manually add results to failed tests with statuses
        # ProdFailed, TestFailed, etc. and attach bugs links to them,
        # we could skip original version copying. So look for test
        # results with target version, but allow to copy links to bugs
        # from other results of the same test (newer are checked first)
        if not any(br['version'] == result['version'] and
                   br['status_id'] in self.failed_statuses
                   for br in blocked_results):
            logger.debug('Did not find result for test {0} with version '
                         '{1}!'.format(blocked_test_group, result['version']))
            return False

        # Walk blocking results newest-first and take the first failed one
        # that has a bug link attached.
        for blocked_result in sorted(blocked_results,
                                     key=lambda x: x['id'],
                                     reverse=True):
            if blocked_result['status_id'] not in self.failed_statuses:
                continue

            if blocked_result['custom_launchpad_bug']:
                bug_link = blocked_result['custom_launchpad_bug']
                break

        if bug_link is not None:
            # Copy the found link onto the blocked result and push it back
            # to TestRail.
            result['custom_launchpad_bug'] = bug_link
            self.project.add_raw_results_for_test(test['id'], result)
            logger.info('Added bug {0} to blocked result of {1} test.'.format(
                bug_link, test['custom_test_group']))
            return bug_link
        return False
    def handle_blocked(self, test, result):
        """Try to inherit a Launchpad bug link for a blocked test result.

        Parses the 'Blocked by "<group>" test.' comment of *result*, looks
        up the blocking test for the same version and copies the newest bug
        link from its failed results onto *result*.

        :param test: dict, the blocked test entry
        :param result: dict, the blocked result of that test
        :return: the bug link string on success, False otherwise
        """
        # Nothing to do when the result already carries a bug link.
        if result['custom_launchpad_bug']:
            return False
        m = re.search(r'Blocked by "(\S+)" test.', result['comment'])
        if m:
            blocked_test_group = m.group(1)
        else:
            logger.debug('Blocked result #{0} for test {1} does '
                         'not have upstream test name in its '
                         'comments!'.format(result['id'],
                                            test['custom_test_group']))
            return False

        if not result['version']:
            logger.debug('Blocked result #{0} for test {1} does '
                         'not have version, can\'t find upstream '
                         'test case!'.format(result['id'],
                                             test['custom_test_group']))
            return False

        bug_link = None
        blocked_test = self.get_test_by_group(blocked_test_group,
                                              result['version'])
        if not blocked_test:
            return False
        logger.debug('Test {0} was blocked by failed test {1}'.format(
            test['custom_test_group'], blocked_test_group))

        blocked_results = self.project.get_results_for_test(blocked_test['id'])

        # Since we manually add results to failed tests with statuses
        # ProdFailed, TestFailed, etc. and attach bugs links to them,
        # we could skip original version copying. So look for test
        # results with target version, but allow to copy links to bugs
        # from other results of the same test (newer are checked first)
        if not any(br['version'] == result['version']
                   and br['status_id'] in self.failed_statuses
                   for br in blocked_results):
            logger.debug('Did not find result for test {0} with version '
                         '{1}!'.format(blocked_test_group, result['version']))
            return False

        # Walk blocking results newest-first and take the first failed one
        # that has a bug link attached.
        for blocked_result in sorted(blocked_results,
                                     key=lambda x: x['id'],
                                     reverse=True):
            if blocked_result['status_id'] not in self.failed_statuses:
                continue

            if blocked_result['custom_launchpad_bug']:
                bug_link = blocked_result['custom_launchpad_bug']
                break

        if bug_link is not None:
            # Copy the found link onto the blocked result and push it back
            # to TestRail.
            result['custom_launchpad_bug'] = bug_link
            self.project.add_raw_results_for_test(test['id'], result)
            logger.info('Added bug {0} to blocked result of {1} test.'.format(
                bug_link, test['custom_test_group']))
            return bug_link
        return False
예제 #23
0
 def get_injected_vars(self, depth=1, build_number=None):
     """Request injected environment variables for a build of this job.

     :param depth: int, Jenkins API 'depth' query parameter
     :param build_number: int or None; when None/falsy, no request is made
     :return: parsed JSON response, or an empty list when no build number
         is given or the response body is not valid JSON
     """
     if not build_number:
         return []
     job_url = "/".join([JENKINS["url"], 'job', self.name,
                         str(build_number), 'injectedEnvVars',
                         'api/json?depth={depth}'.format(depth=depth)])
     logger.debug("Request injected variables from job {}".format(job_url))
     try:
         return requests.get(job_url).json()
     except JSONDecodeError:
         # Jenkins sometimes answers with a non-JSON body; treat that the
         # same as "no variables available".
         logger.debug(
             "Failed to decode injected variables from job {}".format(
                 job_url))
         return []
def upload_tests_descriptions(testrail_project, section_id, tests,
                              check_all_sections):
    """Upload test case descriptions to TestRail, skipping existing cases.

    :param testrail_project: TestRail project client
    :param section_id: int, section where new cases are created
    :param tests: list of dicts describing test cases
    :param check_all_sections: bool, if True then look for existing cases
        in the whole suite instead of only ``section_id``
    """
    tests_suite = testrail_project.get_suite_by_name(
        TestRailSettings.tests_suite)
    check_section = None if check_all_sections else section_id
    existing_cases = [
        case['custom_test_group']
        for case in testrail_project.get_cases(suite_id=tests_suite['id'],
                                               section_id=check_section)
    ]
    # Collect default values for required custom case fields so new cases
    # pass TestRail validation.
    custom_cases_fields = {}
    for field in testrail_project.get_case_fields():
        for config in field['configs']:
            if ((testrail_project.project['id']
                 in config['context']['project_ids']
                 or not config['context']['project_ids'])
                    and config['options']['is_required']):
                try:
                    custom_cases_fields[field['system_name']] = \
                        int(config['options']['items'].split(',')[0])
                except Exception:
                    # Was a bare 'except:', which also swallowed SystemExit
                    # and KeyboardInterrupt. Keep the best-effort fallback,
                    # but only for ordinary exceptions.
                    logger.error("Couldn't find default value for required "
                                 "field '{0}', setting '1' (index)!".format(
                                     field['system_name']))
                    custom_cases_fields[field['system_name']] = 1

    for test_case in tests:
        if test_case['custom_test_group'] in existing_cases:
            logger.debug('Skipping uploading "{0}" test case because it '
                         'already exists in "{1}" tests section.'.format(
                             test_case['custom_test_group'],
                             TestRailSettings.tests_suite))
            continue

        # Fill missing required custom fields with the collected defaults.
        for case_field, default_value in custom_cases_fields.items():
            if case_field not in test_case:
                test_case[case_field] = default_value

        logger.debug('Uploading test "{0}" to TestRail project "{1}", '
                     'suite "{2}", section "{3}"'.format(
                         test_case["custom_test_group"],
                         TestRailSettings.project,
                         TestRailSettings.tests_suite,
                         TestRailSettings.tests_section))
        testrail_project.add_case(section_id=section_id, case=test_case)
예제 #25
0
 def wrapper(*args, **kwargs):
     """Call ``func`` and retry when it raises an HTTPError whose code is
     listed in ``codes``; the per-code value limits the retry count."""
     attempts = 0
     while True:
         try:
             result = func(*args, **kwargs)
         except HTTPError as e:
             # Retry only known codes, and only while attempts remain.
             if e.code not in codes or attempts >= codes[e.code]:
                 raise
             wait = 5
             # Honor the server-suggested delay when present.
             if 'Retry-After' in e.hdrs:
                 wait = int(e.hdrs['Retry-after'])
             logger.debug(log_msg.format(e.code, wait))
             time.sleep(wait)
             attempts += 1
         else:
             return result
예제 #26
0
def _is_case_processable(case, tests):
    if not case.entry.info.enabled or not hasattr(case.entry, 'parent'):
        return False

    parent_home = case.entry.parent.home
    if issubclass(parent_home, ActionTest) and \
            any([test[GROUP_FIELD] == parent_home.__name__ for test in tests]):
        return False

    # Skip @before_class methods without doc strings:
    # they are just pre-checks, not separate tests cases
    if case.entry.info.before_class:
        if case.entry.home.func_doc is None:
            logger.debug('Skipping method "{0}", because it is not a '
                         'test case'.format(case.entry.home.func_name))
            return False

    return True
def get_tests_groups_from_jenkins(runner_name, build_number, distros):
    """Map job name suffixes to test group data collected from Jenkins.

    :param runner_name: string, name of the runner (multijob) Jenkins job
    :param build_number: runner build number to inspect
    :param distros: list of distro names used to derive job suffixes
    :return: dict {job_suffix: {'group': ..., 'job_info': ...,
        'env_vars': ...}}
    """
    runner_build = Build(runner_name, build_number)
    res = {}
    # Non-multijob builds have no 'subBuilds'; treat the runner build
    # itself as the only sub-build in that case.
    sub_builds = \
        runner_build.build_data.get('subBuilds', [runner_build.build_data])
    for b in sub_builds:
        # The fallback entry (runner build data itself) has no 'jobName' /
        # 'buildNumber' keys, so the previous direct b['jobName'] lookups
        # in the log calls raised KeyError; use safe values instead.
        job_name = b.get('jobName', runner_build.name)
        job_number = b.get('buildNumber', runner_build.number)

        if b['result'] is None:
            logger.debug("Skipping '{0}' job (build #{1}) because it's still "
                         "running...".format(job_name, job_number,))
            continue

        # Get the test group from the console of the job
        if b.get('jobName'):
            z = Build(job_name, job_number)
            console = z.get_job_console()
            job_info = z.job_info
            env_vars = z.injected_vars
        else:
            console = runner_build.get_job_console()
            job_info = runner_build.job_info
            env_vars = runner_build.injected_vars

        groups = re.findall(TEST_GROUP_PATTERN, console)

        if not groups:
            logger.error("No test group found in console of the job {0}/{1}"
                         .format(job_name, job_number))
            continue
        # Use the last group (there can be several groups in upgrade jobs)
        test_group = groups[-1]

        # Get the job suffix: the part after '.<distro>.' when a known
        # distro occurs in the job name, otherwise after the last dot.
        for distro in distros:
            if distro in job_name:
                sep = '.' + distro + '.'
                job_suffix = job_name.split(sep)[-1]
                break
        else:
            job_suffix = job_name.split('.')[-1]
        res[job_suffix] = \
            {'group': test_group, 'job_info': job_info, 'env_vars': env_vars}
    return res
예제 #28
0
 def wrapper(*args, **kwargs):
     """Call ``func``, raising for bad HTTP status and retrying status
     codes listed in ``codes`` up to their configured retry limits."""
     attempt = 0
     while True:
         try:
             response = func(*args, **kwargs)
             response.raise_for_status()
         except HTTPError as e:
             status = e.response.status_code
             if status in codes and attempt < codes[status]:
                 delay = 5
                 # Prefer the server-provided retry delay when present.
                 if 'Retry-After' in e.response.headers:
                     delay = int(e.response.headers['Retry-after'])
                 logger.debug(log_msg.format(status, delay))
                 time.sleep(delay)
                 attempt += 1
                 continue
             raise
         else:
             return response.json()
예제 #29
0
def get_downstream_builds_from_html(url):
    """Return list of downstream jobs builds from specified job

    Scrapes the 'downstreambuildview' HTML page of the build and resolves
    each referenced downstream build.

    :param url: string, base URL of the Jenkins job build
    :return: list of dicts with 'name', 'number' and 'result' keys
    """
    url = "/".join([url, 'downstreambuildview/'])
    logger.debug("Request downstream builds data from {}".format(url))
    response = requests.get(url).text
    jobs = []
    # Raw string literal: '\S' is an invalid escape sequence in a plain
    # string (DeprecationWarning/SyntaxWarning on modern Python).
    raw_downstream_builds = re.findall(
        r'.*downstream-buildview.*href="(/job/\S+/[0-9]+/).*', response)
    for raw_build in raw_downstream_builds:
        # href looks like '/job/<name>/<number>/'
        sub_job_name = raw_build.split('/')[2]
        sub_job_build = raw_build.split('/')[3]
        build = Build(name=sub_job_name, number=sub_job_build)
        jobs.append({
            'name': build.name,
            'number': build.number,
            'result': build.build_data['result']
        })

    return jobs
예제 #30
0
 def wrapper(*args, **kwargs):
     """Retry wrapper: repeat ``func`` while it fails with an HTTP status
     code present in ``codes``, bounded by the per-code retry limit."""
     retry_count = 0
     while True:
         try:
             resp = func(*args, **kwargs)
             resp.raise_for_status()
         except HTTPError as e:
             code = e.response.status_code
             # Unknown code or retries exhausted: propagate the error.
             if code not in codes or retry_count >= codes[code]:
                 raise
             pause = 5
             if 'Retry-After' in e.response.headers:
                 pause = int(e.response.headers['Retry-after'])
             logger.debug(log_msg.format(code, pause))
             time.sleep(pause)
             retry_count += 1
         else:
             return resp.json()
예제 #31
0
def get_tests_groups_from_jenkins(runner_name, build_number, distros):
    """Build a {job_suffix: test_group} mapping from the sub-builds of a
    Jenkins runner (multijob) build."""
    runner_build = Build(runner_name, build_number)
    res = {}
    for sub_build in runner_build.build_data['subBuilds']:

        if sub_build['result'] is None:
            logger.debug("Skipping '{0}' job (build #{1}) because it's still "
                         "running...".format(
                             sub_build['jobName'],
                             sub_build['buildNumber'],
                         ))
            continue

        # Extract the '--group=<name>' arguments of run_system_test.py
        # invocations from the job console output.
        console = Build(sub_build['jobName'],
                        sub_build['buildNumber']).get_job_console()
        groups = []
        for line in console:
            if 'run_system_test.py' not in line:
                continue
            for token in line.split():
                if '--group=' in token:
                    groups.append(token.split('=')[1])
        if not groups:
            logger.error(
                "No test group found in console of the job {0}/{1}".format(
                    sub_build['jobName'], sub_build['buildNumber']))
            continue
        # Use the last group (there can be several groups in upgrade jobs)
        test_group = groups[-1]

        # Derive the job suffix: the part after '.<distro>.' when a known
        # distro occurs in the job name, otherwise after the last dot.
        job_name = sub_build['jobName']
        for distro in distros:
            if distro in job_name:
                job_suffix = job_name.split('.' + distro + '.')[-1]
                break
        else:
            job_suffix = job_name.split('.')[-1]
        res[job_suffix] = test_group
    return res
예제 #32
0
파일: builds.py 프로젝트: dtsapikov/fuel-qa
def get_downstream_builds_from_html(url):
    """Return list of downstream jobs builds from specified job

    Parses the 'downstreambuildview' HTML page of the build and resolves
    each downstream build it references.

    :param url: string, base URL of the Jenkins job build
    :return: list of dicts with 'name', 'number' and 'result' keys
    """
    url = "/".join([url, 'downstreambuildview/'])
    logger.debug("Request downstream builds data from {}".format(url))
    response = requests.get(url).text
    jobs = []
    # Raw string literal: '\S' is an invalid escape sequence in a plain
    # string (DeprecationWarning/SyntaxWarning on modern Python).
    raw_downstream_builds = re.findall(
        r'.*downstream-buildview.*href="(/job/\S+/[0-9]+/).*', response)
    for raw_build in raw_downstream_builds:
        # href looks like '/job/<name>/<number>/'
        sub_job_name = raw_build.split('/')[2]
        sub_job_build = raw_build.split('/')[3]
        build = Build(name=sub_job_name, number=sub_job_build)
        jobs.append(
            {
                'name': build.name,
                'number': build.number,
                'result': build.build_data['result']
            }
        )

    return jobs
예제 #33
0
파일: report.py 프로젝트: dtsapikov/fuel-qa
def get_test_build(build_name, build_number, check_rebuild=False):
    """Get test data from Jenkins job build.

    :param build_name: string, Jenkins job name
    :param build_number: build number (used in integer arithmetic when
        searching for rebuilds)
    :param check_rebuild: bool, if True then look for newer job rebuild(s)
    :return: Build object with test data
    """
    test_build = Build(build_name, build_number)
    last_case = test_build.test_data()['suites'][0]['cases'].pop()['name']
    # A last case named 'jenkins' indicates an aborted/infra-failed run;
    # only then is it worth looking for a rebuild.
    if last_case != 'jenkins' or not check_rebuild:
        return test_build

    iso_magnet = get_job_parameter(test_build.build_data, 'MAGNET_LINK')
    if not iso_magnet:
        return test_build

    latest_build_number = Build(build_name, 'latest').number
    # Scan newer builds for one started with the same ISO magnet link.
    for candidate in range(build_number + 1, latest_build_number + 1):
        test_rebuild = Build(build_name, candidate)
        if get_job_parameter(test_rebuild.build_data,
                             'MAGNET_LINK') == iso_magnet:
            logger.debug("Found test job rebuild: "
                         "{0}".format(test_rebuild.url))
            return test_rebuild
    return test_build
예제 #34
0
def get_test_build(build_name, build_number, check_rebuild=False):
    """Get test data from Jenkins job build.

    :param build_name: string, Jenkins job name
    :param build_number: build number (used in integer arithmetic when
        searching for rebuilds)
    :param check_rebuild: bool, if True then look for newer job rebuild(s)
    :return: Build object with test data
    """
    original = Build(build_name, build_number)
    cases = original.test_data()['suites'][0]['cases']
    # A last case named 'jenkins' marks an aborted/infra-failed run;
    # only then do we search for a rebuild.
    if cases.pop()['name'] != 'jenkins' or not check_rebuild:
        return original

    magnet = get_job_parameter(original.build_data, 'MAGNET_LINK')
    if not magnet:
        return original

    newest = Build(build_name, 'latest').number
    rebuild_number = build_number
    # Walk newer builds looking for one started with the same ISO magnet.
    while rebuild_number < newest:
        rebuild_number += 1
        rebuild = Build(build_name, rebuild_number)
        if get_job_parameter(rebuild.build_data, 'MAGNET_LINK') == magnet:
            logger.debug("Found test job rebuild: "
                         "{0}".format(rebuild.url))
            return rebuild
    return original
예제 #35
0
def upload_tests_descriptions(testrail_project, section_id,
                              tests, check_all_sections):
    """Create missing test cases in TestRail and update changed ones.

    :param testrail_project: TestRail project client
    :param section_id: int, section where new cases are created
    :param tests: list of dicts describing test cases
    :param check_all_sections: bool, if True then look for existing cases
        in the whole suite instead of only ``section_id``
    """
    tests_suite = testrail_project.get_suite_by_name(
        TestRailSettings.tests_suite)
    section = None if check_all_sections else section_id
    cases = testrail_project.get_cases(suite_id=tests_suite['id'],
                                       section_id=section)
    known_groups = [case[GROUP_FIELD] for case in cases]
    defaults = _get_custom_cases_fields(
        case_fields=testrail_project.get_case_fields(),
        project_id=testrail_project.project['id'])

    for test_case in tests:
        if test_case[GROUP_FIELD] not in known_groups:
            # Brand new case: fill required custom fields with defaults.
            for name, value in defaults.items():
                test_case.setdefault(name, value)
            logger.debug('Uploading test "{0}" to TestRail project "{1}", '
                         'suite "{2}", section "{3}"'.format(
                             test_case[GROUP_FIELD],
                             TestRailSettings.project,
                             TestRailSettings.tests_suite,
                             TestRailSettings.tests_section))
            testrail_project.add_case(section_id=section_id, case=test_case)
            continue

        testrail_case = _get_testrail_case(testrail_cases=cases,
                                           test_case=test_case,
                                           group_field=GROUP_FIELD)
        fields_to_update = _get_fields_to_update(test_case, testrail_case)
        if not fields_to_update:
            logger.debug('Skipping "{0}" test case uploading because '
                         'it is up-to-date in "{1}" suite'
                         .format(test_case[GROUP_FIELD],
                                 TestRailSettings.tests_suite))
            continue

        logger.debug('Updating test "{0}" in TestRail project "{1}", '
                     'suite "{2}", section "{3}". Updated fields: {4}'
                     .format(test_case[GROUP_FIELD],
                             TestRailSettings.project,
                             TestRailSettings.tests_suite,
                             TestRailSettings.tests_section,
                             ', '.join(fields_to_update.keys())))
        testrail_project.update_case(case_id=testrail_case['id'],
                                     fields=fields_to_update)
예제 #36
0
def upload_tests_descriptions(testrail_project, section_id, tests,
                              check_all_sections):
    """Upload new test cases to TestRail and update changed existing ones.

    :param testrail_project: TestRail project client
    :param section_id: int, section where new cases are created
    :param tests: list of dicts describing test cases
    :param check_all_sections: bool, if True then look for existing cases
        in the whole suite instead of only ``section_id``
    """
    suite = testrail_project.get_suite_by_name(TestRailSettings.tests_suite)
    lookup_section = None if check_all_sections else section_id
    testrail_cases = testrail_project.get_cases(suite_id=suite['id'],
                                                section_id=lookup_section)
    present_groups = [c[GROUP_FIELD] for c in testrail_cases]
    required_defaults = _get_custom_cases_fields(
        case_fields=testrail_project.get_case_fields(),
        project_id=testrail_project.project['id'])

    for candidate in tests:
        if candidate[GROUP_FIELD] in present_groups:
            tr_case = _get_testrail_case(testrail_cases=testrail_cases,
                                         test_case=candidate,
                                         group_field=GROUP_FIELD)
            updates = _get_fields_to_update(candidate, tr_case)
            if updates:
                logger.debug(
                    'Updating test "{0}" in TestRail project "{1}", '
                    'suite "{2}", section "{3}". Updated fields: {4}'.format(
                        candidate[GROUP_FIELD], TestRailSettings.project,
                        TestRailSettings.tests_suite,
                        TestRailSettings.tests_section,
                        ', '.join(updates.keys())))
                testrail_project.update_case(case_id=tr_case['id'],
                                             fields=updates)
            else:
                logger.debug('Skipping "{0}" test case uploading because '
                             'it is up-to-date in "{1}" suite'.format(
                                 candidate[GROUP_FIELD],
                                 TestRailSettings.tests_suite))
        else:
            # New case: fill required custom fields with default values.
            for field_name, default in required_defaults.items():
                if field_name not in candidate:
                    candidate[field_name] = default
            logger.debug('Uploading test "{0}" to TestRail project "{1}", '
                         'suite "{2}", section "{3}"'.format(
                             candidate[GROUP_FIELD], TestRailSettings.project,
                             TestRailSettings.tests_suite,
                             TestRailSettings.tests_section))
            testrail_project.add_case(section_id=section_id, case=candidate)
예제 #37
0
def get_build_artifact(url, artifact):
    """Return the text content of a Jenkins job build artifact."""
    artifact_url = "/".join([url, 'artifact', artifact])
    logger.debug("Request artifact content from {}".format(artifact_url))
    return requests.get(artifact_url).text
예제 #38
0
파일: builds.py 프로젝트: dtsapikov/fuel-qa
def get_build_artifact(url, artifact):
    """Fetch a build artifact from Jenkins and return its text content."""
    target = "/".join((url, 'artifact', artifact))
    logger.debug("Request artifact content from {}".format(target))
    response = requests.get(target)
    return response.text
예제 #39
0
def main():
    """Publish swarm system test results from a Jenkins runner build to
    TestRail.

    Reads results from the runner's matrix 'runs', groups them by
    milestone / ISO number / operating system, then creates (or reuses)
    a TestPlan per milestone+ISO and uploads the results into per-OS runs.
    """
    parser = OptionParser(
        description="Publish results of system tests from Jenkins build to "
        "TestRail. See settings.py for configuration.")
    parser.add_option('-j',
                      '--job-name',
                      dest='job_name',
                      default=None,
                      help='Jenkins swarm runner job name')
    parser.add_option('-N',
                      '--build-number',
                      dest='build_number',
                      default='latest',
                      help='Jenkins swarm runner build number')
    parser.add_option("-l",
                      "--live",
                      dest="live_report",
                      action="store_true",
                      help="Get tests results from running swarm")
    parser.add_option("-v",
                      "--verbose",
                      action="store_true",
                      dest="verbose",
                      default=False,
                      help="Enable debug output")

    (options, _) = parser.parse_args()

    if options.verbose:
        logger.setLevel(DEBUG)

    # For live reports use the latest *started* (possibly unfinished)
    # build instead of the latest completed one.
    if options.live_report and options.build_number == 'latest':
        build_number = 'latest_started'
    else:
        build_number = options.build_number

    # STEP #1
    # Initialize TestRail Project and define configuration
    logger.info('Initializing TestRail Project configuration...')
    project = TestRailProject(url=TestRailSettings.url,
                              user=TestRailSettings.user,
                              password=TestRailSettings.password,
                              project=TestRailSettings.project)
    logger.info('Initializing TestRail Project configuration... done')

    # 'distro' is the lower-cased first word of the config name
    # (e.g. 'Ubuntu 14.04' -> 'ubuntu'); used to match job names below.
    operation_systems = [{
        'name': config['name'],
        'id': config['id'],
        'distro': config['name'].split()[0].lower()
    } for config in project.get_config_by_name('Operation System')['configs']]
    # Hard-coded set of supported OS configurations per milestone.
    os_mile = {
        '6.1': ['Centos 6.5', 'Ubuntu 14.04'],
        '6.0.1': ['Centos 6.5', 'Ubuntu 12.04']
    }

    # Nested mapping: milestone -> iso number -> os id -> list of results.
    tests_results = {}

    # STEP #2
    # Get tests results from Jenkins
    runner_build = Build(options.job_name, build_number)
    runs = runner_build.build_data['runs']

    # Analyze each test individually
    for run_one in runs:
        if '5.1' in run_one['url']:
            continue  # Release 5.1 to skip
        tests_result = get_job_info(run_one['url'])
        if not tests_result['description']:
            continue  # Not completed results to skip
        if 'skipping' in tests_result['description']:
            continue  # Not performed tests to skip
        # Job description is assumed to start with '<milestone>-<iso>'
        # (e.g. '6.1-100') -- TODO confirm against the Jenkins job setup.
        tests_job = {
            'result': tests_result['result'],
            'name':
            (options.job_name + '/' + tests_result['url'].split('/')[-3]),
            'number': int(tests_result['url'].split('/')[-2]),
            'mile': (tests_result['description'].split()[0].split('-')[0]),
            'iso': (int(tests_result['description'].split()[0].split('-')[1]))
        }
        if tests_job['mile'] not in tests_results:
            tests_results[tests_job['mile']] = {}
        test_mile = tests_results[tests_job['mile']]
        if tests_job['iso'] not in test_mile:
            test_mile[tests_job['iso']] = {}
        test_iso = test_mile[tests_job['iso']]
        # NOTE(review): the loop variable 'os' shadows the stdlib module
        # name within this function.
        for os in operation_systems:
            if os['distro'] in tests_job['name'].lower() and\
                    os['name'] in os_mile[tests_job['mile']]:
                if os['id'] not in test_iso:
                    test_iso[os['id']] = []
                test_os_id = test_iso[os['id']]
                test_os_id.extend(get_tests_results(tests_job, os['distro']))

    # STEP #3
    # Create new TestPlan in TestRail (or get existing) and add TestRuns
    for mile in tests_results:
        mile_tests_suite = '{0}{1}'.format(TestRailSettings.tests_suite, mile)
        logger.info(mile_tests_suite)
        tests_suite = project.get_suite_by_name(mile_tests_suite)
        milestone = project.get_milestone_by_name(name=mile)
        for iso_number in tests_results.get(mile, {}):
            # Create new TestPlan name check the same name in testrail
            test_plan_name = '{milestone} iso #{iso_number}'.format(
                milestone=milestone['name'], iso_number=iso_number)
            test_plan = project.get_plan_by_name(test_plan_name)
            if not test_plan:
                test_plan = project.add_plan(test_plan_name,
                                             description='/'.join([
                                                 JENKINS['url'], 'job',
                                                 '{0}.all'.format(
                                                     milestone['name']),
                                                 str(iso_number)
                                             ]),
                                             milestone_id=milestone['id'],
                                             entries=[])
                logger.info(
                    'Created new TestPlan "{0}".'.format(test_plan_name))
            else:
                logger.info(
                    'Found existing TestPlan "{0}".'.format(test_plan_name))
            plan_entries = []
            # Create a test plan entry
            config_ids = []
            for os in operation_systems:
                if os['name'] in os_mile[mile]:
                    config_ids.append(os['id'])
                    cases_ids = []
                    plan_entries.append(
                        project.test_run_struct(
                            name=tests_suite['name'],
                            suite_id=tests_suite['id'],
                            milestone_id=milestone['id'],
                            description=('Results of system tests ({t_suite})'
                                         ' on iso #"{iso_number}"'.format(
                                             t_suite=tests_suite['name'],
                                             iso_number=iso_number)),
                            config_ids=[os['id']],
                            include_all=True,
                            case_ids=cases_ids))
            # Create a test plan entry with the test run
            run = find_run_by_name(test_plan, tests_suite['name'])
            if not run:
                logger.info('Adding a test plan entry with test run %s ...',
                            tests_suite['name'])
                entry = project.add_plan_entry(plan_id=test_plan['id'],
                                               suite_id=tests_suite['id'],
                                               config_ids=config_ids,
                                               runs=plan_entries)
                logger.info('The test plan entry has been added.')
                run = entry['runs'][0]
            # Re-fetch the plan so it includes the entry added above.
            test_plan = project.get_plan(test_plan['id'])

            # STEP #4
            # Upload tests results to TestRail
            logger.info('Uploading tests results to TestRail...')
            for os_id in tests_results.get(mile, {})\
                    .get(iso_number, {}):
                logger.info('Checking tests results for %s...',
                            project.get_config(os_id)['name'])
                tests_added = publish_results(
                    project=project,
                    milestone_id=milestone['id'],
                    test_plan=test_plan,
                    suite_id=tests_suite['id'],
                    config_id=os_id,
                    results=tests_results[mile][iso_number][os_id])
                logger.debug('Added new results for tests (%s): %s',
                             project.get_config(os_id)['name'],
                             [r.group for r in tests_added])

            logger.info('Report URL: %s', test_plan['url'])
def get_tests_descriptions(milestone_id, tests_include, tests_exclude, groups,
                           default_test_priority):
    """Collect test case descriptions from the registered test groups.

    :param milestone_id: TestRail milestone id (not consumed in the code
        visible here -- presumably used further down; TODO confirm)
    :param tests_include: substring; only cases whose method name contains
        it are kept (ignored when falsy)
    :param tests_exclude: substring; cases whose method name contains it
        are skipped (ignored when falsy)
    :param groups: mapping {jenkins job suffix: test group name}
    :param default_test_priority: default priority for cases (not consumed
        in the visible part of this function; TODO confirm)
    """
    discover_import_tests(get_basepath(), tests_directory)
    define_custom_groups()
    for one in groups:
        register_system_test_cases(one)
    plan = TestPlan.create_from_registry(DEFAULT_REGISTRY)
    all_plan_tests = plan.tests[:]

    tests = []

    for jenkins_suffix in groups:
        group = groups[jenkins_suffix]
        plan.filter(group_names=[group])
        for case in plan.tests:
            if not case.entry.info.enabled:
                continue
            home = case.entry.home
            if not hasattr(case.entry, 'parent'):
                # Not a real case, some stuff needed by template based tests
                continue
            parent_home = case.entry.parent.home
            case_state = case.state
            if issubclass(parent_home, ActionTest):
                # Action (template-based) tests are named after the parent
                # class; skip duplicates already collected.
                case_name = parent_home.__name__
                test_group = parent_home.__name__
                if any([x['custom_test_group'] == test_group for x in tests]):
                    continue
            else:
                case_name = home.func_name
                test_group = case.entry.home.func_name
            if tests_include:
                if tests_include not in case_name:
                    logger.debug("Skipping '{0}' test because it doesn't "
                                 "contain '{1}' in method name".format(
                                     case_name, tests_include))
                    continue
            if tests_exclude:
                if tests_exclude in case_name:
                    logger.debug("Skipping '{0}' test because it contains"
                                 " '{1}' in method name".format(
                                     case_name, tests_exclude))
                    continue

            if issubclass(parent_home, ActionTest):
                # Append the configuration name to the first docstring line
                # so configurations of the same action test are distinct.
                docstring = parent_home.__doc__.split('\n')
                case_state.instance._load_config()
                configuration = case_state.instance.config_name
                docstring[0] = "{0} on {1}".format(docstring[0], configuration)
                docstring = '\n'.join(docstring)
            else:
                docstring = home.func_doc or ''
                configuration = None
            # Strip per-line indentation from the docstring.
            docstring = '\n'.join([s.strip() for s in docstring.split('\n')])

            # Docstring lines starting with a digit are treated as
            # numbered scenario steps.
            steps = [{
                "content": s,
                "expected": "pass"
            } for s in docstring.split('\n') if s and s[0].isdigit()]

            test_duration = re.search(r'Duration\s+(\d+[s,m])\b', docstring)
            title = docstring.split('\n')[0] or case.entry.home.func_name

            if case.entry.home.func_name in GROUPS_TO_EXPAND:
                """Expand specified test names with the group names that are
                   used in jenkins jobs where this test is started.
                """
                title = ' - '.join([title, jenkins_suffix])
                test_group = '_'.join(
                    [case.entry.home.func_name, jenkins_suffix])
예제 #41
0
def get_job_info(url):
    """Return the Jenkins JSON API payload for the given job/build URL."""
    api_url = "/".join([url, 'api/json'])
    logger.debug("Request job info from %s", api_url)
    return requests.get(api_url).json()
예제 #42
0
def get_job_info(url):
    """Fetch job information from the Jenkins JSON API."""
    endpoint = "/".join((url, 'api/json'))
    logger.debug("Request job info from %s", endpoint)
    response = requests.get(endpoint)
    return response.json()
def main():
    """Generate bug statistics for a TestRail TestPlan and optionally
    publish them to the plan description and/or save them to a file.

    Returns 1 when no TestPlan can be resolved from the arguments.
    """
    parser = argparse.ArgumentParser(
        description="Generate statistics for bugs linked to TestRun. Publish "
        "statistics to testrail if necessary.")
    parser.add_argument('plan_id',
                        type=int,
                        nargs='?',
                        default=None,
                        help='Test plan ID in TestRail')
    parser.add_argument('-j',
                        '--job-name',
                        dest='job_name',
                        type=str,
                        default=None,
                        help='Name of Jenkins job which runs tests (runner). '
                        'It will be used for TestPlan search instead ID')
    parser.add_argument('-n',
                        '--build-number',
                        dest='build_number',
                        default='latest',
                        help='Jenkins job build number')
    parser.add_argument('-r',
                        '--run-id',
                        dest='run_ids',
                        type=str,
                        default=None,
                        help='(optional) IDs of TestRun to check (skip other)')
    parser.add_argument('-b',
                        '--handle-blocked',
                        action="store_true",
                        dest='handle_blocked',
                        default=False,
                        help='Copy bugs links to downstream blocked results')
    parser.add_argument('-s',
                        '--separate-runs',
                        action="store_true",
                        dest='separate_runs',
                        default=False,
                        help='Create separate statistics for each test run')
    parser.add_argument('-p',
                        '--publish',
                        action="store_true",
                        help='Publish statistics to TestPlan description')
    parser.add_argument('-o',
                        '--out-file',
                        dest='output_file',
                        default=None,
                        type=str,
                        help='Path to file to save statistics as JSON and/or '
                        'HTML. Filename extension is added automatically')
    parser.add_argument('-H',
                        '--html',
                        action="store_true",
                        help='Save statistics in HTML format to file '
                        '(used with --out-file option)')
    parser.add_argument('-q',
                        '--quiet',
                        action="store_true",
                        help='Be quiet (disable logging except critical) '
                        'Overrides "--verbose" option.')
    parser.add_argument("-v",
                        "--verbose",
                        action="store_true",
                        help="Enable debug logging.")

    args = parser.parse_args()

    if args.verbose:
        logger.setLevel(DEBUG)

    # --quiet wins over --verbose (applied last).
    if args.quiet:
        logger.setLevel(CRITICAL)

    testrail_project = get_testrail()

    # When a Jenkins job is given, resolve the TestPlan id from the job
    # name/build; otherwise the positional plan_id is used as-is.
    if args.job_name:
        logger.info('Inspecting {0} build of {1} Jenkins job for TestPlan '
                    'details...'.format(args.build_number, args.job_name))
        test_plan_name = generate_test_plan_name(args.job_name,
                                                 args.build_number)
        test_plan = testrail_project.get_plan_by_name(test_plan_name)
        if test_plan:
            args.plan_id = test_plan['id']
        else:
            logger.warning('TestPlan "{0}" not found!'.format(test_plan_name))

    if not args.plan_id:
        logger.error('There is no TestPlan to process, exiting...')
        return 1

    run_ids = () if not args.run_ids else tuple(
        int(arg) for arg in args.run_ids.split(','))

    generator = StatisticsGenerator(testrail_project, args.plan_id, run_ids,
                                    args.handle_blocked)
    generator.generate()
    stats = generator.dump()

    if args.publish:
        logger.debug('Publishing bugs statistics to TestRail..')
        generator.publish(stats)

    if args.output_file:
        # Render HTML only when --html was passed; otherwise forward the
        # falsy flag so save_stats_to_file skips the HTML file.
        html = generator.dump_html(stats) if args.html else args.html
        save_stats_to_file(stats, args.output_file, html)

        if args.separate_runs:
            # One extra stats file per test run, suffixed with the run id.
            for run in generator.test_runs_stats:
                file_name = '{0}_{1}'.format(args.output_file, run['id'])
                stats = generator.dump(run_id=run['id'])
                html = (generator.dump_html(stats, run['id'])
                        if args.html else args.html)
                save_stats_to_file(stats, file_name, html)

    logger.info('Statistics generation complete!')
예제 #44
0
파일: builds.py 프로젝트: dtsapikov/fuel-qa
 def get_job_console(self):
     """Fetch this build's console output and return it as a list of lines."""
     console_url = "/".join([
         JENKINS["url"], 'job', self.name, str(self.number), 'consoleText'])
     logger.debug("Request job console from {}".format(console_url))
     response = requests.get(console_url)
     return response.text.split('\n')
예제 #45
0
def get_tests_descriptions(milestone_id, testrail_default_test_priority,
                           testrail_project):
    """Collect Tempest tests and build TestRail case descriptions.

    Runs nose test collection inside a pre-built tempest checkout (see the
    setup commands below) for every group in TEST_GROUPS, then converts each
    collected test line into a TestRail test case dict, grouped by section.

    :param milestone_id: TestRail milestone id to attach the cases to
    :param testrail_default_test_priority: priority id for the new cases
    :param testrail_project: TestRail project wrapper, used to look up
        custom case fields
    :return: dict mapping section name -> list of test case dicts
    """
    # To get the Tempest tests list, need to execute the following commands:
    # git clone https://github.com/openstack/tempest & cd tempest & tox -evenv
    # .tox/venv/bin/pip install nose
    collect_cmd = ("cd tempest && .tox/venv/bin/nosetests "
                   "--collect-only tempest/{0} -v 2>&1 | grep 'id-.*'")
    commit_cmd = "cd tempest && git rev-parse HEAD"
    commit = subprocess.Popen(commit_cmd, shell=True, stdout=subprocess.PIPE)
    logger.info("Generate test suite for tempest"
                " commit:{}".format(commit.stdout.readline()))
    custom_cases_fields = _get_custom_cases_fields(
        case_fields=testrail_project.get_case_fields(),
        project_id=testrail_project.project['id'])
    tests = {}

    for group in TEST_GROUPS:
        collector = subprocess.Popen(collect_cmd.format(group.lower()),
                                     shell=True,
                                     stdout=subprocess.PIPE)

        for line in iter(collector.stdout.readline, b''):
            # API tests get a section derived from the test path; every
            # other group is its own section.
            section = generate_groups(line) if group == "API" else group

            # Everything before the first 'id-...' fragment forms the dotted
            # test class path; the 'id-' fragment itself becomes the title.
            class_parts = []
            for fragment in line.split("."):
                if "id-" in fragment:
                    title = fragment.strip()
                    break
                class_parts.append(fragment)

            steps = [
                {
                    "run this tempest test": "passed"
                },
            ]

            test_case = {
                "title": title,
                "type_id": 1,
                "milestone_id": milestone_id,
                "priority_id": testrail_default_test_priority,
                "estimate": "1m",
                "refs": "",
                "custom_report_label": title.split('id-')[1][:-1],
                "custom_test_group": ".".join(class_parts),
                "custom_test_case_description": title,
                "custom_test_case_steps": steps,
                "section": section
            }
            # Fill in any custom fields the project defines but the case
            # dict does not already set.
            for case_field, default_value in custom_cases_fields.items():
                test_case.setdefault(case_field, default_value)
            tests.setdefault(section, []).append(test_case)
    logger.debug(tests)
    logger.info("total test cases: {}".format(
        sum(len(cases) for cases in tests.values())))
    return tests
예제 #46
0
def main():
    """Publish system test results from a Jenkins swarm run to TestRail.

    Command-line entry point.  Collects per-OS test results from either a
    Jenkins view (-w) or a swarm runner job (-j), creates (or reuses) a
    TestPlan named after the tested ISO, adds TestRuns for each configured
    operating system, and uploads the results.

    :return: None (returns early on bad arguments or with -c/--create-plan-only)
    """

    parser = OptionParser(
        description="Publish results of system tests from Jenkins build to "
        "TestRail. See settings.py for configuration.")
    parser.add_option('-j',
                      '--job-name',
                      dest='job_name',
                      default=None,
                      help='Jenkins swarm runner job name')
    parser.add_option('-N',
                      '--build-number',
                      dest='build_number',
                      default='latest',
                      help='Jenkins swarm runner build number')
    parser.add_option('-o',
                      '--one-job',
                      dest='one_job_name',
                      default=None,
                      help=('Process only one job name from the specified '
                            'parent job or view'))
    parser.add_option("-w",
                      "--view",
                      dest="jenkins_view",
                      default=False,
                      help="Get system tests jobs from Jenkins view")
    parser.add_option("-l",
                      "--live",
                      dest="live_report",
                      action="store_true",
                      help="Get tests results from running swarm")
    parser.add_option("-m",
                      "--manual",
                      dest="manual_run",
                      action="store_true",
                      help="Manually add tests cases to TestRun (tested only)")
    parser.add_option('-c',
                      '--create-plan-only',
                      action="store_true",
                      dest="create_plan_only",
                      default=False,
                      help='Jenkins swarm runner job name')
    parser.add_option("-v",
                      "--verbose",
                      action="store_true",
                      dest="verbose",
                      default=False,
                      help="Enable debug output")

    (options, _) = parser.parse_args()

    if options.verbose:
        logger.setLevel(DEBUG)

    # A still-running swarm has no finished 'latest' build; use the
    # latest started one instead.
    if options.live_report and options.build_number == 'latest':
        options.build_number = 'latest_started'

    # STEP #1
    # Initialize TestRail Project and define configuration
    logger.info('Initializing TestRail Project configuration...')
    project = TestRailProject(url=TestRailSettings.url,
                              user=TestRailSettings.user,
                              password=TestRailSettings.password,
                              project=TestRailSettings.project)

    tests_suite = project.get_suite_by_name(TestRailSettings.tests_suite)
    # Keep only the OS configurations enabled in settings; 'distro' is the
    # lower-cased first word of the config name (e.g. 'ubuntu', 'centos').
    operation_systems = [
        {
            'name': config['name'],
            'id': config['id'],
            'distro': config['name'].split()[0].lower()
        }
        for config in project.get_config_by_name('Operation System')['configs']
        if config['name'] in TestRailSettings.operation_systems
    ]
    tests_results = {os['distro']: [] for os in operation_systems}

    # STEP #2
    # Get tests results from Jenkins
    logger.info('Getting tests results from Jenkins...')
    if options.jenkins_view:
        jobs = get_jobs_for_view(options.jenkins_view)
        tests_jobs = [{'name': j, 'number': 'latest'}
                      for j in jobs if 'system_test' in j] if \
            not options.create_plan_only else []
        runner_job = [j for j in jobs if 'runner' in j][0]
        runner_build = Build(runner_job, 'latest')
    elif options.job_name:
        runner_build = Build(options.job_name, options.build_number)
        tests_jobs = get_downstream_builds(runner_build.build_data) if \
            not options.create_plan_only else []
    else:
        logger.error("Please specify either Jenkins swarm runner job name (-j)"
                     " or Jenkins view with system tests jobs (-w). Exiting..")
        return

    for systest_build in tests_jobs:
        if (options.one_job_name
                and options.one_job_name != systest_build['name']):
            logger.debug(
                "Skipping '{0}' because --one-job is specified".format(
                    systest_build['name']))
            continue
        if options.job_name:
            # Downstream builds without a 'result' key ran no tests;
            # a None result means the build is still in progress.
            if 'result' not in systest_build.keys():
                logger.debug("Skipping '{0}' job because it does't run tests "
                             "(build #{1} contains no results)".format(
                                 systest_build['name'],
                                 systest_build['number']))
                continue
            if systest_build['result'] is None:
                logger.debug("Skipping '{0}' job (build #{1}) because it's sti"
                             "ll running...".format(
                                 systest_build['name'],
                                 systest_build['number'],
                             ))
                continue
        # Route the build's results to the matching distro bucket based on
        # the job name containing the distro string.
        for os in tests_results.keys():
            if os in systest_build['name'].lower():
                tests_results[os].extend(get_tests_results(systest_build, os))

    # STEP #3
    # Create new TestPlan in TestRail (or get existing) and add TestRuns
    milestone, iso_number, prefix = get_version(runner_build.build_data)
    milestone = project.get_milestone_by_name(name=milestone)

    test_plan_name = ' '.join(
        filter(lambda x: bool(x),
               (milestone['name'], prefix, 'iso', '#' + str(iso_number))))

    test_plan = project.get_plan_by_name(test_plan_name)
    iso_link = '/'.join([
        JENKINS['url'], 'job', '{0}.all'.format(milestone['name']),
        str(iso_number)
    ])
    if not test_plan:
        test_plan = project.add_plan(test_plan_name,
                                     description=iso_link,
                                     milestone_id=milestone['id'],
                                     entries=[])
        logger.info('Created new TestPlan "{0}".'.format(test_plan_name))
    else:
        logger.info('Found existing TestPlan "{0}".'.format(test_plan_name))

    if options.create_plan_only:
        return

    plan_entries = []
    all_cases = project.get_cases(suite_id=tests_suite['id'])
    for os in operation_systems:
        # With --manual, restrict the run to cases whose test group actually
        # produced a result; otherwise include_all covers the suite.
        cases_ids = []
        if options.manual_run:
            all_results_groups = [r.group for r in tests_results[os['distro']]]
            for case in all_cases:
                if case['custom_test_group'] in all_results_groups:
                    cases_ids.append(case['id'])
        plan_entries.append(
            project.test_run_struct(
                name='{suite_name}'.format(suite_name=tests_suite['name']),
                suite_id=tests_suite['id'],
                milestone_id=milestone['id'],
                description='Results of system tests ({tests_suite}) on is'
                'o #"{iso_number}"'.format(tests_suite=tests_suite['name'],
                                           iso_number=iso_number),
                config_ids=[os['id']],
                include_all=True,
                case_ids=cases_ids))

    # Only add the plan entry once per suite; re-fetch the plan so it
    # includes the freshly created runs.
    if not any(entry['suite_id'] == tests_suite['id']
               for entry in test_plan['entries']):
        if project.add_plan_entry(
                plan_id=test_plan['id'],
                suite_id=tests_suite['id'],
                config_ids=[os['id'] for os in operation_systems],
                runs=plan_entries):
            test_plan = project.get_plan(test_plan['id'])

    # STEP #4
    # Upload tests results to TestRail
    logger.info('Uploading tests results to TestRail...')
    for os in operation_systems:
        logger.info('Checking tests results for "{0}"...'.format(os['name']))
        results_to_publish = publish_results(
            project=project,
            milestone_id=milestone['id'],
            test_plan=test_plan,
            suite_id=tests_suite['id'],
            config_id=os['id'],
            results=tests_results[os['distro']])
        logger.debug('Added new results for tests ({os}): {tests}'.format(
            os=os['name'], tests=[r.group for r in results_to_publish]))

    logger.info('Report URL: {0}'.format(test_plan['url']))
예제 #47
0
파일: report_pi.py 프로젝트: avgoor/fuel-qa
def main():
    """Publish per-milestone swarm results from a multi-run Jenkins job.

    Command-line entry point.  Walks every run of a matrix (multi-
    configuration) runner build, buckets results by milestone / ISO number /
    OS configuration, then creates (or reuses) a TestPlan per ISO and
    uploads the results to TestRail.
    """
    parser = OptionParser(
        description="Publish results of system tests from Jenkins build to "
                    "TestRail. See settings.py for configuration."
    )
    parser.add_option('-j', '--job-name', dest='job_name', default=None,
                      help='Jenkins swarm runner job name')
    parser.add_option('-N', '--build-number', dest='build_number',
                      default='latest',
                      help='Jenkins swarm runner build number')
    parser.add_option("-l", "--live", dest="live_report", action="store_true",
                      help="Get tests results from running swarm")
    parser.add_option("-v", "--verbose",
                      action="store_true", dest="verbose", default=False,
                      help="Enable debug output")

    (options, _) = parser.parse_args()

    if options.verbose:
        logger.setLevel(DEBUG)

    # A still-running swarm has no finished 'latest' build; use the
    # latest started one instead.
    if options.live_report and options.build_number == 'latest':
        build_number = 'latest_started'
    else:
        build_number = options.build_number

    # STEP #1
    # Initialize TestRail Project and define configuration
    logger.info('Initializing TestRail Project configuration...')
    project = TestRailProject(url=TestRailSettings.url,
                              user=TestRailSettings.user,
                              password=TestRailSettings.password,
                              project=TestRailSettings.project)
    logger.info('Initializing TestRail Project configuration... done')

    operation_systems = [{'name': config['name'], 'id': config['id'],
                          'distro': config['name'].split()[0].lower()}
                         for config in project.get_config_by_name(
                             'Operation System')['configs']]
    # Milestone -> OS configurations that are valid for that release.
    os_mile = {'6.1': ['Centos 6.5', 'Ubuntu 14.04'],
               '6.0.1': ['Centos 6.5', 'Ubuntu 12.04']}

    # Nested buckets: tests_results[milestone][iso_number][os_id] -> results
    tests_results = {}

    # STEP #2
    # Get tests results from Jenkins
    runner_build = Build(options.job_name, build_number)
    runs = runner_build.build_data['runs']

    # Analyze each test individually
    for run_one in runs:
        if '5.1' in run_one['url']:
            continue  # Release 5.1 to skip
        tests_result = get_job_info(run_one['url'])
        if not tests_result['description']:
            continue  # Not completed results to skip
        if 'skipping' in tests_result['description']:
            continue  # Not performed tests to skip
        # The description's first token looks like '<milestone>-<iso>';
        # the job name and build number are taken from the run URL parts.
        tests_job = {'result': tests_result['result'],
                     'name': (options.job_name + '/' +
                              tests_result['url'].split('/')[-3]),
                     'number': int(tests_result['url'].split('/')[-2]),
                     'mile': (tests_result['description'].
                              split()[0].split('-')[0]),
                     'iso': (int(tests_result['description'].
                             split()[0].split('-')[1]))}
        if tests_job['mile'] not in tests_results:
            tests_results[tests_job['mile']] = {}
        test_mile = tests_results[tests_job['mile']]
        if tests_job['iso'] not in test_mile:
            test_mile[tests_job['iso']] = {}
        test_iso = test_mile[tests_job['iso']]
        for os in operation_systems:
            if os['distro'] in tests_job['name'].lower() and\
                    os['name'] in os_mile[tests_job['mile']]:
                if os['id'] not in test_iso:
                    test_iso[os['id']] = []
                test_os_id = test_iso[os['id']]
                test_os_id.extend(get_tests_results(tests_job, os['distro']))

    # STEP #3
    # Create new TestPlan in TestRail (or get existing) and add TestRuns
    for mile in tests_results:
        mile_tests_suite = '{0}{1}'.format(TestRailSettings.tests_suite, mile)
        logger.info(mile_tests_suite)
        tests_suite = project.get_suite_by_name(mile_tests_suite)
        milestone = project.get_milestone_by_name(name=mile)
        for iso_number in tests_results.get(mile, {}):
            # Create new TestPlan name check the same name in testrail
            test_plan_name = '{milestone} iso #{iso_number}'.format(
                milestone=milestone['name'],
                iso_number=iso_number)
            test_plan = project.get_plan_by_name(test_plan_name)
            if not test_plan:
                test_plan = project.add_plan(
                    test_plan_name,
                    description='/'.join([JENKINS['url'],
                                          'job',
                                          '{0}.all'.format(milestone['name']),
                                          str(iso_number)]),
                    milestone_id=milestone['id'],
                    entries=[])
                logger.info('Created new TestPlan "{0}".'
                            .format(test_plan_name))
            else:
                logger.info('Found existing TestPlan "{0}".'
                            .format(test_plan_name))
            plan_entries = []
            # Create a test plan entry
            config_ids = []
            for os in operation_systems:
                if os['name'] in os_mile[mile]:
                    config_ids.append(os['id'])
                    cases_ids = []
                    plan_entries.append(
                        project.test_run_struct(
                            name=tests_suite['name'],
                            suite_id=tests_suite['id'],
                            milestone_id=milestone['id'],
                            description=('Results of system tests ({t_suite})'
                                         ' on iso #"{iso_number}"'
                                         .format(t_suite=tests_suite['name'],
                                                 iso_number=iso_number)),
                            config_ids=[os['id']],
                            include_all=True,
                            case_ids=cases_ids))
            # Create a test plan entry with the test run
            run = find_run_by_name(test_plan, tests_suite['name'])
            if not run:
                logger.info('Adding a test plan entry with test run %s ...',
                            tests_suite['name'])
                entry = project.add_plan_entry(plan_id=test_plan['id'],
                                               suite_id=tests_suite['id'],
                                               config_ids=config_ids,
                                               runs=plan_entries)
                logger.info('The test plan entry has been added.')
                run = entry['runs'][0]
            test_plan = project.get_plan(test_plan['id'])

            # STEP #4
            # Upload tests results to TestRail
            logger.info('Uploading tests results to TestRail...')
            for os_id in tests_results.get(mile, {})\
                    .get(iso_number, {}):
                logger.info('Checking tests results for %s...',
                            project.get_config(os_id)['name'])
                tests_added = publish_results(
                    project=project,
                    milestone_id=milestone['id'],
                    test_plan=test_plan,
                    suite_id=tests_suite['id'],
                    config_id=os_id,
                    results=tests_results[mile][iso_number][os_id])
                logger.debug('Added new results for tests (%s): %s',
                             project.get_config(os_id)['name'],
                             [r.group for r in tests_added])

            logger.info('Report URL: %s', test_plan['url'])
예제 #48
0
파일: report_pi.py 프로젝트: avgoor/fuel-qa
def get_job_info(url):
    """Fetch Jenkins job/build metadata via the JSON API.

    :param url: string, base URL of the Jenkins job or build
    :return: dict parsed from the '<url>/api/json' response
    """
    # Local import so the file-level import block (outside this view)
    # stays untouched.
    from contextlib import closing

    job_url = "/".join([url, 'api/json'])
    logger.debug("Request job info from %s", job_url)
    # Fix: the original `json.load(urlopen(job_url))` never closed the
    # response, leaking the HTTP connection until garbage collection.
    with closing(urlopen(job_url)) as response:
        return json.load(response)
예제 #49
0
파일: builds.py 프로젝트: dtsapikov/fuel-qa
 def get_job_info(self, depth=1):
     """Return JSON metadata for this Jenkins job.

     :param depth: int, Jenkins API tree depth for the response
     :return: dict parsed from the job's JSON API
     """
     query = 'api/json?depth={depth}'.format(depth=depth)
     job_url = "/".join([JENKINS["url"], 'job', self.name, query])
     logger.debug("Request job info from {}".format(job_url))
     return requests.get(job_url).json()
예제 #50
0
파일: report.py 프로젝트: avgoor/fuel-qa
def main():
    """Publish system test results from a Jenkins swarm run to TestRail.

    Command-line entry point.  Collects per-OS test results from either a
    Jenkins view (-w) or a swarm runner job (-j), creates (or reuses) a
    TestPlan named after the tested ISO, adds TestRuns for each configured
    operating system, and uploads the results.

    :return: None (returns early on bad arguments or with -c/--create-plan-only)
    """

    parser = OptionParser(
        description="Publish results of system tests from Jenkins build to "
                    "TestRail. See settings.py for configuration."
    )
    parser.add_option('-j', '--job-name', dest='job_name', default=None,
                      help='Jenkins swarm runner job name')
    parser.add_option('-N', '--build-number', dest='build_number',
                      default='latest',
                      help='Jenkins swarm runner build number')
    parser.add_option('-o', '--one-job', dest='one_job_name',
                      default=None,
                      help=('Process only one job name from the specified '
                            'parent job or view'))
    parser.add_option("-w", "--view", dest="jenkins_view", default=False,
                      help="Get system tests jobs from Jenkins view")
    parser.add_option("-l", "--live", dest="live_report", action="store_true",
                      help="Get tests results from running swarm")
    parser.add_option("-m", "--manual", dest="manual_run", action="store_true",
                      help="Manually add tests cases to TestRun (tested only)")
    parser.add_option('-c', '--create-plan-only', action="store_true",
                      dest="create_plan_only", default=False,
                      help='Jenkins swarm runner job name')
    parser.add_option("-v", "--verbose",
                      action="store_true", dest="verbose", default=False,
                      help="Enable debug output")

    (options, _) = parser.parse_args()

    if options.verbose:
        logger.setLevel(DEBUG)

    # A still-running swarm has no finished 'latest' build; use the
    # latest started one instead.
    if options.live_report and options.build_number == 'latest':
        options.build_number = 'latest_started'

    # STEP #1
    # Initialize TestRail Project and define configuration
    logger.info('Initializing TestRail Project configuration...')
    project = TestRailProject(url=TestRailSettings.url,
                              user=TestRailSettings.user,
                              password=TestRailSettings.password,
                              project=TestRailSettings.project)

    tests_suite = project.get_suite_by_name(TestRailSettings.tests_suite)
    # Keep only the OS configurations enabled in settings; 'distro' is the
    # lower-cased first word of the config name (e.g. 'ubuntu', 'centos').
    operation_systems = [{'name': config['name'], 'id': config['id'],
                         'distro': config['name'].split()[0].lower()}
                         for config in project.get_config_by_name(
                             'Operation System')['configs'] if
                         config['name'] in TestRailSettings.operation_systems]
    tests_results = {os['distro']: [] for os in operation_systems}

    # STEP #2
    # Get tests results from Jenkins
    logger.info('Getting tests results from Jenkins...')
    if options.jenkins_view:
        jobs = get_jobs_for_view(options.jenkins_view)
        tests_jobs = [{'name': j, 'number': 'latest'}
                      for j in jobs if 'system_test' in j] if \
            not options.create_plan_only else []
        runner_job = [j for j in jobs if 'runner' in j][0]
        runner_build = Build(runner_job, 'latest')
    elif options.job_name:
        runner_build = Build(options.job_name, options.build_number)
        tests_jobs = get_downstream_builds(runner_build.build_data) if \
            not options.create_plan_only else []
    else:
        logger.error("Please specify either Jenkins swarm runner job name (-j)"
                     " or Jenkins view with system tests jobs (-w). Exiting..")
        return

    for systest_build in tests_jobs:
        if (options.one_job_name and
                options.one_job_name != systest_build['name']):
            logger.debug("Skipping '{0}' because --one-job is specified"
                         .format(systest_build['name']))
            continue
        if options.job_name:
            # Downstream builds without a 'result' key ran no tests;
            # a None result means the build is still in progress.
            if 'result' not in systest_build.keys():
                logger.debug("Skipping '{0}' job because it does't run tests "
                             "(build #{1} contains no results)".format(
                                 systest_build['name'],
                                 systest_build['number']))
                continue
            if systest_build['result'] is None:
                logger.debug("Skipping '{0}' job (build #{1}) because it's sti"
                             "ll running...".format(systest_build['name'],
                                                    systest_build['number'],))
                continue
        # Route the build's results to the matching distro bucket based on
        # the job name containing the distro string.
        for os in tests_results.keys():
            if os in systest_build['name'].lower():
                tests_results[os].extend(get_tests_results(systest_build, os))

    # STEP #3
    # Create new TestPlan in TestRail (or get existing) and add TestRuns
    milestone, iso_number, prefix = get_version(runner_build.build_data)
    milestone = project.get_milestone_by_name(name=milestone)

    test_plan_name = ' '.join(
        filter(lambda x: bool(x),
               (milestone['name'], prefix, 'iso', '#' + str(iso_number))))

    test_plan = project.get_plan_by_name(test_plan_name)
    iso_link = '/'.join([JENKINS['url'], 'job',
                         '{0}.all'.format(milestone['name']), str(iso_number)])
    if not test_plan:
        test_plan = project.add_plan(test_plan_name,
                                     description=iso_link,
                                     milestone_id=milestone['id'],
                                     entries=[]
                                     )
        logger.info('Created new TestPlan "{0}".'.format(test_plan_name))
    else:
        logger.info('Found existing TestPlan "{0}".'.format(test_plan_name))

    if options.create_plan_only:
        return

    plan_entries = []
    all_cases = project.get_cases(suite_id=tests_suite['id'])
    for os in operation_systems:
        # With --manual, restrict the run to cases whose test group actually
        # produced a result; otherwise include_all covers the suite.
        cases_ids = []
        if options.manual_run:
            all_results_groups = [r.group for r in tests_results[os['distro']]]
            for case in all_cases:
                if case['custom_test_group'] in all_results_groups:
                    cases_ids.append(case['id'])
        plan_entries.append(
            project.test_run_struct(
                name='{suite_name}'.format(suite_name=tests_suite['name']),
                suite_id=tests_suite['id'],
                milestone_id=milestone['id'],
                description='Results of system tests ({tests_suite}) on is'
                'o #"{iso_number}"'.format(tests_suite=tests_suite['name'],
                                           iso_number=iso_number),
                config_ids=[os['id']],
                include_all=True,
                case_ids=cases_ids
            )
        )

    # Only add the plan entry once per suite; re-fetch the plan so it
    # includes the freshly created runs.
    if not any(entry['suite_id'] == tests_suite['id']
               for entry in test_plan['entries']):
        if project.add_plan_entry(plan_id=test_plan['id'],
                                  suite_id=tests_suite['id'],
                                  config_ids=[os['id'] for os
                                              in operation_systems],
                                  runs=plan_entries):
            test_plan = project.get_plan(test_plan['id'])

    # STEP #4
    # Upload tests results to TestRail
    logger.info('Uploading tests results to TestRail...')
    for os in operation_systems:
        logger.info('Checking tests results for "{0}"...'.format(os['name']))
        results_to_publish = publish_results(
            project=project,
            milestone_id=milestone['id'],
            test_plan=test_plan,
            suite_id=tests_suite['id'],
            config_id=os['id'],
            results=tests_results[os['distro']]
        )
        logger.debug('Added new results for tests ({os}): {tests}'.format(
            os=os['name'], tests=[r.group for r in results_to_publish]
        ))

    logger.info('Report URL: {0}'.format(test_plan['url']))
def main():
    """Generate (and optionally publish) bug statistics for a TestRail plan.

    Command-line entry point.  The plan is identified either directly by
    ``plan_id`` or looked up via the Jenkins job name / build number.

    :return: 1 when no TestPlan could be resolved, otherwise None
    """
    parser = argparse.ArgumentParser(
        description="Generate statistics for bugs linked to TestRun. Publish " "statistics to testrail if necessary."
    )
    parser.add_argument("plan_id", type=int, nargs="?", default=None, help="Test plan ID in TestRail")
    parser.add_argument(
        "-j",
        "--job-name",
        dest="job_name",
        type=str,
        default=None,
        help="Name of Jenkins job which runs tests (runner). " "It will be used for TestPlan search instead ID",
    )
    parser.add_argument("-n", "--build-number", dest="build_number", default="latest", help="Jenkins job build number")
    parser.add_argument(
        "-r", "--run-id", dest="run_ids", type=str, default=None, help="(optional) IDs of TestRun to check (skip other)"
    )
    parser.add_argument(
        "-b",
        "--handle-blocked",
        action="store_true",
        dest="handle_blocked",
        default=False,
        help="Copy bugs links to downstream blocked results",
    )
    parser.add_argument(
        "-s",
        "--separate-runs",
        action="store_true",
        dest="separate_runs",
        default=False,
        help="Create separate statistics for each test run",
    )
    parser.add_argument("-p", "--publish", action="store_true", help="Publish statistics to TestPlan description")
    parser.add_argument(
        "-o",
        "--out-file",
        dest="output_file",
        default=None,
        type=str,
        help="Path to file to save statistics as JSON and/or " "HTML. Filename extension is added automatically",
    )
    parser.add_argument(
        "-H",
        "--html",
        action="store_true",
        help="Save statistics in HTML format to file " "(used with --out-file option)",
    )
    parser.add_argument(
        "-q",
        "--quiet",
        action="store_true",
        help="Be quiet (disable logging except critical) " 'Overrides "--verbose" option.',
    )
    parser.add_argument("-v", "--verbose", action="store_true", help="Enable debug logging.")

    args = parser.parse_args()

    # --quiet intentionally wins over --verbose (applied last).
    if args.verbose:
        logger.setLevel(DEBUG)

    if args.quiet:
        logger.setLevel(CRITICAL)

    testrail_project = get_testrail()

    # If a Jenkins job is given, resolve the TestPlan by its generated name
    # and override any plan_id passed on the command line.
    if args.job_name:
        logger.info(
            "Inspecting {0} build of {1} Jenkins job for TestPlan "
            "details...".format(args.build_number, args.job_name)
        )
        test_plan_name = generate_test_plan_name(args.job_name, args.build_number)
        test_plan = testrail_project.get_plan_by_name(test_plan_name)
        if test_plan:
            args.plan_id = test_plan["id"]
        else:
            logger.warning('TestPlan "{0}" not found!'.format(test_plan_name))

    if not args.plan_id:
        logger.error("There is no TestPlan to process, exiting...")
        return 1

    run_ids = () if not args.run_ids else tuple(int(arg) for arg in args.run_ids.split(","))

    generator = StatisticsGenerator(testrail_project, args.plan_id, run_ids, args.handle_blocked)
    generator.generate()
    stats = generator.dump()

    if args.publish:
        logger.debug("Publishing bugs statistics to TestRail..")
        generator.publish(stats)

    if args.output_file:
        # When --html is not set, `html` is falsy and only JSON is written.
        html = generator.dump_html(stats) if args.html else args.html
        save_stats_to_file(stats, args.output_file, html)

        if args.separate_runs:
            for run in generator.test_runs_stats:
                file_name = "{0}_{1}".format(args.output_file, run["id"])
                stats = generator.dump(run_id=run["id"])
                html = generator.dump_html(stats, run["id"]) if args.html else args.html
                save_stats_to_file(stats, file_name, html)

    logger.info("Statistics generation complete!")
예제 #52
0
def main():
    """CLI entry point: collect and report bug statistics for a TestRail plan.

    Resolves the target TestPlan either from the explicit ``plan_id``
    positional argument or by looking it up from a Jenkins job build name,
    then generates the statistics and optionally publishes them to TestRail
    and/or saves them to a file.

    :return: 1 if no TestPlan could be resolved (error already logged),
        None on success
    """
    args = _parse_args()

    # --quiet is applied after --verbose so it wins when both are given,
    # matching the help text ('Overrides "--verbose" option.').
    if args.verbose:
        logger.setLevel(DEBUG)

    if args.quiet:
        logger.setLevel(CRITICAL)

    testrail_project = get_testrail()

    # A Jenkins job name overrides the positional plan_id: derive the plan
    # name from the job/build and look it up in TestRail.
    if args.job_name:
        logger.info('Inspecting {0} build of {1} Jenkins job for TestPlan '
                    'details...'.format(args.build_number, args.job_name))
        test_plan_name = generate_test_plan_name(args.job_name,
                                                 args.build_number)
        test_plan = testrail_project.get_plan_by_name(test_plan_name)
        if test_plan:
            args.plan_id = test_plan['id']
        else:
            logger.warning('TestPlan "{0}" not found!'.format(test_plan_name))

    if not args.plan_id:
        logger.error('There is no TestPlan to process, exiting...')
        return 1

    # Comma-separated run IDs -> tuple of ints; empty tuple means "all runs".
    run_ids = () if not args.run_ids else tuple(
        int(arg) for arg in args.run_ids.split(','))

    generator = StatisticsGenerator(testrail_project,
                                    args.plan_id,
                                    run_ids,
                                    args.handle_blocked)
    generator.generate()
    stats = generator.dump()

    if args.publish:
        logger.debug('Publishing bugs statistics to TestRail..')
        generator.publish(stats)

    if args.output_file:
        # NOTE(review): when --html is off this passes False (args.html)
        # rather than None as the html payload — preserved as-is because
        # save_stats_to_file's handling of that value is defined elsewhere.
        html = generator.dump_html(stats) if args.html else args.html
        save_stats_to_file(stats, args.output_file, html)

        # Optionally dump one statistics file per test run in the plan.
        if args.separate_runs:
            for run in generator.test_runs_stats:
                file_name = '{0}_{1}'.format(args.output_file, run['id'])
                stats = generator.dump(run_id=run['id'])
                html = (generator.dump_html(stats, run['id']) if args.html
                        else args.html)
                save_stats_to_file(stats, file_name, html)

    logger.info('Statistics generation complete!')


def _parse_args():
    """Build the command-line parser and return the parsed arguments.

    :return: argparse.Namespace with the options documented below
    """
    parser = argparse.ArgumentParser(
        description="Generate statistics for bugs linked to TestRun. Publish "
                    "statistics to testrail if necessary."
    )
    parser.add_argument('plan_id', type=int, nargs='?', default=None,
                        help='Test plan ID in TestRail')
    parser.add_argument('-j', '--job-name',
                        dest='job_name', type=str, default=None,
                        help='Name of Jenkins job which runs tests (runner). '
                             'It will be used for TestPlan search instead ID')
    parser.add_argument('-n', '--build-number', dest='build_number',
                        default='latest', help='Jenkins job build number')
    parser.add_argument('-r', '--run-id',
                        dest='run_ids', type=str, default=None,
                        help='(optional) IDs of TestRun to check (skip other)')
    parser.add_argument('-b', '--handle-blocked', action="store_true",
                        dest='handle_blocked', default=False,
                        help='Copy bugs links to downstream blocked results')
    parser.add_argument('-s', '--separate-runs', action="store_true",
                        dest='separate_runs', default=False,
                        help='Create separate statistics for each test run')
    parser.add_argument('-p', '--publish', action="store_true",
                        help='Publish statistics to TestPlan description')
    parser.add_argument('-o', '--out-file', dest='output_file',
                        default=None, type=str,
                        help='Path to file to save statistics as JSON and/or '
                             'HTML. Filename extension is added automatically')
    parser.add_argument('-H', '--html', action="store_true",
                        help='Save statistics in HTML format to file '
                             '(used with --out-file option)')
    parser.add_argument('-q', '--quiet', action="store_true",
                        help='Be quiet (disable logging except critical) '
                             'Overrides "--verbose" option.')
    parser.add_argument("-v", "--verbose", action="store_true",
                        help="Enable debug logging.")
    return parser.parse_args()
예제 #53
0
def get_job_info(url):
    """Fetch a Jenkins job description from its JSON API endpoint.

    :param url: str, base URL of the Jenkins job
    :return: decoded JSON object (dict) describing the job
    """
    # Local import: the file's import header is outside this chunk, so the
    # dependency is brought into scope here.
    from contextlib import closing

    job_url = "/".join([url, 'api/json'])
    logger.debug("Request job info from %s", job_url)
    # Fix: the original leaked the HTTP response object (never closed).
    # contextlib.closing guarantees close() even if json.load raises.
    with closing(urlopen(job_url)) as response:
        return json.load(response)