Example #1
    def handle_blocked(self, test, result):
        if result['custom_launchpad_bug']:
            return False
        m = re.search(r'Blocked by "(\S+)" test.', result['comment'] or '')
        if m:
            blocked_test_group = m.group(1)
        else:
            logger.warning('Blocked result #{0} for test {1} does '
                           'not have upstream test name in its '
                           'comments!'.format(result['id'],
                                              test['custom_test_group']))
            return False

        if not result['version']:
            logger.debug('Blocked result #{0} for test {1} does '
                         'not have version, can\'t find upstream '
                         'test case!'.format(result['id'],
                                             test['custom_test_group']))
            return False

        bug_link = None
        blocked_test = self.get_test_by_group(blocked_test_group,
                                              result['version'])
        if not blocked_test:
            return False
        logger.debug('Test {0} was blocked by failed test {1}'.format(
            test['custom_test_group'], blocked_test_group))

        blocked_results = self.project.get_results_for_test(
            blocked_test['id'])

        # Since we manually add results to failed tests with statuses
        # ProdFailed, TestFailed, etc. and attach bugs links to them,
        # we could skip original version copying. So look for test
        # results with target version, but allow to copy links to bugs
        # from other results of the same test (newer are checked first)
        if not any(br['version'] == result['version'] and
                   br['status_id'] in self.failed_statuses
                   for br in blocked_results):
            logger.debug('Did not find result for test {0} with version '
                         '{1}!'.format(blocked_test_group, result['version']))
            return False

        for blocked_result in sorted(blocked_results,
                                     key=lambda x: x['id'],
                                     reverse=True):
            if blocked_result['status_id'] not in self.failed_statuses:
                continue

            if blocked_result['custom_launchpad_bug']:
                bug_link = blocked_result['custom_launchpad_bug']
                break

        if bug_link is not None:
            result['custom_launchpad_bug'] = bug_link
            self.project.add_raw_results_for_test(test['id'], result)
            logger.info('Added bug {0} to blocked result of {1} test.'.format(
                bug_link, test['custom_test_group']))
            return bug_link
        return False
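
The snippet above does two things that are easy to test in isolation: it pulls the upstream test group out of a 'Blocked by "..." test.' comment with a regex, and it walks the upstream test's results newest-first until it finds a failed result that carries a bug link. A minimal standalone sketch of just those two steps (the status ids and result data below are made up, not real TestRail values):

import re

FAILED_STATUSES = {5, 8}  # hypothetical TestRail status ids for "failed"-like states

def find_blocking_bug_link(comment, blocked_results):
    # Extract the upstream test group name from the blocking comment.
    m = re.search(r'Blocked by "(\S+)" test\.', comment or '')
    if not m:
        return None
    # Newest results first, mirroring sorted(..., key=lambda x: x['id'], reverse=True).
    for res in sorted(blocked_results, key=lambda x: x['id'], reverse=True):
        if res['status_id'] in FAILED_STATUSES and res['custom_launchpad_bug']:
            return res['custom_launchpad_bug']
    return None

print(find_blocking_bug_link(
    'Blocked by "deploy_ha" test.',
    [{'id': 1, 'status_id': 5,
      'custom_launchpad_bug': 'https://bugs.launchpad.net/fuel/+bug/1'},
     {'id': 2, 'status_id': 8, 'custom_launchpad_bug': None}]))
# -> https://bugs.launchpad.net/fuel/+bug/1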
Example #2
def get_existing_bug_link(previous_results):
    results_with_bug = [
        result for result in previous_results
        if result["custom_launchpad_bug"] is not None
    ]
    if not results_with_bug:
        return
    for result in sorted(results_with_bug,
                         key=lambda k: k['created_on'],
                         reverse=True):
        try:
            bug_id = int(
                result["custom_launchpad_bug"].strip('/').split('/')[-1])
        except ValueError:
            logger.warning('Link "{0}" doesn\'t contain bug id.'.format(
                result["custom_launchpad_bug"]))
            continue
        try:
            bug = LaunchpadBug(bug_id).get_duplicate_of()
        except KeyError:
            logger.warning("Bug with id '{bug_id}' is private or \
                doesn't exist.".format(bug_id=bug_id))
            continue
        except Exception:
            logger.exception("Strange situation with '{bug_id}' \
                issue".format(bug_id=bug_id))
            continue

        for target in bug.targets:
            if target['project'] == LaunchpadSettings.project and\
               target['milestone'] == LaunchpadSettings.milestone and\
               target['status'] not in LaunchpadSettings.closed_statuses:
                target['bug_link'] = result["custom_launchpad_bug"]
                return target
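
The bug id extraction here relies only on the shape of a Launchpad bug URL: strip any trailing slash and take the last path segment. A small self-contained sketch of that parsing with an illustrative link (the surrounding code already filters out results whose custom_launchpad_bug is None):

def parse_bug_id(bug_link):
    # Last path segment of the stored link must be an integer bug id.
    try:
        return int(bug_link.strip('/').split('/')[-1])
    except ValueError:
        return None

assert parse_bug_id('https://bugs.launchpad.net/fuel/+bug/1559796/') == 1559796
assert parse_bug_id('https://bugs.launchpad.net/fuel') is None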
Example #3
File: report.py  Project: avgoor/fuel-qa
def get_existing_bug_link(previous_results):
    results_with_bug = [result for result in previous_results if
                        result["custom_launchpad_bug"] is not None]
    if not results_with_bug:
        return
    for result in sorted(results_with_bug,
                         key=lambda k: k['created_on'],
                         reverse=True):
        try:
            bug_id = int(result["custom_launchpad_bug"].strip('/').split(
                '/')[-1])
        except ValueError:
            logger.warning('Link "{0}" doesn\'t contain bug id.'.format(
                result["custom_launchpad_bug"]))
            continue
        try:
            bug = LaunchpadBug(bug_id).get_duplicate_of()
        except KeyError:
            logger.warning("Bug with id '{bug_id}' is private or \
                doesn't exist.".format(bug_id=bug_id))
            continue
        except Exception:
            logger.exception("Strange situation with '{bug_id}' \
                issue".format(bug_id=bug_id))
            continue

        for target in bug.targets:
            if target['project'] == LaunchpadSettings.project and\
               target['milestone'] == LaunchpadSettings.milestone and\
               target['status'] not in LaunchpadSettings.closed_statuses:
                target['bug_link'] = result["custom_launchpad_bug"]
                return target
Example #4
File: builds.py  Project: dtsapikov/fuel-qa
    def test_data(self, result_path=None):
        try:
            data = self.get_test_data(self.url, result_path)
        except Exception as e:
            logger.warning("No test data for {0}: {1}".format(
                self.url,
                e,
            ))
            # If we failed to get any tests for the build, return
            # meta test case 'jenkins' with status 'failed'.
            data = {
                "suites": [
                    {
                        "cases": [
                            {
                                "name": "jenkins",
                                "className": "jenkins",
                                "status": "failed",
                                "duration": 0
                            }
                        ]
                    }
                ]
            }

        return data
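
The pattern above is fetch-with-fallback: if the Jenkins test report cannot be retrieved, a single synthetic 'jenkins' case marked as failed is substituted so downstream code still has a suites/cases structure to iterate over. A sketch of the same idea with a stubbed fetch function (the URL and error are placeholders):

def fetch_test_data(url):
    # Stand-in for the real HTTP call to Jenkins.
    raise IOError("no test report for {0}".format(url))

def safe_test_data(url):
    try:
        return fetch_test_data(url)
    except Exception as exc:
        print("No test data for {0}: {1}".format(url, exc))
        # Synthetic single-case report so callers can still iterate suites/cases.
        return {"suites": [{"cases": [{"name": "jenkins",
                                       "className": "jenkins",
                                       "status": "failed",
                                       "duration": 0}]}]}

print(safe_test_data("https://jenkins.example.com/job/x/1/"))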
Example #5
    def handle_blocked(self, test, result):
        if result['custom_launchpad_bug']:
            return False
        m = re.search(r'Blocked by "(\S+)" test.', result['comment'] or '')
        if m:
            blocked_test_group = m.group(1)
        else:
            logger.warning('Blocked result #{0} for test {1} does '
                           'not have upstream test name in its '
                           'comments!'.format(result['id'],
                                              test['custom_test_group']))
            return False

        if not result['version']:
            logger.debug('Blocked result #{0} for test {1} does '
                         'not have version, can\'t find upstream '
                         'test case!'.format(result['id'],
                                             test['custom_test_group']))
            return False

        bug_link = None
        blocked_test = self.get_test_by_group(blocked_test_group,
                                              result['version'])
        if not blocked_test:
            return False
        logger.debug('Test {0} was blocked by failed test {1}'.format(
            test['custom_test_group'], blocked_test_group))

        blocked_results = self.project.get_results_for_test(blocked_test['id'])

        # Since we manually add results to failed tests with statuses
        # ProdFailed, TestFailed, etc. and attach bugs links to them,
        # we could skip original version copying. So look for test
        # results with target version, but allow to copy links to bugs
        # from other results of the same test (newer are checked first)
        if not any(br['version'] == result['version']
                   and br['status_id'] in self.failed_statuses
                   for br in blocked_results):
            logger.debug('Did not find result for test {0} with version '
                         '{1}!'.format(blocked_test_group, result['version']))
            return False

        for blocked_result in sorted(blocked_results,
                                     key=lambda x: x['id'],
                                     reverse=True):
            if blocked_result['status_id'] not in self.failed_statuses:
                continue

            if blocked_result['custom_launchpad_bug']:
                bug_link = blocked_result['custom_launchpad_bug']
                break

        if bug_link is not None:
            result['custom_launchpad_bug'] = bug_link
            self.project.add_raw_results_for_test(test['id'], result)
            logger.info('Added bug {0} to blocked result of {1} test.'.format(
                bug_link, test['custom_test_group']))
            return bug_link
        return False
Example #6
def get_tests_descriptions(milestone_id, tests_include, tests_exclude, groups,
                           default_test_priority):
    plan = _create_test_plan_from_registry(groups=groups)
    all_plan_tests = plan.tests[:]

    tests = []

    for jenkins_suffix in groups:
        group = groups[jenkins_suffix]
        plan.filter(group_names=[group])
        for case in plan.tests:
            if not _is_case_processable(case=case, tests=tests):
                continue

            case_name = test_group = _get_test_case_name(case)

            if _is_not_included(case_name, tests_include) or \
                    _is_excluded(case_name, tests_exclude):
                continue

            docstring = _get_docstring(parent_home=case.entry.parent.home,
                                       case_state=case.state,
                                       home=case.entry.home)

            title, steps, duration = _parse_docstring(docstring, case)

            if case.entry.home.func_name in GROUPS_TO_EXPAND:
                """Expand specified test names with the group names that are
                   used in jenkins jobs where this test is started.
                """
                title = ' - '.join([title, jenkins_suffix])
                test_group = '_'.join([case.entry.home.func_name,
                                       jenkins_suffix])

            test_case = {
                "title": title,
                "type_id": 1,
                "milestone_id": milestone_id,
                "priority_id": default_test_priority,
                "estimate": duration,
                "refs": "",
                "custom_test_group": test_group,
                "custom_test_case_description": docstring or " ",
                "custom_test_case_steps": steps
            }

            if not any([x[GROUP_FIELD] == test_group for x in tests]):
                tests.append(test_case)
            else:
                logger.warning("Testcase '{0}' run in multiple Jenkins jobs!"
                               .format(test_group))

        plan.tests = all_plan_tests[:]

    return tests
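
The final check in the loop above de-duplicates cases by their test group before appending. A standalone sketch of that step, assuming GROUP_FIELD names the same TestRail custom field as the dict built above ('custom_test_group' here is that assumption):

GROUP_FIELD = 'custom_test_group'  # assumed value of the module-level constant

def add_unique_case(tests, test_case):
    # Keep one case per test group; warn instead of appending a duplicate.
    if any(t[GROUP_FIELD] == test_case[GROUP_FIELD] for t in tests):
        print("Testcase '{0}' run in multiple Jenkins jobs!"
              .format(test_case[GROUP_FIELD]))
    else:
        tests.append(test_case)

cases = []
add_unique_case(cases, {GROUP_FIELD: 'deploy_ha', 'title': 'Deploy HA'})
add_unique_case(cases, {GROUP_FIELD: 'deploy_ha', 'title': 'Deploy HA (bvt)'})
print(len(cases))  # 1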
Example #7
    def dump(self, run_id=None):
        stats = dict()

        if not run_id:
            joint_bugs_statistics = dict()
            for run in self.bugs_statistics:
                for bug, tests in self.bugs_statistics[run].items():
                    if bug in joint_bugs_statistics:
                        joint_bugs_statistics[bug].update(tests)
                    else:
                        joint_bugs_statistics[bug] = tests
        else:
            for _run_id, _stats in self.bugs_statistics.items():
                if _run_id == run_id:
                    joint_bugs_statistics = _stats

        for bug_id in joint_bugs_statistics:
            try:
                lp_bug = LaunchpadBug(bug_id).get_duplicate_of()
            except KeyError:
                logger.warning("Bug with ID {0} not found! Most probably it's "
                               "private or private security.".format(bug_id))
                continue
            bug_target = inspect_bug(lp_bug)

            if lp_bug.bug.id in stats:
                stats[lp_bug.bug.id]['tests'].update(
                    joint_bugs_statistics[bug_id])
            else:
                stats[lp_bug.bug.id] = {
                    'title': bug_target['title'],
                    'importance': bug_target['importance'],
                    'status': bug_target['status'],
                    'project': bug_target['project'],
                    'link': lp_bug.bug.web_link,
                    'tests': joint_bugs_statistics[bug_id]
                }
            stats[lp_bug.bug.id]['failed_num'] = len([
                t for t, v in stats[lp_bug.bug.id]['tests'].items()
                if not v['blocked']
            ])
            stats[lp_bug.bug.id]['blocked_num'] = len([
                t for t, v in stats[lp_bug.bug.id]['tests'].items()
                if v['blocked']
            ])

        return OrderedDict(
            sorted(stats.items(),
                   key=lambda x: (x[1]['failed_num'] + x[1]['blocked_num']),
                   reverse=True))
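
The return statement above orders bugs by how many results they affect. A self-contained sketch of that counting and ordering (the bug ids and test names are made up):

from collections import OrderedDict

stats = {
    1559796: {'tests': {'test_a': {'blocked': False}, 'test_b': {'blocked': True}}},
    1540000: {'tests': {'test_c': {'blocked': False}}},
}
for bug in stats.values():
    # Split each bug's tests into directly failed vs. blocked-by-failure.
    bug['failed_num'] = sum(1 for v in bug['tests'].values() if not v['blocked'])
    bug['blocked_num'] = sum(1 for v in bug['tests'].values() if v['blocked'])

ordered = OrderedDict(sorted(stats.items(),
                             key=lambda x: x[1]['failed_num'] + x[1]['blocked_num'],
                             reverse=True))
print(list(ordered))  # [1559796, 1540000]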
Example #8
    def dump(self, run_id=None):
        stats = dict()

        if not run_id:
            joint_bugs_statistics = dict()
            for run in self.bugs_statistics:
                for bug, tests in self.bugs_statistics[run].items():
                    if bug in joint_bugs_statistics:
                        joint_bugs_statistics[bug].update(tests)
                    else:
                        joint_bugs_statistics[bug] = tests
        else:
            for _run_id, _stats in self.bugs_statistics.items():
                if _run_id == run_id:
                    joint_bugs_statistics = _stats

        for bug_id in joint_bugs_statistics:
            try:
                lp_bug = LaunchpadBug(bug_id).get_duplicate_of()
            except KeyError:
                logger.warning("Bug with ID {0} not found! Most probably it's "
                               "private or private security.".format(bug_id))
                continue
            bug_target = inspect_bug(lp_bug)

            if lp_bug.bug.id in stats:
                stats[lp_bug.bug.id]['tests'].update(
                    joint_bugs_statistics[bug_id])
            else:
                stats[lp_bug.bug.id] = {
                    'title': bug_target['title'],
                    'importance': bug_target['importance'],
                    'status': bug_target['status'],
                    'project': bug_target['project'],
                    'link': lp_bug.bug.web_link,
                    'tests': joint_bugs_statistics[bug_id]
                }
            stats[lp_bug.bug.id]['failed_num'] = len(
                [t for t, v in stats[lp_bug.bug.id]['tests'].items()
                 if not v['blocked']])
            stats[lp_bug.bug.id]['blocked_num'] = len(
                [t for t, v in stats[lp_bug.bug.id]['tests'].items()
                 if v['blocked']])

        return OrderedDict(sorted(stats.items(),
                                  key=lambda x: (x[1]['failed_num'] +
                                                 x[1]['blocked_num']),
                                  reverse=True))
Example #9
    def test_data(self, result_path=None):
        try:
            data = self.get_test_data(self.url, result_path)
        except Exception as e:
            logger.warning("No test data for {0}: {1}".format(
                self.url,
                e,
            ))
            # If we failed to get any tests for the build, return
            # meta test case 'jenkins' with status 'failed'.
            data = {
                "suites": [{
                    "cases": [{
                        "name": "jenkins",
                        "className": "jenkins",
                        "status": "failed",
                        "duration": 0
                    }]
                }]
            }

        return data
Example #10
File: report.py  Project: avgoor/fuel-qa
def get_tests_results(systest_build, os):
    tests_results = []
    test_build = Build(systest_build['name'], systest_build['number'])
    run_test_data = test_build.test_data()
    test_classes = {}
    for one in run_test_data['suites'][0]['cases']:
        class_name = one['className']
        if class_name not in test_classes:
            test_classes[class_name] = {}
            test_classes[class_name]['child'] = []
            test_classes[class_name]['duration'] = 0
            test_classes[class_name]["failCount"] = 0
            test_classes[class_name]["passCount"] = 0
            test_classes[class_name]["skipCount"] = 0
        else:
            if one['className'] == one['name']:
                logger.warning("Found duplicate test in run - {}".format(
                    one['className']))
                continue

        test_class = test_classes[class_name]
        test_class['child'].append(one)
        test_class['duration'] += float(one['duration'])
        if one['status'].lower() in ('failed', 'error'):
            test_class["failCount"] += 1
        if one['status'].lower() == 'passed':
            test_class["passCount"] += 1
        if one['status'].lower() == 'skipped':
            test_class["skipCount"] += 1

    for klass in test_classes:
        klass_result = test_classes[klass]
        if len(klass_result['child']) == 1:
            test = klass_result['child'][0]
            if check_untested(test):
                continue
            check_blocked(test)
            test_result = TestResult(
                name=test['name'],
                group=expand_test_group(test['className'],
                                        systest_build['name'],
                                        os),
                status=test['status'].lower(),
                duration='{0}s'.format(int(test['duration']) + 1),
                url='{0}testReport/(root)/{1}/'.format(test_build.url,
                                                       test['name']),
                version='_'.join(
                    [test_build.build_data["id"]] + (
                        test_build.build_data["description"] or
                        test['name']).split()),
                description=test_build.build_data["description"] or
                    test['name'],
                comments=test['skippedMessage']
            )
        else:
            case_steps = []
            test_duration = sum(
                [float(c['duration']) for c in klass_result['child']])
            steps = [c for c in klass_result['child']
                     if c['name'].startswith('Step')]
            steps = sorted(steps, key=lambda k: k['name'])
            test_name = steps[0]['className']
            test_group = steps[0]['className']
            test_comments = None
            is_test_failed = any([s['status'].lower() in ('failed', 'error')
                                  for s in steps])

            for step in steps:
                if step['status'].lower() in ('failed', 'error'):
                    case_steps.append({
                        "content": step['name'],
                        "actual": step['errorStackTrace'] or
                        step['errorDetails'],
                        "status": step['status'].lower()})
                    test_comments = "{err}\n\n\n{stack}".format(
                        err=step['errorDetails'],
                        stack=step['errorStackTrace'])
                else:
                    case_steps.append({
                        "content": step['name'],
                        "actual": "pass",
                        "status": step['status'].lower()
                    })
            test_result = TestResult(
                name=test_name,
                group=expand_test_group(test_group,
                                        systest_build['name'],
                                        os),
                status='failed' if is_test_failed else 'passed',
                duration='{0}s'.format(int(test_duration) + 1),
                url='{0}testReport/(root)/{1}/'.format(test_build.url,
                                                       test_name),
                version='_'.join(
                    [test_build.build_data["id"]] + (
                        test_build.build_data["description"] or
                        test_name).split()),
                description=test_build.build_data["description"] or
                    test_name,
                comments=test_comments,
                steps=case_steps,
            )
        tests_results.append(test_result)
    return tests_results
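
The first loop above buckets Jenkins JUnit cases by className and keeps per-class duration and pass/fail/skip counters. A compact standalone sketch of that grouping pass (it omits the duplicate-test warning; the case data is made up):

def group_cases(cases):
    classes = {}
    for case in cases:
        # One bucket per className, created lazily with zeroed counters.
        cls = classes.setdefault(case['className'],
                                 {'child': [], 'duration': 0.0,
                                  'failCount': 0, 'passCount': 0, 'skipCount': 0})
        cls['child'].append(case)
        cls['duration'] += float(case['duration'])
        status = case['status'].lower()
        if status in ('failed', 'error'):
            cls['failCount'] += 1
        elif status == 'passed':
            cls['passCount'] += 1
        elif status == 'skipped':
            cls['skipCount'] += 1
    return classes

grouped = group_cases([
    {'className': 'deploy_ha', 'name': 'Step 001', 'status': 'passed', 'duration': 12.5},
    {'className': 'deploy_ha', 'name': 'Step 002', 'status': 'failed', 'duration': 3.0},
])
print(grouped['deploy_ha']['failCount'])  # 1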
Example #11
def main():
    parser = argparse.ArgumentParser(
        description="Generate statistics for bugs linked to TestRun. Publish " "statistics to testrail if necessary."
    )
    parser.add_argument("plan_id", type=int, nargs="?", default=None, help="Test plan ID in TestRail")
    parser.add_argument(
        "-j",
        "--job-name",
        dest="job_name",
        type=str,
        default=None,
        help="Name of Jenkins job which runs tests (runner). " "It will be used for TestPlan search instead ID",
    )
    parser.add_argument("-n", "--build-number", dest="build_number", default="latest", help="Jenkins job build number")
    parser.add_argument(
        "-r", "--run-id", dest="run_ids", type=str, default=None, help="(optional) IDs of TestRun to check (skip other)"
    )
    parser.add_argument(
        "-b",
        "--handle-blocked",
        action="store_true",
        dest="handle_blocked",
        default=False,
        help="Copy bugs links to downstream blocked results",
    )
    parser.add_argument(
        "-s",
        "--separate-runs",
        action="store_true",
        dest="separate_runs",
        default=False,
        help="Create separate statistics for each test run",
    )
    parser.add_argument("-p", "--publish", action="store_true", help="Publish statistics to TestPlan description")
    parser.add_argument(
        "-o",
        "--out-file",
        dest="output_file",
        default=None,
        type=str,
        help="Path to file to save statistics as JSON and/or " "HTML. Filename extension is added automatically",
    )
    parser.add_argument(
        "-H",
        "--html",
        action="store_true",
        help="Save statistics in HTML format to file " "(used with --out-file option)",
    )
    parser.add_argument(
        "-q",
        "--quiet",
        action="store_true",
        help="Be quiet (disable logging except critical) " 'Overrides "--verbose" option.',
    )
    parser.add_argument("-v", "--verbose", action="store_true", help="Enable debug logging.")

    args = parser.parse_args()

    if args.verbose:
        logger.setLevel(DEBUG)

    if args.quiet:
        logger.setLevel(CRITICAL)

    testrail_project = get_testrail()

    if args.job_name:
        logger.info(
            "Inspecting {0} build of {1} Jenkins job for TestPlan "
            "details...".format(args.build_number, args.job_name)
        )
        test_plan_name = generate_test_plan_name(args.job_name, args.build_number)
        test_plan = testrail_project.get_plan_by_name(test_plan_name)
        if test_plan:
            args.plan_id = test_plan["id"]
        else:
            logger.warning('TestPlan "{0}" not found!'.format(test_plan_name))

    if not args.plan_id:
        logger.error("There is no TestPlan to process, exiting...")
        return 1

    run_ids = () if not args.run_ids else tuple(int(arg) for arg in args.run_ids.split(","))

    generator = StatisticsGenerator(testrail_project, args.plan_id, run_ids, args.handle_blocked)
    generator.generate()
    stats = generator.dump()

    if args.publish:
        logger.debug("Publishing bugs statistics to TestRail..")
        generator.publish(stats)

    if args.output_file:
        html = generator.dump_html(stats) if args.html else args.html
        save_stats_to_file(stats, args.output_file, html)

        if args.separate_runs:
            for run in generator.test_runs_stats:
                file_name = "{0}_{1}".format(args.output_file, run["id"])
                stats = generator.dump(run_id=run["id"])
                html = generator.dump_html(stats, run["id"]) if args.html else args.html
                save_stats_to_file(stats, file_name, html)

    logger.info("Statistics generation complete!")
Example #12
def warn_file_exists(file_path):
    if os.path.exists(file_path):
        logger.warning('File {0} exists and will be '
                       'overwritten!'.format(file_path))
Example #13
def get_tests_results(systest_build, os):
    tests_results = []
    test_build = get_test_build(systest_build['name'],
                                systest_build['number'],
                                check_rebuild=True)
    run_test_data = test_build.test_data()
    test_classes = {}
    for one in run_test_data['suites'][0]['cases']:
        class_name = one['className']
        if class_name not in test_classes:
            test_classes[class_name] = {}
            test_classes[class_name]['child'] = []
            test_classes[class_name]['duration'] = 0
            test_classes[class_name]["failCount"] = 0
            test_classes[class_name]["passCount"] = 0
            test_classes[class_name]["skipCount"] = 0
        else:
            if one['className'] == one['name']:
                logger.warning("Found duplicate test in run - {}".format(
                    one['className']))
                continue

        test_class = test_classes[class_name]
        test_class['child'].append(one)
        test_class['duration'] += float(one['duration'])
        if one['status'].lower() in ('failed', 'error'):
            test_class["failCount"] += 1
        if one['status'].lower() == 'passed':
            test_class["passCount"] += 1
        if one['status'].lower() == 'skipped':
            test_class["skipCount"] += 1

    for klass in test_classes:
        klass_result = test_classes[klass]
        fuel_tests_results = []
        if klass.startswith('fuel_tests.'):
            for one in klass_result['child']:
                test_name = one['name']
                test_package, _, test_class = one['className'].rpartition('.')
                test_result = TestResult(
                    name=test_name,
                    group=expand_test_group(one['name'], systest_build['name'],
                                            os),
                    status=one['status'].lower(),
                    duration='{0}s'.format(int(one['duration']) + 1),
                    url='{0}testReport/{1}/{2}/{3}'.format(
                        test_build.url, test_package, test_class, test_name),
                    version='_'.join([test_build.build_data["id"]] +
                                     (test_build.build_data["description"]
                                      or test_name).split()),
                    description=(test_build.build_data["description"]
                                 or test_name),
                    comments=one['skippedMessage'],
                )
                fuel_tests_results.append(test_result)
        elif len(klass_result['child']) == 1:
            test = klass_result['child'][0]
            if check_untested(test):
                continue
            check_blocked(test)
            test_result = TestResult(
                name=test['name'],
                group=expand_test_group(test['className'],
                                        systest_build['name'], os),
                status=test['status'].lower(),
                duration='{0}s'.format(int(test['duration']) + 1),
                url='{0}testReport/(root)/{1}/'.format(test_build.url,
                                                       test['name']),
                version='_'.join([test_build.build_data["id"]] +
                                 (test_build.build_data["description"]
                                  or test['name']).split()),
                description=test_build.build_data["description"]
                or test['name'],
                comments=test['skippedMessage'])
        else:
            case_steps = []
            test_duration = sum(
                [float(c['duration']) for c in klass_result['child']])
            steps = [
                c for c in klass_result['child']
                if c['name'].startswith('Step')
            ]
            steps = sorted(steps, key=lambda k: k['name'])
            test_name = steps[0]['className']
            test_group = steps[0]['className']
            test_comments = None
            is_test_failed = any(
                [s['status'].lower() in ('failed', 'error') for s in steps])

            for step in steps:
                if step['status'].lower() in ('failed', 'error'):
                    case_steps.append({
                        "content":
                        step['name'],
                        "actual":
                        step['errorStackTrace'] or step['errorDetails'],
                        "status":
                        step['status'].lower()
                    })
                    test_comments = "{err}\n\n\n{stack}".format(
                        err=step['errorDetails'],
                        stack=step['errorStackTrace'])
                else:
                    case_steps.append({
                        "content": step['name'],
                        "actual": "pass",
                        "status": step['status'].lower()
                    })
            test_result = TestResult(
                name=test_name,
                group=expand_test_group(test_group, systest_build['name'], os),
                status='failed' if is_test_failed else 'passed',
                duration='{0}s'.format(int(test_duration) + 1),
                url='{0}testReport/(root)/{1}/'.format(test_build.url,
                                                       test_name),
                version='_'.join([test_build.build_data["id"]] +
                                 (test_build.build_data["description"]
                                  or test_name).split()),
                description=test_build.build_data["description"] or test_name,
                comments=test_comments,
                steps=case_steps,
            )
        if fuel_tests_results:
            tests_results.extend(fuel_tests_results)
        else:
            tests_results.append(test_result)
    return tests_results
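
For fuel_tests.* cases this variant splits className into a package path and a class with rpartition('.') so it can build the testReport URL. A short sketch of that split with a made-up class name and Jenkins base URL:

class_name = 'fuel_tests.tests.test_deploy.TestDeployHA'
# Everything before the last dot is the package path, the rest is the class.
test_package, _, test_class = class_name.rpartition('.')
print(test_package)  # fuel_tests.tests.test_deploy
print(test_class)    # TestDeployHA

url = '{0}testReport/{1}/{2}/{3}'.format(
    'https://jenkins.example.com/job/x/1/', test_package, test_class, 'test_ha')
print(url)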
Example #14
                test_case = {
                    "title": title,
                    "type_id": 1,
                    "milestone_id": milestone_id,
                    "priority_id": default_test_priority,
                    "estimate": duration,
                    "refs": "",
                    "custom_test_group": test_group,
                    "custom_test_case_description": docstring or " ",
                    "custom_test_case_steps": steps
                }

                if not any([x[GROUP_FIELD] == test_group for x in tests]):
                    tests.append(test_case)
                else:
                    logger.warning("Testcase '{0}' run in multiple "
                                   "Jenkins jobs!".format(test_group))

            plan.tests = all_plan_tests[:]

    return tests


def upload_tests_descriptions(testrail_project, section_id, tests,
                              check_all_sections):
    tests_suite = testrail_project.get_suite_by_name(
        TestRailSettings.tests_suite)
    check_section = None if check_all_sections else section_id
    cases = testrail_project.get_cases(suite_id=tests_suite['id'],
                                       section_id=check_section)
    existing_cases = [case[GROUP_FIELD] for case in cases]
    custom_cases_fields = _get_custom_cases_fields(
Example #15
def warn_file_exists(file_path):
    if os.path.exists(file_path):
        logger.warning('File {0} exists and will be '
                       'overwritten!'.format(file_path))
Example #16
def main():
    parser = argparse.ArgumentParser(
        description="Generate statistics for bugs linked to TestRun. Publish "
                    "statistics to testrail if necessary."
    )
    parser.add_argument('plan_id', type=int, nargs='?', default=None,
                        help='Test plan ID in TestRail')
    parser.add_argument('-j', '--job-name',
                        dest='job_name', type=str, default=None,
                        help='Name of Jenkins job which runs tests (runner). '
                             'It will be used for TestPlan search instead ID')
    parser.add_argument('-n', '--build-number', dest='build_number',
                        default='latest', help='Jenkins job build number')
    parser.add_argument('-r', '--run-id',
                        dest='run_ids', type=str, default=None,
                        help='(optional) IDs of TestRun to check (skip other)')
    parser.add_argument('-b', '--handle-blocked', action="store_true",
                        dest='handle_blocked', default=False,
                        help='Copy bugs links to downstream blocked results')
    parser.add_argument('-s', '--separate-runs', action="store_true",
                        dest='separate_runs', default=False,
                        help='Create separate statistics for each test run')
    parser.add_argument('-p', '--publish', action="store_true",
                        help='Publish statistics to TestPlan description')
    parser.add_argument('-o', '--out-file', dest='output_file',
                        default=None, type=str,
                        help='Path to file to save statistics as JSON and/or '
                             'HTML. Filename extension is added automatically')
    parser.add_argument('-H', '--html', action="store_true",
                        help='Save statistics in HTML format to file '
                             '(used with --out-file option)')
    parser.add_argument('-q', '--quiet', action="store_true",
                        help='Be quiet (disable logging except critical) '
                             'Overrides "--verbose" option.')
    parser.add_argument("-v", "--verbose", action="store_true",
                        help="Enable debug logging.")

    args = parser.parse_args()

    if args.verbose:
        logger.setLevel(DEBUG)

    if args.quiet:
        logger.setLevel(CRITICAL)

    testrail_project = get_testrail()

    if args.job_name:
        logger.info('Inspecting {0} build of {1} Jenkins job for TestPlan '
                    'details...'.format(args.build_number, args.job_name))
        test_plan_name = generate_test_plan_name(args.job_name,
                                                 args.build_number)
        test_plan = testrail_project.get_plan_by_name(test_plan_name)
        if test_plan:
            args.plan_id = test_plan['id']
        else:
            logger.warning('TestPlan "{0}" not found!'.format(test_plan_name))

    if not args.plan_id:
        logger.error('There is no TestPlan to process, exiting...')
        return 1

    run_ids = () if not args.run_ids else tuple(
        int(arg) for arg in args.run_ids.split(','))

    generator = StatisticsGenerator(testrail_project,
                                    args.plan_id,
                                    run_ids,
                                    args.handle_blocked)
    generator.generate()
    stats = generator.dump()

    if args.publish:
        logger.debug('Publishing bugs statistics to TestRail..')
        generator.publish(stats)

    if args.output_file:
        html = generator.dump_html(stats) if args.html else args.html
        save_stats_to_file(stats, args.output_file, html)

        if args.separate_runs:
            for run in generator.test_runs_stats:
                file_name = '{0}_{1}'.format(args.output_file, run['id'])
                stats = generator.dump(run_id=run['id'])
                html = (generator.dump_html(stats, run['id']) if args.html
                        else args.html)
                save_stats_to_file(stats, file_name, html)

    logger.info('Statistics generation complete!')
Example #17
def main():
    parser = argparse.ArgumentParser(
        description="Generate statistics for bugs linked to TestRun. Publish "
        "statistics to testrail if necessary.")
    parser.add_argument('plan_id',
                        type=int,
                        nargs='?',
                        default=None,
                        help='Test plan ID in TestRail')
    parser.add_argument('-j',
                        '--job-name',
                        dest='job_name',
                        type=str,
                        default=None,
                        help='Name of Jenkins job which runs tests (runner). '
                        'It will be used for TestPlan search instead ID')
    parser.add_argument('-n',
                        '--build-number',
                        dest='build_number',
                        default='latest',
                        help='Jenkins job build number')
    parser.add_argument('-r',
                        '--run-id',
                        dest='run_ids',
                        type=str,
                        default=None,
                        help='(optional) IDs of TestRun to check (skip other)')
    parser.add_argument('-b',
                        '--handle-blocked',
                        action="store_true",
                        dest='handle_blocked',
                        default=False,
                        help='Copy bugs links to downstream blocked results')
    parser.add_argument('-s',
                        '--separate-runs',
                        action="store_true",
                        dest='separate_runs',
                        default=False,
                        help='Create separate statistics for each test run')
    parser.add_argument('-p',
                        '--publish',
                        action="store_true",
                        help='Publish statistics to TestPlan description')
    parser.add_argument('-o',
                        '--out-file',
                        dest='output_file',
                        default=None,
                        type=str,
                        help='Path to file to save statistics as JSON and/or '
                        'HTML. Filename extension is added automatically')
    parser.add_argument('-H',
                        '--html',
                        action="store_true",
                        help='Save statistics in HTML format to file '
                        '(used with --out-file option)')
    parser.add_argument('-q',
                        '--quiet',
                        action="store_true",
                        help='Be quiet (disable logging except critical) '
                        'Overrides "--verbose" option.')
    parser.add_argument("-v",
                        "--verbose",
                        action="store_true",
                        help="Enable debug logging.")

    args = parser.parse_args()

    if args.verbose:
        logger.setLevel(DEBUG)

    if args.quiet:
        logger.setLevel(CRITICAL)

    testrail_project = get_testrail()

    if args.job_name:
        logger.info('Inspecting {0} build of {1} Jenkins job for TestPlan '
                    'details...'.format(args.build_number, args.job_name))
        test_plan_name = generate_test_plan_name(args.job_name,
                                                 args.build_number)
        test_plan = testrail_project.get_plan_by_name(test_plan_name)
        if test_plan:
            args.plan_id = test_plan['id']
        else:
            logger.warning('TestPlan "{0}" not found!'.format(test_plan_name))

    if not args.plan_id:
        logger.error('There is no TestPlan to process, exiting...')
        return 1

    run_ids = () if not args.run_ids else tuple(
        int(arg) for arg in args.run_ids.split(','))

    generator = StatisticsGenerator(testrail_project, args.plan_id, run_ids,
                                    args.handle_blocked)
    generator.generate()
    stats = generator.dump()

    if args.publish:
        logger.debug('Publishing bugs statistics to TestRail..')
        generator.publish(stats)

    if args.output_file:
        html = generator.dump_html(stats) if args.html else args.html
        save_stats_to_file(stats, args.output_file, html)

        if args.separate_runs:
            for run in generator.test_runs_stats:
                file_name = '{0}_{1}'.format(args.output_file, run['id'])
                stats = generator.dump(run_id=run['id'])
                html = (generator.dump_html(stats, run['id'])
                        if args.html else args.html)
                save_stats_to_file(stats, file_name, html)

    logger.info('Statistics generation complete!')