Example #1
def publish_results(project, milestone_id, test_plan, suite_id, config_id,
                    results):
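    """Publish test results to the matching test run of a TestRail plan.

    Results whose version is already reported are skipped; for results
    that are neither passed nor blocked, an existing Launchpad bug link
    found in previous runs is attached before everything is uploaded in
    a single request.
    """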
    test_run_ids = [
        run['id'] for entry in test_plan['entries'] for run in entry['runs']
        if suite_id == run['suite_id'] and config_id in run['config_ids']
    ]
    logger.debug('Looking for previous test runs on "{0}" using test suite '
                 '"{1}"...'.format(
                     project.get_config(config_id)['name'],
                     project.get_suite(suite_id)['name']))
    previous_tests_runs = project.get_previous_runs(
        milestone_id=milestone_id,
        suite_id=suite_id,
        config_id=config_id,
        limit=TestRailSettings.previous_results_depth)
    logger.debug('Found the following test runs: {0}'.format(
        [test_run['description'] for test_run in previous_tests_runs]))
    cases = project.get_cases(suite_id=suite_id)
    tests = project.get_tests(run_id=test_run_ids[0])
    results_to_publish = []

    for result in results:
        test = project.get_test_by_group(run_id=test_run_ids[0],
                                         group=result.group,
                                         tests=tests)
        if not test:
            logger.error("Test for '{0}' group not found: {1}".format(
                result.group, result.url))
            continue
        existing_results_versions = [
            r['version'] for r in project.get_results_for_test(test['id'])
        ]
        if result.version in existing_results_versions:
            continue
        if result.status not in ('passed', 'blocked'):
            case_id = project.get_case_by_group(suite_id=suite_id,
                                                group=result.group,
                                                cases=cases)['id']
            run_ids = [
                run['id']
                for run in previous_tests_runs[0:int(TestRailSettings.
                                                     previous_results_depth)]
            ]
            previous_results = project.get_all_results_for_case(
                run_ids=run_ids, case_id=case_id)
            lp_bug = get_existing_bug_link(previous_results)
            if lp_bug:
                result.launchpad_bug = lp_bug['bug_link']
        results_to_publish.append(result)

    try:
        if len(results_to_publish) > 0:
            project.add_results_for_cases(run_id=test_run_ids[0],
                                          suite_id=suite_id,
                                          tests_results=results_to_publish)
    except:
        logger.error('Failed to add new results for tests: {0}'.format(
            [r.group for r in results_to_publish]))
        raise
    return results_to_publish
Example #2
def get_tests_groups_from_jenkins(runner_name, build_number, distros):
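    """Map Jenkins sub-job suffixes to the test groups they executed.

    Walks the sub-builds of the runner build, parses each job console for
    the '--group=' option passed to run_system_test.py and returns a
    {job_suffix: test_group} dictionary.
    """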
    runner_build = Build(runner_name, build_number)
    res = {}
    for b in runner_build.build_data['subBuilds']:

        if b['result'] is None:
            logger.debug("Skipping '{0}' job (build #{1}) because it's still "
                         "running...".format(b['jobName'], b['buildNumber'],))
            continue

        # Get the test group from the console of the job
        z = Build(b['jobName'], b['buildNumber'])
        console = z.get_job_console()
        groups = [keyword.split('=')[1]
                  for line in console
                  for keyword in line.split()
                  if 'run_system_test.py' in line and '--group=' in keyword]
        if not groups:
            logger.error("No test group found in console of the job {0}/{1}"
                         .format(b['jobName'], b['buildNumber']))
            continue
        # Use the last group (there can be several groups in upgrade jobs)
        test_group = groups[-1]

        # Get the job suffix
        job_name = b['jobName']
        for distro in distros:
            if distro in job_name:
                sep = '.' + distro + '.'
                job_suffix = job_name.split(sep)[-1]
                break
        else:
            job_suffix = job_name.split('.')[-1]
        res[job_suffix] = test_group
    return res
Example #3
def get_tests_groups_from_jenkins(runner_name, build_number, distros):
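    """Map Jenkins sub-job suffixes to their test group and job metadata.

    Falls back to the runner build itself when there are no sub-builds,
    and derives the group from the job name for baremetal pre-setup jobs.
    Returns {job_suffix: {'group': ..., 'job_info': ..., 'env_vars': ...}}.
    """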
    runner_build = Build(runner_name, build_number)
    res = {}
    sub_builds = \
        runner_build.build_data.get('subBuilds', [runner_build.build_data])
    for b in sub_builds:
        if b['result'] is None:
            logger.debug("Skipping '{0}' job (build #{1}) because it's still "
                         "running...".format(
                             b['jobName'],
                             b['buildNumber'],
                         ))
            continue

        # Get the test group from the console of the job
        # Get the job suffix
        if b.get('jobName'):
            z = Build(b['jobName'], b['buildNumber'])
            console = z.get_job_console()
            job_name = b['jobName']
            job_info = z.job_info
            env_vars = z.injected_vars
        else:
            console = runner_build.get_job_console()
            job_name = runner_build.name
            job_info = runner_build.job_info
            env_vars = runner_build.injected_vars

        groups = re.findall(TEST_GROUP_PATTERN, console)

        if not groups:
            # maybe it's failed baremetal job?
            # because of a design baremetal tests run pre-setup job
            # and when it fails there are no test groups in common meaning:
            # groups which could be parsed by TEST_GROUP_PATTERN
            baremetal_pattern = re.compile(r'Jenkins Build.*jenkins-(.*)-\d+')
            baremetal_groups = re.findall(baremetal_pattern, console)
            if not baremetal_groups:
                logger.error(
                    "No test group found in console of the job {0}/{1}".format(
                        b['jobName'], b['buildNumber']))
                continue
            # we should get the group via jobName because the test group name
            # inside the log could be cut and some symbols will be changed to *
            groups = b['jobName'].split('.')
        # Use the last group (there can be several groups in upgrade jobs)
        test_group = groups[-1]

        for distro in distros:
            if distro in job_name:
                sep = '.' + distro + '.'
                job_suffix = job_name.split(sep)[-1]
                break
        else:
            job_suffix = job_name.split('.')[-1]
        res[job_suffix] = \
            {'group': test_group, 'job_info': job_info, 'env_vars': env_vars}
    return res
Example #4
File: report.py Project: avgoor/fuel-qa
def publish_results(project, milestone_id, test_plan,
                    suite_id, config_id, results):
    test_run_ids = [run['id'] for entry in test_plan['entries']
                    for run in entry['runs'] if suite_id == run['suite_id'] and
                    config_id in run['config_ids']]
    logger.debug('Looking for previous test runs on "{0}" using test suite '
                 '"{1}"...'.format(project.get_config(config_id)['name'],
                                   project.get_suite(suite_id)['name']))
    previous_tests_runs = project.get_previous_runs(
        milestone_id=milestone_id,
        suite_id=suite_id,
        config_id=config_id,
        limit=TestRailSettings.previous_results_depth)
    logger.debug('Found the following test runs: {0}'.format(
        [test_run['description'] for test_run in previous_tests_runs]))
    cases = project.get_cases(suite_id=suite_id)
    tests = project.get_tests(run_id=test_run_ids[0])
    results_to_publish = []

    for result in results:
        test = project.get_test_by_group(run_id=test_run_ids[0],
                                         group=result.group,
                                         tests=tests)
        if not test:
            logger.error("Test for '{0}' group not found: {1}".format(
                result.group, result.url))
            continue
        existing_results_versions = [r['version'] for r in
                                     project.get_results_for_test(test['id'])]
        if result.version in existing_results_versions:
            continue
        if result.status not in ('passed', 'blocked'):
            case_id = project.get_case_by_group(suite_id=suite_id,
                                                group=result.group,
                                                cases=cases)['id']
            run_ids = [run['id'] for run in previous_tests_runs[0:
                       int(TestRailSettings.previous_results_depth)]]
            previous_results = project.get_all_results_for_case(
                run_ids=run_ids,
                case_id=case_id)
            lp_bug = get_existing_bug_link(previous_results)
            if lp_bug:
                result.launchpad_bug = lp_bug['bug_link']
        results_to_publish.append(result)

    try:
        if len(results_to_publish) > 0:
            project.add_results_for_cases(run_id=test_run_ids[0],
                                          suite_id=suite_id,
                                          tests_results=results_to_publish)
    except:
        logger.error('Failed to add new results for tests: {0}'.format(
            [r.group for r in results_to_publish]
        ))
        raise
    return results_to_publish
Example #5
 def get_test_by_group(self, group, version):
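     """Return the test whose 'custom_test_group' matches the given group.

     For groups listed in GROUPS_TO_EXPAND the test thread is extracted
     from the version string and appended to the group name first.
     """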
     if group in GROUPS_TO_EXPAND:
         m = re.search(r'^\d+_(\S+)_on_[\d\.]+', version)
         if m:
             tests_thread = m.group(1)
             group = '{0}_{1}'.format(group, tests_thread)
     for test in self.tests:
         if test['custom_test_group'] == group:
             return test
     logger.error('Test with group "{0}" not found!'.format(group))
Example #6
 def get_test_by_group(self, group, version):
     if group in GROUPS_TO_EXPAND:
         m = re.search(r'^\d+_(\S+)_on_[\d\.]+', version)
         if m:
             tests_thread = m.group(1)
             group = '{0}_{1}'.format(group, tests_thread)
     for test in self.tests:
         if test['custom_test_group'] == group:
             return test
     logger.error('Test with group "{0}" not found!'.format(group))
Example #7
 def get_all_results_for_case(self, run_ids, case_id):
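     """Collect results for a single test case across several test runs.

     Runs that raise APIError are logged and skipped.
     """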
     all_results = []
     for run_id in run_ids:
         try:
             results = self.get_results_for_case(run_id=run_id,
                                                 case_id=case_id)
         except APIError as e:
             logger.error("[{0}], run_id={1}, case_id={2}"
                          .format(e, run_id, case_id))
             continue
         all_results.extend(results)
     return all_results
Example #8
 def get_all_results_for_case(self, run_ids, case_id):
     all_results = []
     for run_id in run_ids:
         try:
             results = self.get_results_for_case(run_id=run_id,
                                                 case_id=case_id)
         except APIError as e:
             logger.error("[{0}], run_id={1}, case_id={2}".format(
                 e, run_id, case_id))
             continue
         all_results.extend(results)
     return all_results
Example #9
 def get_test_by_group(self, group, version):
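     """Return the test whose 'custom_test_group' matches the given group.

     The group name is expanded with the test thread (GROUPS_TO_EXPAND)
     or with the configured extra factor before matching.
     """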
     if group in GROUPS_TO_EXPAND:
         m = re.search(r'^\d+_(\S+)_on_[\d\.]+', version)
         if m:
             tests_thread = m.group(1)
             group = '{0}_{1}'.format(group, tests_thread)
     elif TestRailSettings.extra_factor_of_tc_definition:
         group = '{}_{}'.format(
             group, TestRailSettings.extra_factor_of_tc_definition)
     for test in self.tests:
         if test['custom_test_group'] == group:
             return test
     logger.error('Test with group "{0}" not found!'.format(group))
Example #10
 def get_test_by_group(self, group, version):
     if group in GROUPS_TO_EXPAND:
         m = re.search(r'^\d+_(\S+)_on_[\d\.]+', version)
         if m:
             tests_thread = m.group(1)
             group = '{0}_{1}'.format(group, tests_thread)
     elif TestRailSettings.extra_factor_of_tc_definition:
         group = '{}_{}'.format(
             group,
             TestRailSettings.extra_factor_of_tc_definition
         )
     for test in self.tests:
         if test['custom_test_group'] == group:
             return test
     logger.error('Test with group "{0}" not found!'.format(group))
Example #11
def _get_custom_cases_fields(case_fields, project_id):
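    """Collect default values for the required custom case fields.

    The first item index of each required field is used as the default;
    if it cannot be parsed, the value falls back to 1.
    """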
    custom_cases_fields = {}
    for field in case_fields:
        for config in field['configs']:
            if ((project_id in config['context']['project_ids']
                 or not config['context']['project_ids'])
                    and config['options']['is_required']):
                try:
                    custom_cases_fields[field['system_name']] = \
                        int(config['options']['items'].split(',')[0])
                except:
                    logger.error("Couldn't find default value for required "
                                 "field '{0}', setting '1' (index)!".format(
                                     field['system_name']))
                    custom_cases_fields[field['system_name']] = 1
    return custom_cases_fields
Example #12
def _get_custom_cases_fields(case_fields, project_id):
    custom_cases_fields = {}
    for field in case_fields:
        for config in field['configs']:
            if ((project_id in
                    config['context']['project_ids'] or
                    not config['context']['project_ids']) and
                    config['options']['is_required']):
                try:
                    custom_cases_fields[field['system_name']] = \
                        int(config['options']['items'].split(',')[0])
                except:
                    logger.error("Couldn't find default value for required "
                                 "field '{0}', setting '1' (index)!".format(
                                     field['system_name']))
                    custom_cases_fields[field['system_name']] = 1
    return custom_cases_fields
Example #13
def upload_tests_descriptions(testrail_project, section_id, tests,
                              check_all_sections):
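    """Add missing test case descriptions to the TestRail suite.

    Cases already present in the checked section(s) are skipped; required
    custom fields are filled with default values before uploading.
    """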
    tests_suite = testrail_project.get_suite_by_name(
        TestRailSettings.tests_suite)
    check_section = None if check_all_sections else section_id
    existing_cases = [
        case['custom_test_group']
        for case in testrail_project.get_cases(suite_id=tests_suite['id'],
                                               section_id=check_section)
    ]
    custom_cases_fields = {}
    for field in testrail_project.get_case_fields():
        for config in field['configs']:
            if ((testrail_project.project['id']
                 in config['context']['project_ids']
                 or not config['context']['project_ids'])
                    and config['options']['is_required']):
                try:
                    custom_cases_fields[field['system_name']] = \
                        int(config['options']['items'].split(',')[0])
                except:
                    logger.error("Couldn't find default value for required "
                                 "field '{0}', setting '1' (index)!".format(
                                     field['system_name']))
                    custom_cases_fields[field['system_name']] = 1

    for test_case in tests:
        if test_case['custom_test_group'] in existing_cases:
            logger.debug('Skipping uploading "{0}" test case because it '
                         'already exists in "{1}" tests section.'.format(
                             test_case['custom_test_group'],
                             TestRailSettings.tests_suite))
            continue

        for case_field, default_value in custom_cases_fields.items():
            if case_field not in test_case:
                test_case[case_field] = default_value

        logger.debug('Uploading test "{0}" to TestRail project "{1}", '
                     'suite "{2}", section "{3}"'.format(
                         test_case["custom_test_group"],
                         TestRailSettings.project,
                         TestRailSettings.tests_suite,
                         TestRailSettings.tests_section))
        testrail_project.add_case(section_id=section_id, case=test_case)
Example #14
def get_tests_groups_from_jenkins(runner_name, build_number, distros):
    runner_build = Build(runner_name, build_number)
    res = {}
    sub_builds = \
        runner_build.build_data.get('subBuilds', [runner_build.build_data])
    for b in sub_builds:
        if b['result'] is None:
            logger.debug("Skipping '{0}' job (build #{1}) because it's still "
                         "running...".format(b['jobName'], b['buildNumber'],))
            continue

        # Get the test group from the console of the job
        # Get the job suffix
        if b.get('jobName'):
            z = Build(b['jobName'], b['buildNumber'])
            console = z.get_job_console()
            job_name = b['jobName']
            job_info = z.job_info
            env_vars = z.injected_vars
        else:
            console = runner_build.get_job_console()
            job_name = runner_build.name
            job_info = runner_build.job_info
            env_vars = runner_build.injected_vars

        groups = re.findall(TEST_GROUP_PATTERN, console)

        if not groups:
            logger.error("No test group found in console of the job {0}/{1}"
                         .format(b['jobName'], b['buildNumber']))
            continue
        # Use the last group (there can be several groups in upgrade jobs)
        test_group = groups[-1]

        for distro in distros:
            if distro in job_name:
                sep = '.' + distro + '.'
                job_suffix = job_name.split(sep)[-1]
                break
        else:
            job_suffix = job_name.split('.')[-1]
        res[job_suffix] = \
            {'group': test_group, 'job_info': job_info, 'env_vars': env_vars}
    return res
Example #15
def get_tests_groups_from_jenkins(runner_name, build_number, distros):
    runner_build = Build(runner_name, build_number)
    res = {}
    for b in runner_build.build_data['subBuilds']:

        if b['result'] is None:
            logger.debug("Skipping '{0}' job (build #{1}) because it's still "
                         "running...".format(
                             b['jobName'],
                             b['buildNumber'],
                         ))
            continue

        # Get the test group from the console of the job
        z = Build(b['jobName'], b['buildNumber'])
        console = z.get_job_console()
        groups = [
            keyword.split('=')[1] for line in console
            for keyword in line.split()
            if 'run_system_test.py' in line and '--group=' in keyword
        ]
        if not groups:
            logger.error(
                "No test group found in console of the job {0}/{1}".format(
                    b['jobName'], b['buildNumber']))
            continue
        # Use the last group (there can be several groups in upgrade jobs)
        test_group = groups[-1]

        # Get the job suffix
        job_name = b['jobName']
        for distro in distros:
            if distro in job_name:
                sep = '.' + distro + '.'
                job_suffix = job_name.split(sep)[-1]
                break
        else:
            job_suffix = job_name.split('.')[-1]
        res[job_suffix] = test_group
    return res
Example #16
def get_build_test_data(build_number, job_name,
                        jenkins_url=JENKINS.get('url')):
    """ Get build test data from Jenkins from nosetests.xml

    :param build_number: int - Jenkins build number
    :param job_name: str - Jenkins job_name
    :param jenkins_url: str - Jenkins http url
    :return: test_data: dict - build info or None otherwise
    """

    test_data = None
    logger.info('Getting subbuild {} {}'.format(job_name, build_number))
    runner_build = Build(job_name, build_number)
    buildinfo = runner_build.get_build_data(depth=0)
    if not buildinfo:
        logger.error('Getting subbuilds info failed. '
                     'Job={} Build={}'.format(job_name, build_number))
        return test_data
    try:
        artifact_paths = [
            v for i in buildinfo.get('artifacts') for k, v in i.items()
            if k == 'relativePath'
            and v == JENKINS.get('xml_testresult_file_name')
        ][0]
        artifact_url = "/".join(
            [jenkins_url, 'job', job_name,
             str(build_number)])
        xdata = get_build_artifact(artifact_url, artifact_paths)
        test_data = xmltodict.parse(xdata, xml_attribs=True)
        test_data.update({
            'build_number': build_number,
            'job_name': job_name,
            'job_url': buildinfo.get('url'),
            'job_description': buildinfo.get('description'),
            'job_status': buildinfo.get('result')
        })
    except:
        test_data = None
    return test_data
Example #17
def get_build_test_data(build_number, job_name,
                        jenkins_url=JENKINS.get('url')):
    """ Get build test data from Jenkins from nosetests.xml

    :param build_number: int - Jenkins build number
    :param job_name: str - Jenkins job_name
    :param jenkins_url: str - Jenkins http url
    :return: test_data: dict - build info or None otherwise
    """

    test_data = None
    logger.info('Getting subbuild {} {}'.format(job_name,
                                                build_number))
    runner_build = Build(job_name, build_number)
    buildinfo = runner_build.get_build_data(depth=0)
    if not buildinfo:
        logger.error('Getting subbuilds info failed. '
                     'Job={} Build={}'.format(job_name, build_number))
        return test_data
    try:
        artifact_paths = [v for i in buildinfo.get('artifacts')
                          for k, v in i.items() if k == 'relativePath' and
                          v == JENKINS.get('xml_testresult_file_name')][0]
        artifact_url = "/".join([jenkins_url, 'job', job_name,
                                 str(build_number)])
        xdata = get_build_artifact(artifact_url, artifact_paths)
        test_data = xmltodict.parse(xdata, xml_attribs=True)
        test_data.update({'build_number': build_number,
                          'job_name': job_name,
                          'job_url': buildinfo.get('url'),
                          'job_description':
                              buildinfo.get('description'),
                          'job_status': buildinfo.get('result')})
    except:
        test_data = None
    return test_data
Example #18
File: report.py Project: avgoor/fuel-qa
 def status(self):
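     """Map the raw result status to one of the supported status names."""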
     for s in self.available_statuses.keys():
         if self._status in self.available_statuses[s]:
             return s
     logger.error('Unsupported result status: "{0}"!'.format(self._status))
     return self._status
Example #19
File: report.py Project: avgoor/fuel-qa
def main():
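    """Publish results of system tests from Jenkins builds to TestRail."""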

    parser = OptionParser(
        description="Publish results of system tests from Jenkins build to "
                    "TestRail. See settings.py for configuration."
    )
    parser.add_option('-j', '--job-name', dest='job_name', default=None,
                      help='Jenkins swarm runner job name')
    parser.add_option('-N', '--build-number', dest='build_number',
                      default='latest',
                      help='Jenkins swarm runner build number')
    parser.add_option('-o', '--one-job', dest='one_job_name',
                      default=None,
                      help=('Process only one job name from the specified '
                            'parent job or view'))
    parser.add_option("-w", "--view", dest="jenkins_view", default=False,
                      help="Get system tests jobs from Jenkins view")
    parser.add_option("-l", "--live", dest="live_report", action="store_true",
                      help="Get tests results from running swarm")
    parser.add_option("-m", "--manual", dest="manual_run", action="store_true",
                      help="Manually add tests cases to TestRun (tested only)")
    parser.add_option('-c', '--create-plan-only', action="store_true",
                      dest="create_plan_only", default=False,
                      help='Jenkins swarm runner job name')
    parser.add_option("-v", "--verbose",
                      action="store_true", dest="verbose", default=False,
                      help="Enable debug output")

    (options, _) = parser.parse_args()

    if options.verbose:
        logger.setLevel(DEBUG)

    if options.live_report and options.build_number == 'latest':
        options.build_number = 'latest_started'

    # STEP #1
    # Initialize TestRail Project and define configuration
    logger.info('Initializing TestRail Project configuration...')
    project = TestRailProject(url=TestRailSettings.url,
                              user=TestRailSettings.user,
                              password=TestRailSettings.password,
                              project=TestRailSettings.project)

    tests_suite = project.get_suite_by_name(TestRailSettings.tests_suite)
    operation_systems = [{'name': config['name'], 'id': config['id'],
                         'distro': config['name'].split()[0].lower()}
                         for config in project.get_config_by_name(
                             'Operation System')['configs'] if
                         config['name'] in TestRailSettings.operation_systems]
    tests_results = {os['distro']: [] for os in operation_systems}

    # STEP #2
    # Get tests results from Jenkins
    logger.info('Getting tests results from Jenkins...')
    if options.jenkins_view:
        jobs = get_jobs_for_view(options.jenkins_view)
        tests_jobs = [{'name': j, 'number': 'latest'}
                      for j in jobs if 'system_test' in j] if \
            not options.create_plan_only else []
        runner_job = [j for j in jobs if 'runner' in j][0]
        runner_build = Build(runner_job, 'latest')
    elif options.job_name:
        runner_build = Build(options.job_name, options.build_number)
        tests_jobs = get_downstream_builds(runner_build.build_data) if \
            not options.create_plan_only else []
    else:
        logger.error("Please specify either Jenkins swarm runner job name (-j)"
                     " or Jenkins view with system tests jobs (-w). Exiting..")
        return

    for systest_build in tests_jobs:
        if (options.one_job_name and
                options.one_job_name != systest_build['name']):
            logger.debug("Skipping '{0}' because --one-job is specified"
                         .format(systest_build['name']))
            continue
        if options.job_name:
            if 'result' not in systest_build.keys():
                logger.debug("Skipping '{0}' job because it doesn't run tests "
                             "(build #{1} contains no results)".format(
                                 systest_build['name'],
                                 systest_build['number']))
                continue
            if systest_build['result'] is None:
                logger.debug("Skipping '{0}' job (build #{1}) because it's sti"
                             "ll running...".format(systest_build['name'],
                                                    systest_build['number'],))
                continue
        for os in tests_results.keys():
            if os in systest_build['name'].lower():
                tests_results[os].extend(get_tests_results(systest_build, os))

    # STEP #3
    # Create new TestPlan in TestRail (or get existing) and add TestRuns
    milestone, iso_number, prefix = get_version(runner_build.build_data)
    milestone = project.get_milestone_by_name(name=milestone)

    test_plan_name = ' '.join(
        filter(lambda x: bool(x),
               (milestone['name'], prefix, 'iso', '#' + str(iso_number))))

    test_plan = project.get_plan_by_name(test_plan_name)
    iso_link = '/'.join([JENKINS['url'], 'job',
                         '{0}.all'.format(milestone['name']), str(iso_number)])
    if not test_plan:
        test_plan = project.add_plan(test_plan_name,
                                     description=iso_link,
                                     milestone_id=milestone['id'],
                                     entries=[]
                                     )
        logger.info('Created new TestPlan "{0}".'.format(test_plan_name))
    else:
        logger.info('Found existing TestPlan "{0}".'.format(test_plan_name))

    if options.create_plan_only:
        return

    plan_entries = []
    all_cases = project.get_cases(suite_id=tests_suite['id'])
    for os in operation_systems:
        cases_ids = []
        if options.manual_run:
            all_results_groups = [r.group for r in tests_results[os['distro']]]
            for case in all_cases:
                if case['custom_test_group'] in all_results_groups:
                    cases_ids.append(case['id'])
        plan_entries.append(
            project.test_run_struct(
                name='{suite_name}'.format(suite_name=tests_suite['name']),
                suite_id=tests_suite['id'],
                milestone_id=milestone['id'],
                description='Results of system tests ({tests_suite}) on is'
                'o #"{iso_number}"'.format(tests_suite=tests_suite['name'],
                                           iso_number=iso_number),
                config_ids=[os['id']],
                include_all=True,
                case_ids=cases_ids
            )
        )

    if not any(entry['suite_id'] == tests_suite['id']
               for entry in test_plan['entries']):
        if project.add_plan_entry(plan_id=test_plan['id'],
                                  suite_id=tests_suite['id'],
                                  config_ids=[os['id'] for os
                                              in operation_systems],
                                  runs=plan_entries):
            test_plan = project.get_plan(test_plan['id'])

    # STEP #4
    # Upload tests results to TestRail
    logger.info('Uploading tests results to TestRail...')
    for os in operation_systems:
        logger.info('Checking tests results for "{0}"...'.format(os['name']))
        results_to_publish = publish_results(
            project=project,
            milestone_id=milestone['id'],
            test_plan=test_plan,
            suite_id=tests_suite['id'],
            config_id=os['id'],
            results=tests_results[os['distro']]
        )
        logger.debug('Added new results for tests ({os}): {tests}'.format(
            os=os['name'], tests=[r.group for r in results_to_publish]
        ))

    logger.info('Report URL: {0}'.format(test_plan['url']))
Example #20
def main():
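    """Generate statistics for bugs linked to a TestRail test plan and
    optionally publish or save them.
    """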
    parser = argparse.ArgumentParser(
        description="Generate statistics for bugs linked to TestRun. Publish " "statistics to testrail if necessary."
    )
    parser.add_argument("plan_id", type=int, nargs="?", default=None, help="Test plan ID in TestRail")
    parser.add_argument(
        "-j",
        "--job-name",
        dest="job_name",
        type=str,
        default=None,
        help="Name of Jenkins job which runs tests (runner). " "It will be used for TestPlan search instead of ID",
    )
    parser.add_argument("-n", "--build-number", dest="build_number", default="latest", help="Jenkins job build number")
    parser.add_argument(
        "-r", "--run-id", dest="run_ids", type=str, default=None, help="(optional) IDs of TestRun to check (skip other)"
    )
    parser.add_argument(
        "-b",
        "--handle-blocked",
        action="store_true",
        dest="handle_blocked",
        default=False,
        help="Copy bugs links to downstream blocked results",
    )
    parser.add_argument(
        "-s",
        "--separate-runs",
        action="store_true",
        dest="separate_runs",
        default=False,
        help="Create separate statistics for each test run",
    )
    parser.add_argument("-p", "--publish", action="store_true", help="Publish statistics to TestPlan description")
    parser.add_argument(
        "-o",
        "--out-file",
        dest="output_file",
        default=None,
        type=str,
        help="Path to file to save statistics as JSON and/or " "HTML. Filename extension is added automatically",
    )
    parser.add_argument(
        "-H",
        "--html",
        action="store_true",
        help="Save statistics in HTML format to file " "(used with --out-file option)",
    )
    parser.add_argument(
        "-q",
        "--quiet",
        action="store_true",
        help="Be quiet (disable logging except critical) " 'Overrides "--verbose" option.',
    )
    parser.add_argument("-v", "--verbose", action="store_true", help="Enable debug logging.")

    args = parser.parse_args()

    if args.verbose:
        logger.setLevel(DEBUG)

    if args.quiet:
        logger.setLevel(CRITICAL)

    testrail_project = get_testrail()

    if args.job_name:
        logger.info(
            "Inspecting {0} build of {1} Jenkins job for TestPlan "
            "details...".format(args.build_number, args.job_name)
        )
        test_plan_name = generate_test_plan_name(args.job_name, args.build_number)
        test_plan = testrail_project.get_plan_by_name(test_plan_name)
        if test_plan:
            args.plan_id = test_plan["id"]
        else:
            logger.warning('TestPlan "{0}" not found!'.format(test_plan_name))

    if not args.plan_id:
        logger.error("There is no TestPlan to process, exiting...")
        return 1

    run_ids = () if not args.run_ids else tuple(int(arg) for arg in args.run_ids.split(","))

    generator = StatisticsGenerator(testrail_project, args.plan_id, run_ids, args.handle_blocked)
    generator.generate()
    stats = generator.dump()

    if args.publish:
        logger.debug("Publishing bugs statistics to TestRail..")
        generator.publish(stats)

    if args.output_file:
        html = generator.dump_html(stats) if args.html else args.html
        save_stats_to_file(stats, args.output_file, html)

        if args.separate_runs:
            for run in generator.test_runs_stats:
                file_name = "{0}_{1}".format(args.output_file, run["id"])
                stats = generator.dump(run_id=run["id"])
                html = generator.dump_html(stats, run["id"]) if args.html else args.html
                save_stats_to_file(stats, file_name, html)

    logger.info("Statistics generation complete!")
Example #21
def main():
    parser = argparse.ArgumentParser(
        description="Generate statistics for bugs linked to TestRun. Publish "
        "statistics to testrail if necessary.")
    parser.add_argument('plan_id',
                        type=int,
                        nargs='?',
                        default=None,
                        help='Test plan ID in TestRail')
    parser.add_argument('-j',
                        '--job-name',
                        dest='job_name',
                        type=str,
                        default=None,
                        help='Name of Jenkins job which runs tests (runner). '
                        'It will be used for TestPlan search instead of ID')
    parser.add_argument('-n',
                        '--build-number',
                        dest='build_number',
                        default='latest',
                        help='Jenkins job build number')
    parser.add_argument('-r',
                        '--run-id',
                        dest='run_ids',
                        type=str,
                        default=None,
                        help='(optional) IDs of TestRun to check (skip other)')
    parser.add_argument('-b',
                        '--handle-blocked',
                        action="store_true",
                        dest='handle_blocked',
                        default=False,
                        help='Copy bugs links to downstream blocked results')
    parser.add_argument('-s',
                        '--separate-runs',
                        action="store_true",
                        dest='separate_runs',
                        default=False,
                        help='Create separate statistics for each test run')
    parser.add_argument('-p',
                        '--publish',
                        action="store_true",
                        help='Publish statistics to TestPlan description')
    parser.add_argument('-o',
                        '--out-file',
                        dest='output_file',
                        default=None,
                        type=str,
                        help='Path to file to save statistics as JSON and/or '
                        'HTML. Filename extension is added automatically')
    parser.add_argument('-H',
                        '--html',
                        action="store_true",
                        help='Save statistics in HTML format to file '
                        '(used with --out-file option)')
    parser.add_argument('-q',
                        '--quiet',
                        action="store_true",
                        help='Be quiet (disable logging except critical) '
                        'Overrides "--verbose" option.')
    parser.add_argument("-v",
                        "--verbose",
                        action="store_true",
                        help="Enable debug logging.")

    args = parser.parse_args()

    if args.verbose:
        logger.setLevel(DEBUG)

    if args.quiet:
        logger.setLevel(CRITICAL)

    testrail_project = get_testrail()

    if args.job_name:
        logger.info('Inspecting {0} build of {1} Jenkins job for TestPlan '
                    'details...'.format(args.build_number, args.job_name))
        test_plan_name = generate_test_plan_name(args.job_name,
                                                 args.build_number)
        test_plan = testrail_project.get_plan_by_name(test_plan_name)
        if test_plan:
            args.plan_id = test_plan['id']
        else:
            logger.warning('TestPlan "{0}" not found!'.format(test_plan_name))

    if not args.plan_id:
        logger.error('There is no TestPlan to process, exiting...')
        return 1

    run_ids = () if not args.run_ids else tuple(
        int(arg) for arg in args.run_ids.split(','))

    generator = StatisticsGenerator(testrail_project, args.plan_id, run_ids,
                                    args.handle_blocked)
    generator.generate()
    stats = generator.dump()

    if args.publish:
        logger.debug('Publishing bugs statistics to TestRail..')
        generator.publish(stats)

    if args.output_file:
        html = generator.dump_html(stats) if args.html else args.html
        save_stats_to_file(stats, args.output_file, html)

        if args.separate_runs:
            for run in generator.test_runs_stats:
                file_name = '{0}_{1}'.format(args.output_file, run['id'])
                stats = generator.dump(run_id=run['id'])
                html = (generator.dump_html(stats, run['id'])
                        if args.html else args.html)
                save_stats_to_file(stats, file_name, html)

    logger.info('Statistics generation complete!')
Example #22
def main():
    """Build failure group statistics for a Jenkins swarm.runner build.

    :return: None on success, a non-zero error code otherwise
    """

    parser = argparse.ArgumentParser(description='Get downstream build info'
                                     ' for Jenkins swarm.runner build.'
                                     ' Generate matrix statistics:'
                                     ' (failure group -> builds & tests).'
                                     ' Publish matrix to Testrail'
                                     ' if necessary.')
    parser.add_argument('-n', '--build-number', type=int, required=False,
                        dest='build_number', help='Jenkins job build number')
    parser.add_argument('-j', '--job-name', type=str,
                        dest='job_name', default='9.0.swarm.runner',
                        help='Name of Jenkins job which runs tests (runner)')
    parser.add_argument('-f', '--format', type=str, dest='formatfile',
                        default='html',
                        help='format statistics: html,json,table')
    parser.add_argument('-o', '--out', type=str, dest="fileoutput",
                        default='failure_groups_statistics',
                        help='Save statistics to file')
    parser.add_argument('-t', '--track', action="store_true",
                        help='Publish statistics to TestPlan description')
    parser.add_argument('-q', '--quiet', action="store_true",
                        help='Be quiet (disable logging except critical) '
                             'Overrides "--verbose" option.')
    parser.add_argument("-v", "--verbose", action="store_true",
                        help="Enable debug logging.")
    args = parser.parse_args()

    if args.verbose:
        logger.setLevel(DEBUG)
    if args.quiet:
        logger.setLevel(CRITICAL)
    if args.formatfile and\
       args.formatfile not in ['json', 'html', 'xls', 'xlsx', 'yaml', 'csv']:
        logger.info('Unsupported output format. Exit')
        return 2
    if not args.build_number:
        runner_build = Build(args.job_name, 'latest')
        logger.info('Latest build number is {}. Job is {}'.
                    format(runner_build.number, args.job_name))
        args.build_number = runner_build.number

    logger.info('Getting subbuilds for {} {}'.format(args.job_name,
                                                     args.build_number))
    subbuilds, swarm_jenkins_info = get_sub_builds(args.build_number)
    if not subbuilds or not swarm_jenkins_info:
        logger.error('Necessary subbuilds info are absent. Exit')
        return 3
    logger.info('{} Subbuilds have been found'.format(len(subbuilds)))

    logger.info('Calculating failure groups')
    failure_gd = get_global_failure_group_list(subbuilds)[0]
    if not failure_gd:
        logger.error('Necessary failure group info are absent. Exit')
        return 4
    logger.info('{} Failure groups have been found'.format(len(failure_gd)))

    logger.info('Getting TestRail data')
    testrail_testdata = get_testrail_testdata(args.job_name,
                                              args.build_number)
    if not testrail_testdata:
        logger.error('Necessary testrail info are absent. Exit')
        return 5
    logger.info('TestRail data have been downloaded')

    logger.info('Getting TestRail bugs')
    testrail_bugs = get_bugs(subbuilds, testrail_testdata)
    if not testrail_bugs:
        logger.error('Necessary testrail bugs info are absent. Exit')
        return 6
    logger.info('TestRail bugs have been retrieved')

    logger.info('Update subbuilds data')
    update_subbuilds_failuregroup(subbuilds, failure_gd,
                                  testrail_testdata,
                                  testrail_bugs)
    logger.info('Subbuilds data have been updated')

    logger.info('Generating statistics across all failure groups')
    statistics = get_statistics(failure_gd, format_out=args.formatfile)
    if not statistics:
        logger.error('Necessary statistics info are absent. Exit')
        return 7
    logger.info('Statistics have been generated')

    if args.fileoutput and args.formatfile:
        logger.info('Save statistics')
        dump_statistics(statistics, args.build_number, args.job_name,
                        args.formatfile, args.fileoutput)
        logger.info('Statistics have been saved')
    if args.track:
        logger.info('Publish statistics to TestRail')
        if publish_statistics(statistics, args.build_number, args.job_name):
            logger.info('Statistics have been published')
        else:
            logger.info('Statistics have not been published '
                        'due to internal issue')
Example #23
 def status(self):
     for s in self.available_statuses.keys():
         if self._status in self.available_statuses[s]:
             return s
     logger.error('Unsupported result status: "{0}"!'.format(self._status))
     return self._status
Example #24
def main():

    parser = OptionParser(
        description="Publish results of system tests from Jenkins build to "
        "TestRail. See settings.py for configuration.")
    parser.add_option('-j',
                      '--job-name',
                      dest='job_name',
                      default=None,
                      help='Jenkins swarm runner job name')
    parser.add_option('-N',
                      '--build-number',
                      dest='build_number',
                      default='latest',
                      help='Jenkins swarm runner build number')
    parser.add_option('-o',
                      '--one-job',
                      dest='one_job_name',
                      default=None,
                      help=('Process only one job name from the specified '
                            'parent job or view'))
    parser.add_option("-w",
                      "--view",
                      dest="jenkins_view",
                      default=False,
                      help="Get system tests jobs from Jenkins view")
    parser.add_option("-l",
                      "--live",
                      dest="live_report",
                      action="store_true",
                      help="Get tests results from running swarm")
    parser.add_option("-m",
                      "--manual",
                      dest="manual_run",
                      action="store_true",
                      help="Manually add tests cases to TestRun (tested only)")
    parser.add_option('-c',
                      '--create-plan-only',
                      action="store_true",
                      dest="create_plan_only",
                      default=False,
                      help='Jenkins swarm runner job name')
    parser.add_option("-v",
                      "--verbose",
                      action="store_true",
                      dest="verbose",
                      default=False,
                      help="Enable debug output")

    (options, _) = parser.parse_args()

    if options.verbose:
        logger.setLevel(DEBUG)

    if options.live_report and options.build_number == 'latest':
        options.build_number = 'latest_started'

    # STEP #1
    # Initialize TestRail Project and define configuration
    logger.info('Initializing TestRail Project configuration...')
    project = TestRailProject(url=TestRailSettings.url,
                              user=TestRailSettings.user,
                              password=TestRailSettings.password,
                              project=TestRailSettings.project)

    tests_suite = project.get_suite_by_name(TestRailSettings.tests_suite)
    operation_systems = [
        {
            'name': config['name'],
            'id': config['id'],
            'distro': config['name'].split()[0].lower()
        }
        for config in project.get_config_by_name('Operation System')['configs']
        if config['name'] in TestRailSettings.operation_systems
    ]
    tests_results = {os['distro']: [] for os in operation_systems}

    # STEP #2
    # Get tests results from Jenkins
    logger.info('Getting tests results from Jenkins...')
    if options.jenkins_view:
        jobs = get_jobs_for_view(options.jenkins_view)
        tests_jobs = [{'name': j, 'number': 'latest'}
                      for j in jobs if 'system_test' in j] if \
            not options.create_plan_only else []
        runner_job = [j for j in jobs if 'runner' in j][0]
        runner_build = Build(runner_job, 'latest')
    elif options.job_name:
        runner_build = Build(options.job_name, options.build_number)
        tests_jobs = get_downstream_builds(runner_build.build_data) if \
            not options.create_plan_only else []
    else:
        logger.error("Please specify either Jenkins swarm runner job name (-j)"
                     " or Jenkins view with system tests jobs (-w). Exiting..")
        return

    for systest_build in tests_jobs:
        if (options.one_job_name
                and options.one_job_name != systest_build['name']):
            logger.debug(
                "Skipping '{0}' because --one-job is specified".format(
                    systest_build['name']))
            continue
        if options.job_name:
            if 'result' not in systest_build.keys():
                logger.debug("Skipping '{0}' job because it doesn't run tests "
                             "(build #{1} contains no results)".format(
                                 systest_build['name'],
                                 systest_build['number']))
                continue
            if systest_build['result'] is None:
                logger.debug("Skipping '{0}' job (build #{1}) because it's sti"
                             "ll running...".format(
                                 systest_build['name'],
                                 systest_build['number'],
                             ))
                continue
        for os in tests_results.keys():
            if os in systest_build['name'].lower():
                tests_results[os].extend(get_tests_results(systest_build, os))

    # STEP #3
    # Create new TestPlan in TestRail (or get existing) and add TestRuns
    milestone, iso_number, prefix = get_version(runner_build.build_data)
    milestone = project.get_milestone_by_name(name=milestone)

    test_plan_name = ' '.join(
        filter(lambda x: bool(x),
               (milestone['name'], prefix, 'iso', '#' + str(iso_number))))

    test_plan = project.get_plan_by_name(test_plan_name)
    iso_link = '/'.join([
        JENKINS['url'], 'job', '{0}.all'.format(milestone['name']),
        str(iso_number)
    ])
    if not test_plan:
        test_plan = project.add_plan(test_plan_name,
                                     description=iso_link,
                                     milestone_id=milestone['id'],
                                     entries=[])
        logger.info('Created new TestPlan "{0}".'.format(test_plan_name))
    else:
        logger.info('Found existing TestPlan "{0}".'.format(test_plan_name))

    if options.create_plan_only:
        return

    plan_entries = []
    all_cases = project.get_cases(suite_id=tests_suite['id'])
    for os in operation_systems:
        cases_ids = []
        if options.manual_run:
            all_results_groups = [r.group for r in tests_results[os['distro']]]
            for case in all_cases:
                if case['custom_test_group'] in all_results_groups:
                    cases_ids.append(case['id'])
        plan_entries.append(
            project.test_run_struct(
                name='{suite_name}'.format(suite_name=tests_suite['name']),
                suite_id=tests_suite['id'],
                milestone_id=milestone['id'],
                description='Results of system tests ({tests_suite}) on is'
                'o #"{iso_number}"'.format(tests_suite=tests_suite['name'],
                                           iso_number=iso_number),
                config_ids=[os['id']],
                include_all=True,
                case_ids=cases_ids))

    if not any(entry['suite_id'] == tests_suite['id']
               for entry in test_plan['entries']):
        if project.add_plan_entry(
                plan_id=test_plan['id'],
                suite_id=tests_suite['id'],
                config_ids=[os['id'] for os in operation_systems],
                runs=plan_entries):
            test_plan = project.get_plan(test_plan['id'])

    # STEP #4
    # Upload tests results to TestRail
    logger.info('Uploading tests results to TestRail...')
    for os in operation_systems:
        logger.info('Checking tests results for "{0}"...'.format(os['name']))
        results_to_publish = publish_results(
            project=project,
            milestone_id=milestone['id'],
            test_plan=test_plan,
            suite_id=tests_suite['id'],
            config_id=os['id'],
            results=tests_results[os['distro']])
        logger.debug('Added new results for tests ({os}): {tests}'.format(
            os=os['name'], tests=[r.group for r in results_to_publish]))

    logger.info('Report URL: {0}'.format(test_plan['url']))
Example #25
def main():
    parser = argparse.ArgumentParser(
        description="Generate statistics for bugs linked to TestRun. Publish "
                    "statistics to testrail if necessary."
    )
    parser.add_argument('plan_id', type=int, nargs='?', default=None,
                        help='Test plan ID in TestRail')
    parser.add_argument('-j', '--job-name',
                        dest='job_name', type=str, default=None,
                        help='Name of Jenkins job which runs tests (runner). '
                             'It will be used for TestPlan search instead of ID')
    parser.add_argument('-n', '--build-number', dest='build_number',
                        default='latest', help='Jenkins job build number')
    parser.add_argument('-r', '--run-id',
                        dest='run_ids', type=str, default=None,
                        help='(optional) IDs of TestRun to check (skip other)')
    parser.add_argument('-b', '--handle-blocked', action="store_true",
                        dest='handle_blocked', default=False,
                        help='Copy bugs links to downstream blocked results')
    parser.add_argument('-s', '--separate-runs', action="store_true",
                        dest='separate_runs', default=False,
                        help='Create separate statistics for each test run')
    parser.add_argument('-p', '--publish', action="store_true",
                        help='Publish statistics to TestPlan description')
    parser.add_argument('-o', '--out-file', dest='output_file',
                        default=None, type=str,
                        help='Path to file to save statistics as JSON and/or '
                             'HTML. Filename extension is added automatically')
    parser.add_argument('-H', '--html', action="store_true",
                        help='Save statistics in HTML format to file '
                             '(used with --out-file option)')
    parser.add_argument('-q', '--quiet', action="store_true",
                        help='Be quiet (disable logging except critical) '
                             'Overrides "--verbose" option.')
    parser.add_argument("-v", "--verbose", action="store_true",
                        help="Enable debug logging.")

    args = parser.parse_args()

    if args.verbose:
        logger.setLevel(DEBUG)

    if args.quiet:
        logger.setLevel(CRITICAL)

    testrail_project = get_testrail()

    if args.job_name:
        logger.info('Inspecting {0} build of {1} Jenkins job for TestPlan '
                    'details...'.format(args.build_number, args.job_name))
        test_plan_name = generate_test_plan_name(args.job_name,
                                                 args.build_number)
        test_plan = testrail_project.get_plan_by_name(test_plan_name)
        if test_plan:
            args.plan_id = test_plan['id']
        else:
            logger.warning('TestPlan "{0}" not found!'.format(test_plan_name))

    if not args.plan_id:
        logger.error('There is no TestPlan to process, exiting...')
        return 1

    run_ids = () if not args.run_ids else tuple(
        int(arg) for arg in args.run_ids.split(','))

    generator = StatisticsGenerator(testrail_project,
                                    args.plan_id,
                                    run_ids,
                                    args.handle_blocked)
    generator.generate()
    stats = generator.dump()

    if args.publish:
        logger.debug('Publishing bugs statistics to TestRail..')
        generator.publish(stats)

    if args.output_file:
        html = generator.dump_html(stats) if args.html else args.html
        save_stats_to_file(stats, args.output_file, html)

        if args.separate_runs:
            for run in generator.test_runs_stats:
                file_name = '{0}_{1}'.format(args.output_file, run['id'])
                stats = generator.dump(run_id=run['id'])
                html = (generator.dump_html(stats, run['id']) if args.html
                        else args.html)
                save_stats_to_file(stats, file_name, html)

    logger.info('Statistics generation complete!')
Example #26
def main():
    """Build failure group statistics for a Jenkins swarm.runner build.

    :return: None on success, a non-zero error code otherwise
    """

    parser = argparse.ArgumentParser(description='Get downstream build info'
                                     ' for Jenkins swarm.runner build.'
                                     ' Generate matrix statistics:'
                                     ' (failure group -> builds & tests).'
                                     ' Publish matrix to Testrail'
                                     ' if necessary.')
    parser.add_argument('-n',
                        '--build-number',
                        type=int,
                        required=False,
                        dest='build_number',
                        help='Jenkins job build number')
    parser.add_argument('-j',
                        '--job-name',
                        type=str,
                        dest='job_name',
                        default='9.0.swarm.runner',
                        help='Name of Jenkins job which runs tests (runner)')
    parser.add_argument('-f',
                        '--format',
                        type=str,
                        dest='formatfile',
                        default='html',
                        help='format statistics: html,json,table')
    parser.add_argument('-o',
                        '--out',
                        type=str,
                        dest="fileoutput",
                        default='failure_groups_statistics',
                        help='Save statistics to file')
    parser.add_argument('-t',
                        '--track',
                        action="store_true",
                        help='Publish statistics to TestPlan description')
    parser.add_argument('-q',
                        '--quiet',
                        action="store_true",
                        help='Be quiet (disable logging except critical) '
                        'Overrides "--verbose" option.')
    parser.add_argument("-v",
                        "--verbose",
                        action="store_true",
                        help="Enable debug logging.")
    args = parser.parse_args()

    if args.verbose:
        logger.setLevel(DEBUG)
    if args.quiet:
        logger.setLevel(CRITICAL)
    if args.formatfile and\
       args.formatfile not in ['json', 'html', 'xls', 'xlsx', 'yaml', 'csv']:
        logger.info('Unsupported output format. Exit')
        return 2
    if not args.build_number:
        runner_build = Build(args.job_name, 'latest')
        logger.info('Latest build number is {}. Job is {}'.format(
            runner_build.number, args.job_name))
        args.build_number = runner_build.number

    logger.info('Getting subbuilds for {} {}'.format(args.job_name,
                                                     args.build_number))
    subbuilds, swarm_jenkins_info = get_sub_builds(args.build_number)
    if not subbuilds or not swarm_jenkins_info:
        logger.error('Necessary subbuilds info are absent. Exit')
        return 3
    logger.info('{} Subbuilds have been found'.format(len(subbuilds)))

    logger.info('Calculating failure groups')
    failure_gd = get_global_failure_group_list(subbuilds)[0]
    if not failure_gd:
        logger.error('Necessary failure group info are absent. Exit')
        return 4
    logger.info('{} Failure groups have been found'.format(len(failure_gd)))

    logger.info('Getting TestRail data')
    testrail_testdata = get_testrail_testdata(args.job_name, args.build_number)
    if not testrail_testdata:
        logger.error('Necessary testrail info are absent. Exit')
        return 5
    logger.info('TestRail data have been downloaded')

    logger.info('Getting TestRail bugs')
    testrail_bugs = get_bugs(subbuilds, testrail_testdata)
    if not testrail_bugs:
        logger.error('Necessary testrail bugs info are absent. Exit')
        return 6
    logger.info('TestRail bugs have been retrieved')

    logger.info('Update subbuilds data')
    update_subbuilds_failuregroup(subbuilds, failure_gd, testrail_testdata,
                                  testrail_bugs)
    logger.info('Subbuilds data have been updated')

    logger.info('Generating statistics across all failure groups')
    statistics = get_statistics(failure_gd, format_out=args.formatfile)
    if not statistics:
        logger.error('Necessary statistics info are absent. Exit')
        return 7
    logger.info('Statistics have been generated')

    if args.fileoutput and args.formatfile:
        logger.info('Save statistics')
        dump_statistics(statistics, args.build_number, args.job_name,
                        args.formatfile, args.fileoutput)
        logger.info('Statistics have been saved')
    if args.track:
        logger.info('Publish statistics to TestRail')
        if publish_statistics(statistics, args.build_number, args.job_name):
            logger.info('Statistics have been published')
        else:
            logger.info('Statistics have not been published '
                        'due to internal issue')