def main():
    """Publish the result of an Automated Cloud Tests run to TestRail.

    Command-line options:
        -r/--run-name:  name of the test run (required)
        -i/--id:        id of the test suite to update (default: "1595")
        -n/--case_name: name of the test case (default: "SimpleTestCase")
    """
    parser = optparse.OptionParser(
        description='Publish the results of Automated Cloud Tests in TestRail')
    parser.add_option('-r', '--run-name', dest='run_name',
                      help='The name of a test run. '
                           'The name should describe the configuration '
                           'of the environment where Tempest tests were run')
    # Fixed missing space between "of" and "the" in the help text.
    parser.add_option('-i', '--id', dest='test_suite_id', default="1595",
                      help='The id of test suite that should be updated '
                           'with results of the test run')
    parser.add_option('-n', '--case_name', dest='test_case_name',
                      default="SimpleTestCase",
                      help='Name of the test case')

    (options, args) = parser.parse_args()

    if options.run_name is None:
        # parser.error() prints usage and exits cleanly instead of leaking
        # an uncaught OptionValueError traceback to the user.
        parser.error('No run name was specified!')

    # STEP #1
    # Initialize TestRail project client
    LOG.info('Initializing TestRail project client...')
    # Use credentials from settings instead of hard-coded placeholder values,
    # consistent with the other TestRail clients in this module.
    client = TestRailProject(url=TestRailSettings.url,
                             user=TestRailSettings.user,
                             password=TestRailSettings.password,
                             project=TestRailSettings.project)
    LOG.info('TestRail project client has been initialized.')

    the_suite = client.get_suite(int(options.test_suite_id))
    LOG.info('Tests suite is "{0}".'.format(the_suite['name']))

    try:
        report_test_results_for_run(client, options.run_name,
                                    options.test_suite_id,
                                    options.test_case_name, 'passed')
    except APIError as api_error:
        LOG.exception(api_error)
def main():
    """Upload test cases to TestRail.

    Configuration (URL, credentials, suite/section names) comes from
    settings.py; optionally restricts the upload to test groups found in a
    Jenkins swarm runner build.
    """
    parser = OptionParser(
        description="Upload tests cases to TestRail. "
                    "See settings.py for configuration."
    )
    parser.add_option("-v", "--verbose", action="store_true",
                      dest="verbose", default=False,
                      help="Enable debug output")
    parser.add_option('-j', '--job-name', dest='job_name', default=None,
                      help='Jenkins swarm runner job name')
    parser.add_option('-N', '--build-number', dest='build_number',
                      default='latest',
                      help='Jenkins swarm runner build number')
    parser.add_option('-o', '--check_one_section', action="store_true",
                      dest='check_one_section', default=False,
                      help='Look for existing test case only in specified '
                           'section of test suite.')

    options, _ = parser.parse_args()

    if options.verbose:
        logger.setLevel(DEBUG)

    # TestRail client configured from settings.py
    project = TestRailProject(url=TestRailSettings.url,
                              user=TestRailSettings.user,
                              password=TestRailSettings.password,
                              project=TestRailSettings.project)

    suite_id = project.get_suite_by_name(TestRailSettings.tests_suite)['id']
    testrail_section = project.get_section_by_name(
        suite_id=suite_id,
        section_name=TestRailSettings.tests_section)

    testrail_milestone = project.get_milestone_by_name(
        name=TestRailSettings.milestone)

    if options.job_name:
        tests_groups = get_tests_groups_from_jenkins(
            options.job_name, options.build_number)
    else:
        tests_groups = []

    # If Jenkins job build is specified, but it doesn't have downstream builds
    # with tests groups in jobs names, then skip tests cases uploading because
    # ALL existing tests cases will be uploaded
    if options.job_name and not tests_groups:
        return

    tests_descriptions = get_tests_descriptions(
        milestone_id=testrail_milestone['id'],
        tests_include=TestRailSettings.tests_include,
        tests_exclude=TestRailSettings.tests_exclude,
        groups=tests_groups)

    upload_tests_descriptions(testrail_project=project,
                              section_id=testrail_section['id'],
                              tests=tests_descriptions,
                              check_all_sections=not options.check_one_section)
def main():
    """Upload test descriptions for the configured milestone to TestRail."""
    # Client configured entirely from settings.py.
    project = TestRailProject(
        url=TestRailSettings.url,
        user=TestRailSettings.user,
        password=TestRailSettings.password,
        project=TestRailSettings.project)

    milestone = project.get_milestone_by_name(
        name=TestRailSettings.milestone)

    descriptions = get_tests_descriptions(
        milestone_id=milestone['id'],
        tests_include=TestRailSettings.tests_include,
        tests_exclude=TestRailSettings.tests_exclude)

    upload_tests_descriptions(testrail_project=project,
                              tests=descriptions)
# Example #4
def main():
    """Publish the result of an Automated Cloud Tests run to TestRail.

    Command-line options:
        -r/--run-name:  name of the test run (required)
        -i/--id:        id of the test suite to update (default: "1595")
        -n/--case_name: name of the test case (default: "SimpleTestCase")
    """
    parser = optparse.OptionParser(
        description='Publish the results of Automated Cloud Tests in TestRail')
    parser.add_option('-r',
                      '--run-name',
                      dest='run_name',
                      help='The name of a test run. '
                      'The name should describe the configuration '
                      'of the environment where Tempest tests were run')
    # Fixed missing space between "of" and "the" in the help text.
    parser.add_option(
        '-i',
        '--id',
        dest='test_suite_id',
        default="1595",
        help='The id of test suite that should be updated with results of '
        'the test run')
    parser.add_option('-n',
                      '--case_name',
                      dest='test_case_name',
                      default="SimpleTestCase",
                      help='Name of the test case')

    (options, args) = parser.parse_args()

    if options.run_name is None:
        # parser.error() prints usage and exits cleanly instead of leaking
        # an uncaught OptionValueError traceback to the user.
        parser.error('No run name was specified!')

    # STEP #1
    # Initialize TestRail project client
    LOG.info('Initializing TestRail project client...')
    # Use credentials from settings instead of hard-coded placeholder values,
    # consistent with the other TestRail clients in this module.
    client = TestRailProject(url=TestRailSettings.url,
                             user=TestRailSettings.user,
                             password=TestRailSettings.password,
                             project=TestRailSettings.project)
    LOG.info('TestRail project client has been initialized.')

    the_suite = client.get_suite(int(options.test_suite_id))
    LOG.info('Tests suite is "{0}".'.format(the_suite['name']))

    try:
        report_test_results_for_run(client, options.run_name,
                                    options.test_suite_id,
                                    options.test_case_name, 'passed')
    except APIError as api_error:
        LOG.exception(api_error)
def main():
    """Fetch test descriptions for the configured milestone and upload them."""
    client = TestRailProject(url=TestRailSettings.url,
                             user=TestRailSettings.user,
                             password=TestRailSettings.password,
                             project=TestRailSettings.project)

    milestone = client.get_milestone_by_name(name=TestRailSettings.milestone)

    descriptions = get_tests_descriptions(
        milestone_id=milestone['id'],
        tests_include=TestRailSettings.tests_include,
        tests_exclude=TestRailSettings.tests_exclude)

    upload_tests_descriptions(testrail_project=client, tests=descriptions)
# Example #6
def main():
    """Publish Rally scenario results for the current Fuel cluster to TestRail.

    Collects cluster facts from Nailgun (versions, release, networking,
    node roles, components, storage), finds or creates the matching TestRail
    suite/plan/run, then uploads one result per Rally scenario, including
    per-atomic-action step results.

    Reads BUILD_URL, FUEL_IP, MOS_VERSION, MOS_BUILD, CUSTOM_JOB and
    ISO_PREFIX from the environment; the Rally config is taken from argv.
    """

    rally_name = TestRailSettings.tests_suite + ' ' + \
        TestRailSettings.tests_section

    cluster_description = os.environ.get('BUILD_URL', '') + '\n---\n'

    #
    # Collect info about cluster
    #

    # Initialize Nailgun client
    fuelmaster = os.environ.get('FUEL_IP', 'localhost')
    fuel_client = fuel.NailgunClient(fuelmaster)

    # Get Fuel version: prefer explicit environment overrides, otherwise
    # ask the Nailgun API.
    mos_version = os.environ.get('MOS_VERSION')
    mos_build = os.environ.get('MOS_BUILD')
    if mos_version and mos_build:
        fuel_version = {
            'release': mos_version,
            'build_number': mos_build,
        }
    else:
        fuel_version = fuel_client.get_api_version()
    # Build number should be an integer
    fuel_version['build_number'] = int(fuel_version['build_number'])
    cluster_description += 'Fuel version: {}-{}\n\n'.format(
        fuel_version['release'], fuel_version['build_number']
    )

    # Fuel cluster is needed only to get releases
    fuel_cluster = fuel_client.list_clusters()[0]
    # Release contains info about operating system
    fuel_release = fuel_client.get_releases_details(fuel_cluster['release_id'])
    cluster_description += 'Cluster configuration: {}\n\n'.format(
        fuel_release['name']
    )

    # Networking parameters
    cluster_network = fuel_client.get_networks(fuel_cluster['id'])
    # Network segmentation
    cluster_ns = cluster_network['networking_parameters']['segmentation_type']
    cluster_description += 'Network segmentation: {}\n\n'.format(
        cluster_ns
    )

    # Cluster nodes: count controllers/computes belonging to this cluster
    controllers = 0
    computes = 0
    for node in fuel_client.list_nodes():
        if node['cluster'] == fuel_cluster['id']:
            if 'controller' in node['roles']:
                controllers += 1
            if 'compute' in node['roles']:
                computes += 1
    cluster_description += 'Total nodes:   {}\n'.format(controllers + computes)
    cluster_description += '+ controllers: {}\n'.format(controllers)
    cluster_description += '+ computes:    {}\n\n'.format(computes)

    # Other cluster options
    cluster_attributes = fuel_client.get_cluster_attributes(fuel_cluster['id'])
    cluster_components = get_enabled_attributes(cluster_attributes,
                                                'additional_components')
    cluster_description += 'Optional components: {}\n\n'.format(
        ', '.join(map(str.capitalize, cluster_components))
    )

    # Storage
    cluster_storage = get_enabled_attributes(cluster_attributes, 'storage')
    cluster_description += 'Storage: {}\n'.format(
        ', '.join(cluster_storage)
    )

    # Display Fuel info and cluster configuration
    print(cluster_description)

    #
    # Find appropriate existing or create new one test run in TestRail
    #

    # Initialize TestRail
    testrail = TestRailProject(
        url=TestRailSettings.url,
        user=TestRailSettings.user,
        password=TestRailSettings.password,
        project=TestRailSettings.project
    )

    # Find milestone matching the Fuel release. Fail fast with a clear
    # message instead of a NameError when it is missing.
    milestone = None
    for ms in testrail.get_milestones():
        if ms['name'] == fuel_version['release']:
            milestone = ms
            break
    if milestone is None:
        raise LookupError('TestRail milestone "{}" not found'.format(
            fuel_version['release']))
    print('Testrail milestone: {}'.format(milestone['name']))

    # Find the "Operation System" configuration matching the release name.
    test_config = None
    for cf in testrail.get_configs():
        if cf['name'] == 'Operation System':
            for ccf in cf['configs']:
                if ccf['name'].lower() in fuel_release['name'].lower():
                    test_config = ccf
                    break
    if test_config is None:
        raise LookupError('No "Operation System" configuration matches '
                          '"{}"'.format(fuel_release['name']))
    print('Testrail configuration: {}'.format(test_config['name']))

    # Get test suite, creating it when absent.
    # BUG FIX: the created suite was previously discarded, so test_suite
    # stayed falsy and test_suite['id'] below crashed.
    test_suite = testrail.get_suite_by_name(rally_name)
    if not test_suite:
        test_suite = testrail.create_suite(
            name=rally_name,
            description='Periodic deployment tests by MOS Infra team.\nSee: '
            'https://jenkins.mosi.mirantis.net/view/Periodic%20(deployment)/'
        )

    # Get test cases for test section in suite
    test_cases = testrail.get_cases(
        suite_id=test_suite['id']
    )
    print('Testrail test suite "{}" contains {} test cases'.format(
        test_suite['name'], len(test_cases))
    )

    job_name = os.environ.get('CUSTOM_JOB', fuel_version['release'] + '.all')

    prefix = os.environ.get('ISO_PREFIX', '')

    # Test plans have names like "<fuel-version> iso #<fuel-build>"
    test_plan_name = '{milestone}{prefix} iso #{iso_number}'.format(
        milestone=milestone['name'],
        prefix=' ' + prefix if prefix else '',
        iso_number=fuel_version['build_number'])

    # Find appropriate test plan, or create an empty one
    test_plan = testrail.get_plan_by_name(test_plan_name)
    if not test_plan:
        test_plan = testrail.add_plan(
            test_plan_name,
            description='{url}/job/{job}/{build}'.format(
                url=PRODUCT_JENKINS['url'],
                job=job_name,
                build=fuel_version['build_number']
            ),
            milestone_id=milestone['id'],
            entries=[]
        )

    # Create test plan entry (run)
    plan_entries = [
        testrail.test_run_struct(
            name=JENKINS_JOB,
            suite_id=test_suite['id'],
            milestone_id=milestone['id'],
            description=cluster_description,
            config_ids=[test_config['id']]
        )
    ]

    # Build a human-readable plan entry name from the cluster topology,
    # segmentation type, storage backends and optional components.
    re_storage = re.compile(r'^([^_]+)')
    plan_entry_name = rally_name
    plan_entry_name += ' ({} controllers, {} computes)'.format(controllers,
                                                               computes)
    plan_entry_name += ': ' + cluster_ns.upper()
    if 'volumes_lvm' in cluster_storage:
        plan_entry_name += '; LVM'
    else:
        plan_entry_name += '; Ceph for '
        plan_entry_name += ', '.join([
            re_storage.match(storage).group(1).capitalize()
            for storage in sorted(cluster_storage)
        ])
    # BUG FIX: comparing a collection with an int ("> 0") is a TypeError on
    # Python 3; the intent is "if any components are enabled".
    if cluster_components:
        plan_entry_name += '; '
        plan_entry_name += ', '.join(map(str.capitalize,
                                         cluster_components))
    # Find appropriate run
    test_run = None
    for e in test_plan['entries']:
        if (e['suite_id'] == test_suite['id']
                and e['name'] == plan_entry_name):
            plan_entry = e
            test_run = get_run_by_config(
                plan_entry['runs'],
                test_suite['id'],
                milestone['id'],
                test_config['id']
            )
            if test_run:
                break
    # ... if not found, create new one
    if not test_run:
        plan_entry = testrail.add_plan_entry(
            plan_id=test_plan['id'],
            name=plan_entry_name,
            suite_id=test_suite['id'],
            config_ids=[test_config['id']],
            runs=plan_entries
        )
        test_run = get_run_by_config(
            plan_entry['runs'],
            test_suite['id'],
            milestone['id'],
            test_config['id']
        )
    print('Using Testrail run "{}" (ID {})'.format(
        test_run['name'],
        test_run['id']
    ))

    # Create list of test case names with ids for further use
    test_cases_exist = {}
    for tc in test_cases:
        test_cases_exist[tc['title']] = tc['id']

    # Will contain test results for publishing
    test_results = []

    # Will contain list of runned tests
    test_cases_run = []

    #
    # Proceed Rally results
    #

    # Get Rally config
    CONF(sys.argv[1:], project='rally')

    # Prepare regexp for component matching (raw string: "\." must stay a
    # literal dot, not an invalid string escape)
    re_comp = re.compile(r'([A-Z]+[a-z]+)(.*)\.')

    # Use first avalable rally deployment
    deployment = rally.db.deployment_list()[0]

    # Get all tasks for specified deployment
    for task in rally.db.task_list(deployment=deployment.uuid):

        # Single task may have many scenarios
        for res in rally.db.task_result_get_all_by_uuid(task.uuid):

            atomic_actions = []

            # Create test case if it is not exists
            if res.key['name'] not in test_cases_exist:
                print('Create new test case: {}'.format(
                    res.key['name'])
                )
                # Get atomic actions as steps if any
                if res.data['raw']:
                    atomic_actions = [{
                        'content': aa,
                        'expected': 'Any positive value (seconds)'
                    } for aa in res.data['raw'][0]['atomic_actions']]

                test_section_name = re_comp.match(res.key['name']).group(1)

                # Check existense of tests section
                test_section = testrail.get_section_by_name(
                    suite_id=test_suite['id'],
                    section_name=test_section_name
                )
                # Create tests section if it doesn't exists
                if not test_section:
                    test_section = testrail.create_section(
                        suite_id=test_suite['id'],
                        name=test_section_name
                    )

                # Create test case object
                test_case = {
                    'title': res.key['name'],
                    'type_id': 1,
                    'priority_id': 5,
                    'custom_test_group': re_comp.match(
                        res.key['name']).group(2),
                    'custom_test_case_description': res.key['name'],
                    'custom_test_case_steps': atomic_actions
                }

                # Create test case in Testrail
                new_test_case = testrail.add_case(
                    section_id=test_section['id'],
                    case=test_case
                )

                # Register test case as existing
                test_cases.append(new_test_case)
                test_cases_exist[res.key['name']] = new_test_case['id']

            # Add test case to list of runned tests
            test_cases_run.append(test_cases_exist[res.key['name']])

            # Create test results (reset the shared list each scenario)
            del test_results[:]

            new_result = {
                'case_id': test_cases_exist[res.key['name']],
                'status_id': 1,
                'version': test_plan_name,
                'elapsed': '{}s'.format(int(res.data["full_duration"]))
                if int(res.data["full_duration"]) > 0 else '0',
            }

            # Each test can have many iterations, so many results
            for result in res.data['raw']:
                # Fail entire test case if any iteration is failed
                if result['error']:
                    new_result['status_id'] = 5

                # Collect info about atomic actions
                #  atomic_actions is array of dicts containing keys
                #  "content" and "expected"
                #  so need to add "actual" and "status_id"
                for aa in atomic_actions:
                    # Get name of already defined atomic action
                    aa_name = aa['content']
                    # Try to get duration of named atomic action
                    aa_duration = result['atomic_actions'].get(aa_name, 0.0)
                    aa_duration = round(float(aa_duration), 3)

                    # Summarize atomic actions durations
                    old_duration = aa.get('actual', '0.0')
                    aa['actual'] = str(float(old_duration) + aa_duration)

                    # Set atomic action status
                    # Assume that it is not failed
                    old_status = aa.get('status_id', 1)
                    # Fail atomic action if it's duration unset
                    # Atomic action doesn't contain key status_id, so it's must
                    # be set explicitly to failure (5) or success (1)
                    if old_status == 1 and aa_duration == 0:
                        aa['status_id'] = 5
                    else:
                        aa['status_id'] = old_status

            new_result['custom_test_case_steps_results'] = atomic_actions

            # Append result to array
            test_results.append(new_result)

            # Send results
            if test_run and test_results:
                print('Send results "{}"'.format(res.key['name']))
                testrail.add_results_for_cases(
                    run_id=test_run['id'],
                    results=test_results
                )
# Example #7
def main():
    """Publish system-test results from Jenkins builds to TestRail.

    Flow: parse CLI options -> collect results from a Jenkins swarm runner
    job or view -> find/create a TestPlan in TestRail -> add TestRuns per
    operating system -> publish results -> optionally report bug statistics.
    See settings.py for TestRail configuration.
    """

    parser = OptionParser(
        description="Publish results of system tests from Jenkins build to "
        "TestRail. See settings.py for configuration.")
    parser.add_option('-j',
                      '--job-name',
                      dest='job_name',
                      default=None,
                      help='Jenkins swarm runner job name')
    parser.add_option('-N',
                      '--build-number',
                      dest='build_number',
                      default='latest',
                      help='Jenkins swarm runner build number')
    parser.add_option('-o',
                      '--one-job',
                      dest='one_job_name',
                      default=None,
                      help=('Process only one job name from the specified '
                            'parent job or view'))
    parser.add_option("-w",
                      "--view",
                      dest="jenkins_view",
                      default=False,
                      help="Get system tests jobs from Jenkins view")
    parser.add_option("-l",
                      "--live",
                      dest="live_report",
                      action="store_true",
                      help="Get tests results from running swarm")
    parser.add_option("-m",
                      "--manual",
                      dest="manual_run",
                      action="store_true",
                      help="Manually add tests cases to TestRun (tested only)")
    parser.add_option("-s",
                      "--statistics",
                      action="store_true",
                      dest="bug_statistics",
                      default=False,
                      help="Make a statistics for bugs linked to TestRail for "
                      "the test run")
    parser.add_option('-c',
                      '--create-plan-only',
                      action="store_true",
                      dest="create_plan_only",
                      default=False,
                      help='Jenkins swarm runner job name')
    parser.add_option("-v",
                      "--verbose",
                      action="store_true",
                      dest="verbose",
                      default=False,
                      help="Enable debug output")

    (options, args) = parser.parse_args()

    if options.verbose:
        logger.setLevel(DEBUG)

    # A live report reads the swarm that is still running, so "latest"
    # must mean the latest *started* build, not the latest finished one.
    if options.live_report and options.build_number == 'latest':
        options.build_number = 'latest_started'

    # STEP #1
    # Initialize TestRail Project and define configuration
    logger.info('Initializing TestRail Project configuration...')
    project = TestRailProject(url=TestRailSettings.url,
                              user=TestRailSettings.user,
                              password=TestRailSettings.password,
                              project=TestRailSettings.project)

    tests_suite = project.get_suite_by_name(TestRailSettings.tests_suite)
    # One entry per configured OS; 'distro' is the lowercase first word of
    # the config name (e.g. "Ubuntu 14.04" -> "ubuntu") and is used to match
    # OS names inside Jenkins job names below.
    operation_systems = [
        {
            'name': config['name'],
            'id': config['id'],
            'distro': config['name'].split()[0].lower()
        }
        for config in project.get_config_by_name('Operation System')['configs']
        if config['name'] in TestRailSettings.operation_systems
    ]
    tests_results = {os['distro']: [] for os in operation_systems}

    # STEP #2
    # Get tests results from Jenkins
    logger.info('Getting tests results from Jenkins...')
    if options.jenkins_view:
        jobs = get_jobs_for_view(options.jenkins_view)
        tests_jobs = [{'name': j, 'number': 'latest'}
                      for j in jobs if 'system_test' in j] if \
            not options.create_plan_only else []
        # NOTE(review): assumes the view contains at least one job with
        # 'runner' in its name; an empty match would raise IndexError here.
        runner_job = [j for j in jobs if 'runner' in j][0]
        runner_build = Build(runner_job, 'latest')
    elif options.job_name:
        runner_build = Build(options.job_name, options.build_number)
        tests_jobs = get_downstream_builds(runner_build.build_data) if \
            not options.create_plan_only else []
    else:
        logger.error("Please specify either Jenkins swarm runner job name (-j)"
                     " or Jenkins view with system tests jobs (-w). Exiting..")
        return

    is_running_builds = False

    for systest_build in tests_jobs:
        if (options.one_job_name
                and options.one_job_name != systest_build['name']):
            logger.debug(
                "Skipping '{0}' because --one-job is specified".format(
                    systest_build['name']))
            continue
        if options.job_name:
            if 'result' not in systest_build.keys():
                logger.debug("Skipping '{0}' job because it does't run tests "
                             "(build #{1} contains no results)".format(
                                 systest_build['name'],
                                 systest_build['number']))
                continue
            if systest_build['result'] is None:
                logger.debug("Skipping '{0}' job (build #{1}) because it's sti"
                             "ll running...".format(
                                 systest_build['name'],
                                 systest_build['number'],
                             ))
                is_running_builds = True
                continue
        # NOTE(review): the loop variable 'os' shadows the stdlib module
        # name inside this function (the module itself is not used here).
        for os in tests_results.keys():
            if os in systest_build['name'].lower():
                tests_results[os].extend(get_tests_results(systest_build, os))

    # STEP #3
    # Create new TestPlan in TestRail (or get existing) and add TestRuns
    milestone, iso_number, prefix = get_version(runner_build.build_data)
    milestone = project.get_milestone_by_name(name=milestone)

    # Plan name pattern: "<milestone> [<prefix>] iso #<iso_number>"
    # (filter drops empty components such as a missing prefix).
    test_plan_name = ' '.join(
        filter(lambda x: bool(x),
               (milestone['name'], prefix, 'iso', '#' + str(iso_number))))
    test_plan = project.get_plan_by_name(test_plan_name)
    if not test_plan:
        test_plan = project.add_plan(test_plan_name,
                                     description='/'.join([
                                         JENKINS['url'], 'job',
                                         '{0}.all'.format(milestone['name']),
                                         str(iso_number)
                                     ]),
                                     milestone_id=milestone['id'],
                                     entries=[])
        logger.info('Created new TestPlan "{0}".'.format(test_plan_name))
    else:
        logger.info('Found existing TestPlan "{0}".'.format(test_plan_name))

    if options.create_plan_only:
        return

    plan_entries = []
    all_cases = project.get_cases(suite_id=tests_suite['id'])
    for os in operation_systems:
        cases_ids = []
        # In manual mode, restrict the run to cases whose test group actually
        # produced a result for this distro.
        if options.manual_run:
            all_results_groups = [r.group for r in tests_results[os['distro']]]
            for case in all_cases:
                if case['custom_test_group'] in all_results_groups:
                    cases_ids.append(case['id'])
        plan_entries.append(
            project.test_run_struct(
                name='{suite_name}'.format(suite_name=tests_suite['name']),
                suite_id=tests_suite['id'],
                milestone_id=milestone['id'],
                description='Results of system tests ({tests_suite}) on is'
                'o #"{iso_number}"'.format(tests_suite=tests_suite['name'],
                                           iso_number=iso_number),
                config_ids=[os['id']],
                include_all=True,
                case_ids=cases_ids))

    # Only add a plan entry if the plan has none for this suite yet; re-fetch
    # the plan afterwards so it includes the newly created runs.
    if not any(entry['suite_id'] == tests_suite['id']
               for entry in test_plan['entries']):
        if project.add_plan_entry(
                plan_id=test_plan['id'],
                suite_id=tests_suite['id'],
                config_ids=[os['id'] for os in operation_systems],
                runs=plan_entries):
            test_plan = project.get_plan(test_plan['id'])

    # STEP #4
    # Upload tests results to TestRail
    logger.info('Uploading tests results to TestRail...')
    for os in operation_systems:
        logger.info('Checking tests results for "{0}"...'.format(os['name']))
        results_to_publish = publish_results(
            project=project,
            milestone_id=milestone['id'],
            test_plan=test_plan,
            suite_id=tests_suite['id'],
            config_id=os['id'],
            results=tests_results[os['distro']])
        logger.debug('Added new results for tests ({os}): {tests}'.format(
            os=os['name'], tests=[r.group for r in results_to_publish]))

    logger.info('Report URL: {0}'.format(test_plan['url']))

    # STEP #5
    # Provide the bugs linked in TestRail for current run as a short statistics
    if options.bug_statistics:
        if is_running_builds:
            logger.info("Some jobs are still running. "
                        "Skipping bug statistics report, please try later.")
        else:
            logger.info("Generating a bug statistics report...")
            make_bug_statistics(tests_results, operation_systems)
# Example #8
def main():
    """Upload test cases to TestRail.

    Like the simpler uploader variants in this module, but also resolves the
    default case priority and the distro list before building descriptions.
    Configuration lives in settings.py.
    """
    parser = OptionParser(description="Upload tests cases to TestRail. "
                          "See settings.py for configuration.")
    parser.add_option("-v", "--verbose", action="store_true", dest="verbose",
                      default=False, help="Enable debug output")
    parser.add_option('-j', '--job-name', dest='job_name', default=None,
                      help='Jenkins swarm runner job name')
    parser.add_option('-N', '--build-number', dest='build_number',
                      default='latest',
                      help='Jenkins swarm runner build number')
    parser.add_option('-o', '--check_one_section', action="store_true",
                      dest='check_one_section', default=False,
                      help='Look for existing test case only in specified '
                      'section of test suite.')

    options, _ = parser.parse_args()

    if options.verbose:
        logger.setLevel(DEBUG)

    project = TestRailProject(url=TestRailSettings.url,
                              user=TestRailSettings.user,
                              password=TestRailSettings.password,
                              project=TestRailSettings.project)

    suite = project.get_suite_by_name(TestRailSettings.tests_suite)
    section = project.get_section_by_name(
        suite_id=suite['id'],
        section_name=TestRailSettings.tests_section)

    milestone = project.get_milestone_by_name(name=TestRailSettings.milestone)

    # TestRail marks exactly one priority as the default; take its id.
    default_priority = [
        priority['id'] for priority in project.get_priorities()
        if priority['is_default'] is True
    ][0]

    # Lowercase first word of each configured OS name, e.g. "Ubuntu 14.04"
    # -> "ubuntu".
    distros = [
        config['name'].split()[0].lower()
        for config in project.get_config_by_name('Operation System')['configs']
        if config['name'] in TestRailSettings.operation_systems
    ]

    if options.job_name:
        tests_groups = get_tests_groups_from_jenkins(
            options.job_name, options.build_number, distros)
    else:
        tests_groups = []

    # If Jenkins job build is specified, but it doesn't have downstream builds
    # with tests groups in jobs names, then skip tests cases uploading because
    # ALL existing tests cases will be uploaded
    if options.job_name and not tests_groups:
        return

    tests_descriptions = get_tests_descriptions(
        milestone_id=milestone['id'],
        tests_include=TestRailSettings.tests_include,
        tests_exclude=TestRailSettings.tests_exclude,
        groups=tests_groups,
        default_test_priority=default_priority)

    upload_tests_descriptions(testrail_project=project,
                              section_id=section['id'],
                              tests=tests_descriptions,
                              check_all_sections=not options.check_one_section)
def get_testrail():
    """Build and return a TestRail project client from TestRailSettings."""
    logger.info('Initializing TestRail Project configuration...')
    settings = TestRailSettings
    return TestRailProject(
        url=settings.url,
        user=settings.user,
        password=settings.password,
        project=settings.project,
    )
# Beispiel #10
# 0
def main():
    """Publish results of system tests from a Jenkins build to TestRail.

    Workflow (mirrors the STEP comments below):
      1. Initialize the TestRail project client and map the 'Operation
         System' configurations to distro keywords.
      2. Collect test results from Jenkins, either from a swarm runner job
         (-j) or from a Jenkins view of system-test jobs (-w).
      3. Find or create a TestPlan for the tested ISO and add per-OS
         TestRuns to it (unless -c stops after plan creation).
      4. Publish the collected results into the plan.
      5. Optionally (-s) append a bug-statistics report to the plan
         description.
    """

    parser = OptionParser(
        description="Publish results of system tests from Jenkins build to "
                    "TestRail. See settings.py for configuration."
    )
    parser.add_option('-j', '--job-name', dest='job_name', default=None,
                      help='Jenkins swarm runner job name')
    parser.add_option('-N', '--build-number', dest='build_number',
                      default='latest',
                      help='Jenkins swarm runner build number')
    parser.add_option('-o', '--one-job', dest='one_job_name',
                      default=None,
                      help=('Process only one job name from the specified '
                            'parent job or view'))
    parser.add_option("-w", "--view", dest="jenkins_view", default=False,
                      help="Get system tests jobs from Jenkins view")
    parser.add_option("-l", "--live", dest="live_report", action="store_true",
                      help="Get tests results from running swarm")
    parser.add_option("-m", "--manual", dest="manual_run", action="store_true",
                      help="Manually add tests cases to TestRun (tested only)")
    parser.add_option("-s", "--statistics", action="store_true",
                      dest="bug_statistics", default=False,
                      help="Make a statistics for bugs linked to TestRail for "
                      "the test run")
    # NOTE(review): this help text looks copy-pasted from -j; it should
    # describe the create-plan-only behaviour.
    parser.add_option('-c', '--create-plan-only', action="store_true",
                      dest="create_plan_only", default=False,
                      help='Jenkins swarm runner job name')
    parser.add_option("-v", "--verbose",
                      action="store_true", dest="verbose", default=False,
                      help="Enable debug output")

    (options, args) = parser.parse_args()

    if options.verbose:
        logger.setLevel(DEBUG)

    # Live reports follow the most recently *started* build instead of the
    # last completed one.
    if options.live_report and options.build_number == 'latest':
        options.build_number = 'latest_started'

    # STEP #1
    # Initialize TestRail Project and define configuration
    logger.info('Initializing TestRail Project configuration...')
    project = TestRailProject(url=TestRailSettings.url,
                              user=TestRailSettings.user,
                              password=TestRailSettings.password,
                              project=TestRailSettings.project)

    tests_suite = project.get_suite_by_name(TestRailSettings.tests_suite)
    # Map each TestRail 'Operation System' configuration to the distro
    # keyword (first word of its name, lowercased) that is matched against
    # Jenkins job names below.
    operation_systems = [{'name': config['name'], 'id': config['id'],
                         'distro': config['name'].split()[0].lower()}
                         for config in project.get_config_by_name(
                             'Operation System')['configs'] if
                         config['name'] in TestRailSettings.operation_systems]
    tests_results = {os['distro']: [] for os in operation_systems}

    # STEP #2
    # Get tests results from Jenkins
    logger.info('Getting tests results from Jenkins...')
    if options.jenkins_view:
        jobs = get_jobs_for_view(options.jenkins_view)
        tests_jobs = [{'name': j, 'number': 'latest'}
                      for j in jobs if 'system_test' in j] if \
            not options.create_plan_only else []
        runner_job = [j for j in jobs if 'runner' in j][0]
        runner_build = Build(runner_job, 'latest')
    elif options.job_name:
        runner_build = Build(options.job_name, options.build_number)
        tests_jobs = get_downstream_builds(runner_build.build_data) if \
            not options.create_plan_only else []
    else:
        logger.error("Please specify either Jenkins swarm runner job name (-j)"
                     " or Jenkins view with system tests jobs (-w). Exiting..")
        return

    is_running_builds = False

    for systest_build in tests_jobs:
        if (options.one_job_name and
                options.one_job_name != systest_build['name']):
            logger.debug("Skipping '{0}' because --one-job is specified"
                         .format(systest_build['name']))
            continue
        if options.job_name:
            # NOTE(review): "does't" in the message below is a typo for
            # "doesn't"; left unchanged here since only comments may change.
            if 'result' not in systest_build.keys():
                logger.debug("Skipping '{0}' job because it does't run tests "
                             "(build #{1} contains no results)".format(
                                 systest_build['name'],
                                 systest_build['number']))
                continue
            if systest_build['result'] is None:
                logger.debug("Skipping '{0}' job (build #{1}) because it's sti"
                             "ll running...".format(systest_build['name'],
                                                    systest_build['number'],))
                is_running_builds = True
                continue
        # Attribute the build's results to the distro whose keyword appears
        # in the job name.
        for os in tests_results.keys():
            if os in systest_build['name'].lower():
                tests_results[os].extend(get_tests_results(systest_build, os))

    # STEP #3
    # Create new TestPlan in TestRail (or get existing) and add TestRuns
    milestone, iso_number, prefix = get_version(runner_build.build_data)
    milestone = project.get_milestone_by_name(name=milestone)

    test_plan_name = ' '.join(
        filter(lambda x: bool(x),
               (milestone['name'], prefix, 'iso', '#' + str(iso_number))))

    test_plan = project.get_plan_by_name(test_plan_name)
    iso_link = '/'.join([JENKINS['url'], 'job',
                         '{0}.all'.format(milestone['name']), str(iso_number)])
    if not test_plan:
        test_plan = project.add_plan(test_plan_name,
                                     description=iso_link,
                                     milestone_id=milestone['id'],
                                     entries=[]
                                     )
        logger.info('Created new TestPlan "{0}".'.format(test_plan_name))
    else:
        logger.info('Found existing TestPlan "{0}".'.format(test_plan_name))

    if options.create_plan_only:
        return

    plan_entries = []
    all_cases = project.get_cases(suite_id=tests_suite['id'])
    for os in operation_systems:
        cases_ids = []
        # For manual runs, restrict each TestRun to the cases whose test
        # groups actually produced results.
        if options.manual_run:
            all_results_groups = [r.group for r in tests_results[os['distro']]]
            for case in all_cases:
                if case['custom_test_group'] in all_results_groups:
                    cases_ids.append(case['id'])
        plan_entries.append(
            project.test_run_struct(
                name='{suite_name}'.format(suite_name=tests_suite['name']),
                suite_id=tests_suite['id'],
                milestone_id=milestone['id'],
                description='Results of system tests ({tests_suite}) on is'
                'o #"{iso_number}"'.format(tests_suite=tests_suite['name'],
                                           iso_number=iso_number),
                config_ids=[os['id']],
                include_all=True,
                case_ids=cases_ids
            )
        )

    # Add the prepared runs only if the plan has no entry for this suite yet.
    if not any(entry['suite_id'] == tests_suite['id']
               for entry in test_plan['entries']):
        if project.add_plan_entry(plan_id=test_plan['id'],
                                  suite_id=tests_suite['id'],
                                  config_ids=[os['id'] for os
                                              in operation_systems],
                                  runs=plan_entries):
            test_plan = project.get_plan(test_plan['id'])

    # STEP #4
    # Upload tests results to TestRail
    logger.info('Uploading tests results to TestRail...')
    for os in operation_systems:
        logger.info('Checking tests results for "{0}"...'.format(os['name']))
        results_to_publish = publish_results(
            project=project,
            milestone_id=milestone['id'],
            test_plan=test_plan,
            suite_id=tests_suite['id'],
            config_id=os['id'],
            results=tests_results[os['distro']]
        )
        logger.debug('Added new results for tests ({os}): {tests}'.format(
            os=os['name'], tests=[r.group for r in results_to_publish]
        ))

    logger.info('Report URL: {0}'.format(test_plan['url']))

    # STEP #5
    # Provide the bugs linked in TestRail for current run as a short statistics
    if options.bug_statistics:
        if is_running_builds:
            logger.info("Some jobs are still running. "
                        "Skipping bug statistics report, please try later.")
        else:
            logger.info("Generating a bug statistics report...")
            bug_results = make_bug_statistics(tests_results, test_plan,
                                              tests_suite, project,
                                              operation_systems)
            project.update_plan(plan_id=test_plan['id'],
                                description=test_plan['description'] + '\n' +
                                bug_results)
def main():
    """Upload test-case descriptions to TestRail.

    Resolves the target suite/section/milestone from TestRailSettings,
    optionally narrows the set of test groups to those actually run by a
    Jenkins swarm job, builds the case descriptions and pushes them to
    TestRail.
    """
    parser = OptionParser(
        description="Upload tests cases to TestRail. "
                    "See settings.py for configuration.")
    parser.add_option("-v", "--verbose",
                      action="store_true", dest="verbose", default=False,
                      help="Enable debug output")
    parser.add_option("-j", "--job-name", dest="job_name", default=None,
                      help="Jenkins swarm runner job name")
    parser.add_option("-N", "--build-number", dest="build_number",
                      default="latest",
                      help="Jenkins swarm runner build number")
    parser.add_option("-o", "--check_one_section",
                      action="store_true", dest="check_one_section",
                      default=False,
                      help="Look for existing test case only in specified "
                           "section of test suite.")

    options = parser.parse_args()[0]

    if options.verbose:
        logger.setLevel(DEBUG)

    client = TestRailProject(url=TestRailSettings.url,
                             user=TestRailSettings.user,
                             password=TestRailSettings.password,
                             project=TestRailSettings.project)

    suite_id = client.get_suite_by_name(TestRailSettings.tests_suite)["id"]
    section = client.get_section_by_name(
        suite_id=suite_id,
        section_name=TestRailSettings.tests_section)

    milestone = client.get_milestone_by_name(name=TestRailSettings.milestone)

    # Distro keywords (first word of each configured OS name, lowercased).
    supported_distros = []
    for config in client.get_config_by_name("Operation System")["configs"]:
        if config["name"] in TestRailSettings.operation_systems:
            supported_distros.append(config["name"].split()[0].lower())

    if options.job_name:
        groups = get_tests_groups_from_jenkins(
            options.job_name, options.build_number, supported_distros)
        # If a Jenkins job build is specified but it has no downstream builds
        # with tests groups in their job names, skip uploading: otherwise
        # ALL existing tests cases would be uploaded.
        if not groups:
            return
    else:
        groups = []

    descriptions = get_tests_descriptions(
        milestone_id=milestone["id"],
        tests_include=TestRailSettings.tests_include,
        tests_exclude=TestRailSettings.tests_exclude,
        groups=groups)

    upload_tests_descriptions(testrail_project=client,
                              section_id=section["id"],
                              tests=descriptions,
                              check_all_sections=not options.check_one_section)
# --- One-off maintenance script: bulk-update custom fields on the TestRail
# test cases of a suite. Target values for the custom fields:
milestone = "8.0"
team = "PCE"
test_case_type = "manual"
complexity = "core"

# Lookup tables mapping human-readable names to TestRail custom-field ids.
# NOTE(review): these numeric ids are specific to one TestRail installation;
# confirm them against the target instance before running.
milestones = {"7.0": 9, "8.0": 10, "6.1": 4, "6.0": 5, "5.1.1": 8,
              "5.1.2": 7, "6.0.1": 6}
qa_teams = {"MOS": 4, "Fuel": 2, "Maintenance": 3, "Framework-CI": 1,
            "Performance": 5, "PCE": 6, "Telco": 7}
complexity_types = {"core": 2, "smoke": 1, "advanced": 3}
types = {"automated": 1, "manual": 7}

# Initialize TestRail project client
client = TestRailProject(url=TestRailSettings.url,
                         user=TestRailSettings.user,
                         password=TestRailSettings.password,
                         project=TestRailSettings.project)

tests_suite = client.get_suite_by_name(TestRailSettings.tests_suite)

cases = client.get_cases(tests_suite["id"])
for case in cases:
    # Debugging leftovers (disabled):
    #if case["id"] == 542603:
    #    print case
    #continue

    # Track whether any field actually changed, so the case only needs to
    # be pushed back to TestRail when something differs.
    need_update = False

    if case["custom_qa_team"] != qa_teams[team]:
        case["custom_qa_team"] = qa_teams[team]
        need_update = True
# Beispiel #13
# 0
def main():
    """Publish results of system tests from a Jenkins build to TestRail.

    Black-formatted variant of the swarm-runner publisher: collects results
    from Jenkins (swarm runner job -j or view -w), finds or creates a
    TestPlan for the tested ISO, adds per-OS TestRuns, publishes the
    results, and can append a bug-statistics report (-s).
    """

    parser = OptionParser(
        description="Publish results of system tests from Jenkins build to "
        "TestRail. See settings.py for configuration."
    )
    parser.add_option("-j", "--job-name", dest="job_name", default=None, help="Jenkins swarm runner job name")
    parser.add_option(
        "-N", "--build-number", dest="build_number", default="latest", help="Jenkins swarm runner build number"
    )
    parser.add_option(
        "-o",
        "--one-job",
        dest="one_job_name",
        default=None,
        help=("Process only one job name from the specified " "parent job or view"),
    )
    parser.add_option(
        "-w", "--view", dest="jenkins_view", default=False, help="Get system tests jobs from Jenkins view"
    )
    parser.add_option(
        "-l", "--live", dest="live_report", action="store_true", help="Get tests results from running swarm"
    )
    parser.add_option(
        "-m",
        "--manual",
        dest="manual_run",
        action="store_true",
        help="Manually add tests cases to TestRun (tested only)",
    )
    parser.add_option(
        "-s",
        "--statistics",
        action="store_true",
        dest="bug_statistics",
        default=False,
        help="Make a statistics for bugs linked to TestRail for " "the test run",
    )
    # NOTE(review): this help text looks copy-pasted from -j; it should
    # describe the create-plan-only behaviour.
    parser.add_option(
        "-c",
        "--create-plan-only",
        action="store_true",
        dest="create_plan_only",
        default=False,
        help="Jenkins swarm runner job name",
    )
    parser.add_option("-v", "--verbose", action="store_true", dest="verbose", default=False, help="Enable debug output")

    (options, args) = parser.parse_args()

    if options.verbose:
        logger.setLevel(DEBUG)

    # Live reports follow the most recently *started* build instead of the
    # last completed one.
    if options.live_report and options.build_number == "latest":
        options.build_number = "latest_started"

    # STEP #1
    # Initialize TestRail Project and define configuration
    logger.info("Initializing TestRail Project configuration...")
    project = TestRailProject(
        url=TestRailSettings.url,
        user=TestRailSettings.user,
        password=TestRailSettings.password,
        project=TestRailSettings.project,
    )

    tests_suite = project.get_suite_by_name(TestRailSettings.tests_suite)
    # Map each TestRail 'Operation System' configuration to the distro
    # keyword (first word of its name, lowercased) matched against Jenkins
    # job names below.
    operation_systems = [
        {"name": config["name"], "id": config["id"], "distro": config["name"].split()[0].lower()}
        for config in project.get_config_by_name("Operation System")["configs"]
        if config["name"] in TestRailSettings.operation_systems
    ]
    tests_results = {os["distro"]: [] for os in operation_systems}

    # STEP #2
    # Get tests results from Jenkins
    logger.info("Getting tests results from Jenkins...")
    if options.jenkins_view:
        jobs = get_jobs_for_view(options.jenkins_view)
        tests_jobs = (
            [{"name": j, "number": "latest"} for j in jobs if "system_test" in j]
            if not options.create_plan_only
            else []
        )
        runner_job = [j for j in jobs if "runner" in j][0]
        runner_build = Build(runner_job, "latest")
    elif options.job_name:
        runner_build = Build(options.job_name, options.build_number)
        tests_jobs = get_downstream_builds(runner_build.build_data) if not options.create_plan_only else []
    else:
        logger.error(
            "Please specify either Jenkins swarm runner job name (-j)"
            " or Jenkins view with system tests jobs (-w). Exiting.."
        )
        return

    is_running_builds = False

    for systest_build in tests_jobs:
        if options.one_job_name and options.one_job_name != systest_build["name"]:
            logger.debug("Skipping '{0}' because --one-job is specified".format(systest_build["name"]))
            continue
        if options.job_name:
            # NOTE(review): "does't" in the message below is a typo for
            # "doesn't"; left unchanged here since only comments may change.
            if "result" not in systest_build.keys():
                logger.debug(
                    "Skipping '{0}' job because it does't run tests "
                    "(build #{1} contains no results)".format(systest_build["name"], systest_build["number"])
                )
                continue
            if systest_build["result"] is None:
                logger.debug(
                    "Skipping '{0}' job (build #{1}) because it's sti"
                    "ll running...".format(systest_build["name"], systest_build["number"])
                )
                is_running_builds = True
                continue
        # Attribute the build's results to the distro whose keyword appears
        # in the job name.
        for os in tests_results.keys():
            if os in systest_build["name"].lower():
                tests_results[os].extend(get_tests_results(systest_build, os))

    # STEP #3
    # Create new TestPlan in TestRail (or get existing) and add TestRuns
    milestone, iso_number, prefix = get_version(runner_build.build_data)
    milestone = project.get_milestone_by_name(name=milestone)

    test_plan_name = " ".join(filter(lambda x: bool(x), (milestone["name"], prefix, "iso", "#" + str(iso_number))))

    test_plan = project.get_plan_by_name(test_plan_name)
    iso_link = "/".join([JENKINS["url"], "job", "{0}.all".format(milestone["name"]), str(iso_number)])
    if not test_plan:
        test_plan = project.add_plan(test_plan_name, description=iso_link, milestone_id=milestone["id"], entries=[])
        logger.info('Created new TestPlan "{0}".'.format(test_plan_name))
    else:
        logger.info('Found existing TestPlan "{0}".'.format(test_plan_name))

    if options.create_plan_only:
        return

    plan_entries = []
    all_cases = project.get_cases(suite_id=tests_suite["id"])
    for os in operation_systems:
        cases_ids = []
        # For manual runs, restrict each TestRun to the cases whose test
        # groups actually produced results.
        if options.manual_run:
            all_results_groups = [r.group for r in tests_results[os["distro"]]]
            for case in all_cases:
                if case["custom_test_group"] in all_results_groups:
                    cases_ids.append(case["id"])
        plan_entries.append(
            project.test_run_struct(
                name="{suite_name}".format(suite_name=tests_suite["name"]),
                suite_id=tests_suite["id"],
                milestone_id=milestone["id"],
                description="Results of system tests ({tests_suite}) on is"
                'o #"{iso_number}"'.format(tests_suite=tests_suite["name"], iso_number=iso_number),
                config_ids=[os["id"]],
                include_all=True,
                case_ids=cases_ids,
            )
        )

    # Add the prepared runs only if the plan has no entry for this suite yet.
    if not any(entry["suite_id"] == tests_suite["id"] for entry in test_plan["entries"]):
        if project.add_plan_entry(
            plan_id=test_plan["id"],
            suite_id=tests_suite["id"],
            config_ids=[os["id"] for os in operation_systems],
            runs=plan_entries,
        ):
            test_plan = project.get_plan(test_plan["id"])

    # STEP #4
    # Upload tests results to TestRail
    logger.info("Uploading tests results to TestRail...")
    for os in operation_systems:
        logger.info('Checking tests results for "{0}"...'.format(os["name"]))
        results_to_publish = publish_results(
            project=project,
            milestone_id=milestone["id"],
            test_plan=test_plan,
            suite_id=tests_suite["id"],
            config_id=os["id"],
            results=tests_results[os["distro"]],
        )
        logger.debug(
            "Added new results for tests ({os}): {tests}".format(
                os=os["name"], tests=[r.group for r in results_to_publish]
            )
        )

    logger.info("Report URL: {0}".format(test_plan["url"]))

    # STEP #5
    # Provide the bugs linked in TestRail for current run as a short statistics
    if options.bug_statistics:
        if is_running_builds:
            logger.info("Some jobs are still running. " "Skipping bug statistics report, please try later.")
        else:
            logger.info("Generating a bug statistics report...")
            bug_results = make_bug_statistics(tests_results, test_plan, tests_suite, project, operation_systems)
            project.update_plan(plan_id=test_plan["id"], description=test_plan["description"] + "\n" + bug_results)
# Beispiel #14
# 0
def main():
    """Publish results of system tests from a Jenkins matrix build to TestRail.

    Walks the 'runs' of a multi-configuration Jenkins build, groups the
    results by milestone and ISO number (parsed from each build's
    description), then creates/updates one TestPlan per (milestone, ISO)
    pair and uploads the per-OS results into it.
    """
    parser = OptionParser(
        description="Publish results of system tests from Jenkins build to "
                    "TestRail. See settings.py for configuration."
    )
    parser.add_option('-j', '--job-name', dest='job_name', default=None,
                      help='Jenkins swarm runner job name')
    parser.add_option('-N', '--build-number', dest='build_number',
                      default='latest',
                      help='Jenkins swarm runner build number')
    parser.add_option("-l", "--live", dest="live_report", action="store_true",
                      help="Get tests results from running swarm")
    parser.add_option("-v", "--verbose",
                      action="store_true", dest="verbose", default=False,
                      help="Enable debug output")

    (options, args) = parser.parse_args()

    if options.verbose:
        logger.setLevel(DEBUG)

    # Live reports follow the most recently *started* build instead of the
    # last completed one.
    if options.live_report and options.build_number == 'latest':
        build_number = 'latest_started'
    else:
        build_number = options.build_number

    # STEP #1
    # Initialize TestRail Project and define configuration
    logger.info('Initializing TestRail Project configuration...')
    project = TestRailProject(url=TestRailSettings.url,
                              user=TestRailSettings.user,
                              password=TestRailSettings.password,
                              project=TestRailSettings.project)
    logger.info('Initializing TestRail Project configuration... done')

    operation_systems = [{'name': config['name'], 'id': config['id'],
                          'distro': config['name'].split()[0].lower()}
                         for config in project.get_config_by_name(
                             'Operation System')['configs']]
    # OS configurations expected for each supported milestone.
    # NOTE(review): a build whose milestone is missing from this dict raises
    # KeyError at the os_mile lookups below.
    os_mile = {'6.1': ['Centos 6.5', 'Ubuntu 14.04'],
               '6.0.1': ['Centos 6.5', 'Ubuntu 12.04']}

    # Nested mapping: milestone -> iso number -> OS config id -> results.
    tests_results = {}

    # STEP #2
    # Get tests results from Jenkins
    runner_build = Build(options.job_name, build_number)
    runs = runner_build.build_data['runs']

    # Analyze each test individually
    for run_one in runs:
        if '5.1' in run_one['url']:
            continue  # Release 5.1 to skip
        tests_result = get_job_info(run_one['url'])
        if not tests_result['description']:
            continue  # Not completed results to skip
        if 'skipping' in tests_result['description']:
            continue  # Not performed tests to skip
        # The first token of the build description is parsed as
        # '<milestone>-<iso_number>'.
        tests_job = {'result': tests_result['result'],
                     'name': (options.job_name + '/' +
                              tests_result['url'].split('/')[-3]),
                     'number': int(tests_result['url'].split('/')[-2]),
                     'mile': (tests_result['description'].
                              split()[0].split('-')[0]),
                     'iso': (int(tests_result['description'].
                             split()[0].split('-')[1]))}
        if tests_job['mile'] not in tests_results:
            tests_results[tests_job['mile']] = {}
        test_mile = tests_results[tests_job['mile']]
        if tests_job['iso'] not in test_mile:
            test_mile[tests_job['iso']] = {}
        test_iso = test_mile[tests_job['iso']]
        # Attribute the results to the OS whose distro keyword appears in
        # the job name AND which is expected for this milestone.
        for os in operation_systems:
            if os['distro'] in tests_job['name'].lower() and\
                    os['name'] in os_mile[tests_job['mile']]:
                if os['id'] not in test_iso:
                    (test_iso[os['id']]) = []
                test_os_id = test_iso[os['id']]
                test_os_id.extend(get_tests_results(tests_job))

    # STEP #3
    # Create new TestPlan in TestRail (or get existing) and add TestRuns
    for mile in tests_results:
        # Suite names are per-milestone, e.g. '<base suite name>6.1'.
        mile_tests_suite = '{0}{1}'.format(TestRailSettings.tests_suite, mile)
        logger.info(mile_tests_suite)
        tests_suite = project.get_suite_by_name(mile_tests_suite)
        milestone = project.get_milestone_by_name(name=mile)
        for iso_number in tests_results.get(mile, {}):
            # Create new TestPlan name check the same name in testrail
            test_plan_name = '{milestone} iso #{iso_number}'.format(
                milestone=milestone['name'],
                iso_number=iso_number)
            test_plan = project.get_plan_by_name(test_plan_name)
            if not test_plan:
                test_plan = project.add_plan(
                    test_plan_name,
                    description='/'.join([JENKINS['url'],
                                          'job',
                                          '{0}.all'.format(milestone['name']),
                                          str(iso_number)]),
                    milestone_id=milestone['id'],
                    entries=[])
                logger.info('Created new TestPlan "{0}".'
                            .format(test_plan_name))
            else:
                logger.info('Found existing TestPlan "{0}".'
                            .format(test_plan_name))
            plan_entries = []
            # Create a test plan entry
            config_ids = []
            for os in operation_systems:
                if os['name'] in os_mile[mile]:
                    config_ids.append(os['id'])
                    cases_ids = []
                    plan_entries.append(
                        project.test_run_struct(
                            name=tests_suite['name'],
                            suite_id=tests_suite['id'],
                            milestone_id=milestone['id'],
                            description=('Results of system tests ({t_suite})'
                                         ' on iso #"{iso_number}"'
                                         .format(t_suite=tests_suite['name'],
                                                 iso_number=iso_number)),
                            config_ids=[os['id']],
                            include_all=True,
                            case_ids=cases_ids))
            # Create a test plan entry with the test run
            run = find_run_by_name(test_plan, tests_suite['name'])
            if not run:
                logger.info('Adding a test plan entry with test run %s ...',
                            tests_suite['name'])
                entry = project.add_plan_entry(plan_id=test_plan['id'],
                                               suite_id=tests_suite['id'],
                                               config_ids=config_ids,
                                               runs=plan_entries)
                logger.info('The test plan entry has been added.')
                run = entry['runs'][0]
            test_plan = project.get_plan(test_plan['id'])

            # STEP #4
            # Upload tests results to TestRail
            logger.info('Uploading tests results to TestRail...')
            for os_id in tests_results.get(mile, {})\
                    .get(iso_number, {}):
                logger.info('Checking tests results for %s...',
                            project.get_config(os_id)['name'])
                tests_added = publish_results(
                    project=project,
                    milestone_id=milestone['id'],
                    test_plan=test_plan,
                    suite_id=tests_suite['id'],
                    config_id=os_id,
                    results=tests_results[mile][iso_number][os_id])
                logger.debug('Added new results for tests (%s): %s',
                             project.get_config(os_id)['name'],
                             [r.group for r in tests_added])

            logger.info('Report URL: %s', test_plan['url'])
def main():
    """Publish the results of Tempest tests in TestRail.

    Parses a Tempest XML report (or marks all suite tests as "blocked"/"in
    progress"), finds or creates the test plan for the given ISO number,
    ensures a 'Tempest - <run name>' entry exists in it, and uploads the
    results in parallel with retries.

    Raises:
        optparse.OptionValueError: if the run name, ISO number, or (when
            neither -b nor -t is given) the report path is missing.
    """
    parser = optparse.OptionParser(
        description='Publish the results of Tempest tests in TestRail')
    parser.add_option('-r', '--run-name', dest='run_name',
                      help='The name of a test run. '
                           'The name should describe the configuration '
                           'of the environment where Tempest tests were run')
    parser.add_option('-i', '--iso', dest='iso_number', help='ISO number')
    parser.add_option('-p', '--path-to-report', dest='path',
                      help='The path to the Tempest XML report')
    parser.add_option('-c', '--conf', dest='config', default='Ubuntu 14.04',
                      help='The name of one of the configurations')
    parser.add_option('-m', '--multithreading', dest='threads_count',
                      default=100, help='The count of threads '
                                        'for uploading the test results')
    parser.add_option('-b', '--block-all-tests',
                      dest='all_tests_blocked', action='store_true',
                      help='Mark all Tempest tests as "blocked"')
    parser.add_option('-t', '--tests-in-progress',
                      dest='tests_in_progress', action='store_true',
                      help='Mark all Tempest tests as "in progress"')

    (options, args) = parser.parse_args()

    if options.run_name is None:
        raise optparse.OptionValueError('No run name was specified!')
    if options.iso_number is None:
        raise optparse.OptionValueError('No ISO number was specified!')
    if (options.path is None and
            not options.all_tests_blocked and not options.tests_in_progress):
        raise optparse.OptionValueError('No path to the Tempest '
                                        'XML report was specified!')

    # STEP #1
    # Initialize TestRail project client
    LOG.info('Initializing TestRail project client...')
    client = TestRailProject(url=TestRailSettings.url,
                             user=TestRailSettings.user,
                             password=TestRailSettings.password,
                             project=TestRailSettings.project)
    LOG.info('TestRail project client has been initialized.')

    tests_suite = client.get_suite_by_name(TestRailSettings.tests_suite)
    LOG.info('Tests suite is "{0}".'.format(tests_suite['name']))

    # STEP #2
    # Parse the test results
    if options.all_tests_blocked:
        test_results = mark_all_tests_as_blocked(client, tests_suite)
    elif options.tests_in_progress:
        test_results = mark_all_tests_as_in_progress(client, tests_suite)
    else:
        LOG.info('Parsing the test results...')
        test_results = parse_xml_report(options.path)
        LOG.info('The test results have been parsed.')

    # STEP #3
    # Create new test plan (or find existing)
    milestone = client.get_milestone_by_name(TestRailSettings.milestone)
    test_plan_name = '{0} iso #{1}'.format(milestone['name'],
                                           options.iso_number)
    LOG.info('Test plan name is "{0}".'.format(test_plan_name))

    LOG.info('Trying to find test plan "{0}"...'.format(test_plan_name))
    test_plan = client.get_plan_by_name(test_plan_name)
    if not test_plan:
        LOG.info('The test plan not found. Creating one...')
        url = '/job/{0}.all/{1}'.format(milestone['name'], options.iso_number)
        description = urlparse.urljoin(JENKINS['url'], url)
        test_plan = client.add_plan(test_plan_name,
                                    description=description,
                                    milestone_id=milestone['id'],
                                    entries=[])
        LOG.info('The test plan has been created.')
    else:
        LOG.info('The test plan found.')

    # Get ID of each OS from list "TestRailSettings.operation_systems"
    config_ids = []
    for os_name in TestRailSettings.operation_systems:
        for conf in client.get_config_by_name('Operation System')['configs']:
            if conf['name'] == os_name:
                config_ids.append(conf['id'])
                break

    # Define test runs for CentOS and Ubuntu
    run_name = 'Tempest - ' + options.run_name
    runs = []
    for conf_id in config_ids:
        run = client.test_run_struct(name=run_name,
                                     suite_id=tests_suite['id'],
                                     milestone_id=milestone['id'],
                                     description='Tempest results',
                                     config_ids=[conf_id])
        runs.append(run)

    # Create a test plan entry with the test runs
    run = find_run_by_name_and_config_in_test_plan(test_plan,
                                                   run_name, options.config)
    if not run:
        LOG.info('Adding a test plan entry with test run '
                 '"{0} ({1})" ...'.format(run_name, options.config))
        entry = client.add_plan_entry(plan_id=test_plan['id'],
                                      suite_id=tests_suite['id'],
                                      config_ids=config_ids,
                                      runs=runs,
                                      name=run_name)
        LOG.info('The test plan entry has been added.')
        run = find_run_by_config_in_test_plan_entry(entry, options.config)

    # STEP #4
    # Upload the test results to TestRail for the specified test run
    LOG.info('Uploading the test results to TestRail...')
    tries_count = 10
    # BUG FIX: optparse delivers option values as strings when no 'type' is
    # declared, so '-m 50' used to arrive as the string '50' and broke both
    # joblib.Parallel(n_jobs=...) and the back-off arithmetic below. Cast
    # once here (also accepts the int default of 100).
    threads_count = int(options.threads_count)
    while tries_count > 0:
        try:
            joblib.Parallel(n_jobs=threads_count)(joblib.delayed(
                upload_test_result)(client, run, r) for r in test_results)
            break
        except Exception as e:
            tries_count -= 1
            # Back off by shrinking the worker pool before retrying.
            threads_count = int(threads_count * 3 / 4)

            msg = 'Can not upload Tempest results to TestRail, error: {0}'
            LOG.info(msg.format(e))

            # wait while TestRail will be ready for new iteration
            time.sleep(10)
    else:
        # The while/else branch runs only when every retry failed.
        LOG.error('Failed to upload Tempest results to TestRail: '
                  'retry attempts exhausted.')

    LOG.info('The results of Tempest tests have been uploaded.')
    LOG.info('Report URL: {0}'.format(test_plan['url']))
# Beispiel #16
# 0
def main():
    """Publish Tempest test results to TestRail.

    Parses command-line options, locates (or creates) the test plan and
    test-run entry for the given ISO build, then uploads the parsed XML
    results (or blocked/in-progress markers) to TestRail, retrying the
    upload with a shrinking thread pool on transient API failures.
    """
    parser = optparse.OptionParser(
        description='Publish the results of Tempest tests in TestRail')
    parser.add_option('-r',
                      '--run-name',
                      dest='run_name',
                      help='The name of a test run. '
                      'The name should describe the configuration '
                      'of the environment where Tempest tests were run')
    parser.add_option('-i', '--iso', dest='iso_number', help='ISO number')
    parser.add_option('-p',
                      '--path-to-report',
                      dest='path',
                      help='The path to the Tempest XML report')
    parser.add_option('-c',
                      '--conf',
                      dest='config',
                      default='Ubuntu 14.04',
                      help='The name of one of the configurations')
    # type='int' is required: without it a value passed on the command line
    # is a string, and the retry logic's arithmetic on it would misbehave.
    parser.add_option('-m',
                      '--multithreading',
                      dest='threads_count',
                      type='int',
                      default=100,
                      help='The count of threads '
                      'for uploading the test results')
    parser.add_option('-b',
                      '--block-all-tests',
                      dest='all_tests_blocked',
                      action='store_true',
                      help='Mark all Tempest tests as "blocked"')
    parser.add_option('-t',
                      '--tests-in-progress',
                      dest='tests_in_progress',
                      action='store_true',
                      help='Mark all Tempest tests as "in progress"')

    (options, args) = parser.parse_args()

    if options.run_name is None:
        raise optparse.OptionValueError('No run name was specified!')
    if options.iso_number is None:
        raise optparse.OptionValueError('No ISO number was specified!')
    if (options.path is None and not options.all_tests_blocked
            and not options.tests_in_progress):
        raise optparse.OptionValueError('No path to the Tempest '
                                        'XML report was specified!')

    # STEP #1
    # Initialize TestRail project client
    LOG.info('Initializing TestRail project client...')
    client = TestRailProject(url=TestRailSettings.url,
                             user=TestRailSettings.user,
                             password=TestRailSettings.password,
                             project=TestRailSettings.project)
    LOG.info('TestRail project client has been initialized.')

    tests_suite = client.get_suite_by_name(TestRailSettings.tests_suite)
    LOG.info('Tests suite is "{0}".'.format(tests_suite['name']))

    # STEP #2
    # Parse the test results (or synthesize blocked/in-progress results).
    if options.all_tests_blocked:
        test_results = mark_all_tests_as_blocked(client, tests_suite)
    elif options.tests_in_progress:
        test_results = mark_all_tests_as_in_progress(client, tests_suite)
    else:
        LOG.info('Parsing the test results...')
        test_results = parse_xml_report(options.path)
        LOG.info('The test results have been parsed.')

    # STEP #3
    # Create new test plan (or find existing)
    milestone = client.get_milestone_by_name(TestRailSettings.milestone)
    test_plan_name = '{0} iso #{1}'.format(milestone['name'],
                                           options.iso_number)
    LOG.info('Test plan name is "{0}".'.format(test_plan_name))

    LOG.info('Trying to find test plan "{0}"...'.format(test_plan_name))
    test_plan = client.get_plan_by_name(test_plan_name)
    if not test_plan:
        LOG.info('The test plan not found. Creating one...')
        url = '/job/{0}.all/{1}'.format(milestone['name'], options.iso_number)
        description = urlparse.urljoin(JENKINS['url'], url)
        test_plan = client.add_plan(test_plan_name,
                                    description=description,
                                    milestone_id=milestone['id'],
                                    entries=[])
        LOG.info('The test plan has been created.')
    else:
        LOG.info('The test plan found.')

    # Get ID of each OS from list "TestRailSettings.operation_systems"
    config_ids = []
    for os_name in TestRailSettings.operation_systems:
        for conf in client.get_config_by_name('Operation System')['configs']:
            if conf['name'] == os_name:
                config_ids.append(conf['id'])
                break

    # Define one test run per configured operating system.
    run_name = 'Tempest - ' + options.run_name
    runs = []
    for conf_id in config_ids:
        run = client.test_run_struct(name=run_name,
                                     suite_id=tests_suite['id'],
                                     milestone_id=milestone['id'],
                                     description='Tempest results',
                                     config_ids=[conf_id])
        runs.append(run)

    # Create a test plan entry with the test runs (reuse an existing run
    # with the same name/config if the plan already contains one).
    run = find_run_by_name_and_config_in_test_plan(test_plan, run_name,
                                                   options.config)
    if not run:
        LOG.info('Adding a test plan entry with test run '
                 '"{0} ({1})" ...'.format(run_name, options.config))
        entry = client.add_plan_entry(plan_id=test_plan['id'],
                                      suite_id=tests_suite['id'],
                                      config_ids=config_ids,
                                      runs=runs,
                                      name=run_name)
        LOG.info('The test plan entry has been added.')
        run = find_run_by_config_in_test_plan_entry(entry, options.config)

    # STEP #4
    # Upload the test results to TestRail for the specified test run.
    # On failure, retry with 3/4 of the previous thread count (TestRail
    # tends to reject bursts), never dropping below a single thread.
    LOG.info('Uploading the test results to TestRail...')
    tries_count = 10
    threads_count = options.threads_count
    while tries_count > 0:
        try:
            joblib.Parallel(n_jobs=threads_count)(
                joblib.delayed(upload_test_result)(client, run, r)
                for r in test_results)
            break
        except Exception as e:
            tries_count -= 1
            # Back off: shrink the pool, but keep at least one worker so
            # joblib.Parallel(n_jobs=...) stays valid.
            threads_count = max(1, threads_count * 3 // 4)

            msg = 'Can not upload Tempest results to TestRail, error: {0}'
            LOG.info(msg.format(e))

            # wait while TestRail will be ready for new iteration
            time.sleep(10)
    else:
        # All retries exhausted without a successful upload.
        LOG.info('Can not upload Tempest results to TestRail, error: '
                 'all retries exhausted')
        return

    LOG.info('The results of Tempest tests have been uploaded.')
    LOG.info('Report URL: {0}'.format(test_plan['url']))