def get_sub_builds(build_number, job_name=JENKINS.get('job_name'),
                   jenkins_url=JENKINS.get('url')):
    """Collect test info for every sub build of a runner build.

    :param build_number: int - Jenkins build number
    :param job_name: str - Jenkins job name
    :param jenkins_url: str - Jenkins http url
    :return: tuple (sub_builds, parent_build_info) where sub_builds is a
             list of dicts (each enriched in place with 'test_data',
             'description' and 'failure_reasons') or None when the parent
             build info or its 'subBuilds' entry is unavailable, and
             parent_build_info is the raw Jenkins build data (or None)
    """
    parent_info = Build(job_name, build_number).get_build_data(depth=0)
    children = parent_info.get('subBuilds') if parent_info else None
    # Enrich each sub build dict in place; sub builds without parsable
    # test data are left untouched.
    for child in children or ():
        data = get_build_test_data(child.get('buildNumber'),
                                   child.get('jobName'),
                                   jenkins_url)
        if not data:
            continue
        child['test_data'] = data
        child['description'] = data.get('job_description')
        child['failure_reasons'] = get_build_failure_reasons(data)
    return children, parent_info
def get_tests_groups_from_jenkins(runner_name, build_number, distros):
    """Map each sub job's suffix to the test group found in its console.

    :param runner_name: str - Jenkins runner job name
    :param build_number: int/str - runner build number
    :param distros: iterable of distro names used to split job suffixes
    :return: dict {job_suffix: test_group}
    """
    runner = Build(runner_name, build_number)
    result = {}
    for sub in runner.build_data['subBuilds']:
        if sub['result'] is None:
            logger.debug("Skipping '{0}' job (build #{1}) because it's still "
                         "running...".format(sub['jobName'],
                                             sub['buildNumber'],))
            continue
        # Scan the job console for '--group=' arguments of
        # run_system_test.py invocations.
        console = Build(sub['jobName'], sub['buildNumber']).get_job_console()
        groups = []
        for line in console:
            if 'run_system_test.py' not in line:
                continue
            for token in line.split():
                if '--group=' in token:
                    groups.append(token.split('=')[1])
        if not groups:
            logger.error("No test group found in console of the job {0}/{1}"
                         .format(sub['jobName'], sub['buildNumber']))
            continue
        # Use the last group (there can be several groups in upgrade jobs)
        test_group = groups[-1]
        # Derive the job suffix: everything after '.<distro>.' when a known
        # distro is present, otherwise the last dot-separated component.
        job_name = sub['jobName']
        for distro in distros:
            if distro in job_name:
                job_suffix = job_name.split('.' + distro + '.')[-1]
                break
        else:
            job_suffix = job_name.split('.')[-1]
        result[job_suffix] = test_group
    return result
def get_test_build(build_name, build_number, check_rebuild=False,
                   force_rebuild_search=False):
    """Get test data from Jenkins job build.

    :param build_name: str - Jenkins job name
    :param build_number: int - Jenkins build number (used in arithmetic
        with rebuild numbers, so it must be an int, not a string)
    :param check_rebuild: bool, if True then look for newer job rebuild(s)
    :param force_rebuild_search: bool, if True then force rebuild(s) search
    :return: Build - the original build, or its rebuild carrying the same
        ISO magnet link when one is found
    """
    test_build = Build(build_name, build_number)
    first_case = test_build.test_data()['suites'][0]['cases'].pop()['name']
    if (force_rebuild_search or first_case == 'jenkins') and check_rebuild:
        iso_magnet = get_job_parameter(test_build.build_data, 'MAGNET_LINK')
        if not iso_magnet:
            return test_build
        latest_build_number = Build(build_name, 'latest').number
        builds_to_check = list(range(build_number + 1,
                                     latest_build_number + 1))
        if force_rebuild_search:
            # Prefer the newest rebuild when the search is forced
            builds_to_check.reverse()
        for n in builds_to_check:
            test_rebuild = Build(build_name, n)
            # A rebuild of the same test run is identified by sharing the
            # same ISO magnet link parameter
            if get_job_parameter(test_rebuild.build_data, 'MAGNET_LINK') \
                    == iso_magnet:
                logger.debug("Found test job rebuild: "
                             "{0}".format(test_rebuild.url))
                return test_rebuild
    return test_build
def get_sub_builds(build_number, job_name=JENKINS.get('job_name'),
                   jenkins_url=JENKINS.get('url')):
    """Gather all sub build info of a runner build.

    :param build_number: int - Jenkins build number
    :param job_name: str - Jenkins job_name
    :param jenkins_url: str - Jenkins http url
    :return: tuple (sub_builds, build_info); sub_builds is a list of dicts
             {build_info, test_data, failure_reasons} or None when the
             build info cannot be fetched or has no sub builds
    """
    build = Build(job_name, build_number)
    build_info = build.get_build_data(depth=0)
    subs = build_info.get('subBuilds') if build_info else None
    if subs:
        for sub in subs:
            sub_test_data = get_build_test_data(sub.get('buildNumber'),
                                                sub.get('jobName'),
                                                jenkins_url)
            # Only annotate sub builds whose test results were retrievable
            if sub_test_data:
                sub.update(
                    test_data=sub_test_data,
                    description=sub_test_data.get('job_description'),
                    failure_reasons=get_build_failure_reasons(sub_test_data))
    return subs, build_info
def generate_test_plan_name(job_name, build_number):
    """Build a TestPlan name from the ISO image version found in the
    Jenkins job build parameters (e.g. '<milestone> <prefix> iso #<n>')."""
    build = Build(job_name, build_number)
    milestone, iso_number, prefix = get_version(build.build_data)
    parts = (milestone, prefix, 'iso', '#' + str(iso_number))
    # Falsy parts (e.g. an empty prefix) are dropped from the name
    return ' '.join(part for part in parts if part)
def get_tests_groups_from_jenkins(runner_name, build_number, distros):
    """Map each (sub) job's suffix to its test group and job metadata.

    :param runner_name: str - Jenkins runner job name
    :param build_number: int/str - runner build number
    :param distros: iterable of distro names used to split job suffixes
    :return: dict {job_suffix: {'group', 'job_info', 'env_vars'}}
    """
    runner_build = Build(runner_name, build_number)
    res = {}
    # A runner without sub builds (single-job run) is processed as a
    # one-element list containing the runner's own build data.
    sub_builds = \
        runner_build.build_data.get('subBuilds', [runner_build.build_data])
    for b in sub_builds:
        if b['result'] is None:
            # BUG FIX: when 'b' is the runner's own build data it has no
            # 'jobName'/'buildNumber' keys, so direct indexing raised
            # KeyError; fall back to the runner identity instead.
            logger.debug("Skipping '{0}' job (build #{1}) because it's still "
                         "running...".format(
                             b.get('jobName', runner_name),
                             b.get('buildNumber', build_number),))
            continue
        # Get the test group from the console of the job
        # Get the job suffix
        if b.get('jobName'):
            z = Build(b['jobName'], b['buildNumber'])
            console = z.get_job_console()
            job_name = b['jobName']
            job_info = z.job_info
            env_vars = z.injected_vars
        else:
            console = runner_build.get_job_console()
            job_name = runner_build.name
            job_info = runner_build.job_info
            env_vars = runner_build.injected_vars
        groups = re.findall(TEST_GROUP_PATTERN, console)
        if not groups:
            # BUG FIX: use the already-resolved 'job_name' here — 'b' may
            # lack 'jobName'/'buildNumber' (see above).
            logger.error("No test group found in console of the job {0}/{1}"
                         .format(job_name,
                                 b.get('buildNumber', build_number)))
            continue
        # Use the last group (there can be several groups in upgrade jobs)
        test_group = groups[-1]
        for distro in distros:
            if distro in job_name:
                sep = '.' + distro + '.'
                job_suffix = job_name.split(sep)[-1]
                break
        else:
            job_suffix = job_name.split('.')[-1]
        res[job_suffix] = {'group': test_group,
                           'job_info': job_info,
                           'env_vars': env_vars}
    return res
def get_version_from_upstream_job(jenkins_build_data):
    """Extract version info from the upstream job of the given build.

    Follows the 'UPSTREAM_JOB_URL' parameter to the triggering build and
    reads the version from its artifacts, falling back to its parameters.

    :param jenkins_build_data: dict - Jenkins build data of the downstream
        build ('actions' must be present)
    :return: version info from the upstream build, or None when there is
        no upstream job parameter or no trigger cause is recorded
    """
    upstream_job = get_job_parameter(jenkins_build_data, 'UPSTREAM_JOB_URL')
    if not upstream_job:
        return None
    # BUG FIX: indexing [0] into the filtered list raised IndexError when
    # no action carried a 'causes' entry; treat that as "no upstream".
    causes_lists = [a['causes'] for a in jenkins_build_data['actions']
                    if 'causes' in a]
    if not causes_lists or len(causes_lists[0]) == 0:
        return None
    causes = causes_lists[0]
    upstream_job_name = causes[0]['upstreamProject']
    upstream_build_number = causes[0]['upstreamBuild']
    upstream_build = Build(upstream_job_name, upstream_build_number)
    return (get_version_from_artifacts(upstream_build.build_data) or
            get_version_from_parameters(upstream_build.build_data))
def get_tests_groups_from_jenkins(runner_name, build_number, distros):
    """Map each sub job's suffix to the test group found in its console.

    :param runner_name: str - Jenkins runner job name
    :param build_number: int/str - runner build number
    :param distros: iterable of distro names used to split job suffixes
    :return: dict {job_suffix: test_group}
    """
    def _suffix(name):
        # Everything after '.<distro>.' when a known distro is in the
        # name, otherwise the last dot-separated component.
        for distro in distros:
            if distro in name:
                return name.split('.' + distro + '.')[-1]
        return name.split('.')[-1]

    runner_build = Build(runner_name, build_number)
    res = {}
    for child in runner_build.build_data['subBuilds']:
        if child['result'] is None:
            logger.debug("Skipping '{0}' job (build #{1}) because it's still "
                         "running...".format(child['jobName'],
                                             child['buildNumber'],))
            continue
        # The test group is parsed from the '--group=' argument of the
        # run_system_test.py invocation in the job console.
        console = Build(child['jobName'],
                        child['buildNumber']).get_job_console()
        groups = [tok.split('=')[1]
                  for line in console if 'run_system_test.py' in line
                  for tok in line.split() if '--group=' in tok]
        if not groups:
            logger.error("No test group found in console of the job {0}/{1}"
                         .format(child['jobName'], child['buildNumber']))
            continue
        # There can be several groups in upgrade jobs; take the last one.
        res[_suffix(child['jobName'])] = groups[-1]
    return res
def get_build_test_data(build_number, job_name,
                        jenkins_url=JENKINS.get('url')):
    """ Get build test data from Jenkins from nosetests.xml

    :param build_number: int - Jenkins build number
    :param job_name: str - Jenkins job_name
    :param jenkins_url: str - Jenkins http url
    :return: test_data: dict - build info or None otherwise
    """
    test_data = None
    logger.info('Getting subbuild {} {}'.format(job_name, build_number))
    runner_build = Build(job_name, build_number)
    buildinfo = runner_build.get_build_data(depth=0)
    if not buildinfo:
        logger.error('Getting subbuilds info is failed. '
                     'Job={} Build={}'.format(job_name, build_number))
        return test_data
    try:
        # Find the relative path of the nosetests XML artifact; IndexError
        # here (no such artifact) is handled by the except clause below.
        artifact_paths = [
            v for i in buildinfo.get('artifacts')
            for k, v in i.items() if k == 'relativePath' and
            v == JENKINS.get('xml_testresult_file_name')][0]
        artifact_url = "/".join(
            [jenkins_url, 'job', job_name, str(build_number)])
        xdata = get_build_artifact(artifact_url, artifact_paths)
        test_data = xmltodict.parse(xdata, xml_attribs=True)
        test_data.update({
            'build_number': build_number,
            'job_name': job_name,
            'job_url': buildinfo.get('url'),
            'job_description': buildinfo.get('description'),
            'job_status': buildinfo.get('result')
        })
    except Exception as e:
        # BUG FIX: the bare 'except:' silently swallowed everything,
        # including SystemExit/KeyboardInterrupt; narrow it and log why
        # the test data could not be retrieved.
        logger.error('Cannot get build test data. '
                     'Job={} Build={}: {}'.format(job_name, build_number, e))
        test_data = None
    return test_data
def get_tests_groups_from_jenkins(runner_name, build_number, distros):
    """Map each (sub) job's suffix to its test group and job metadata,
    with a fallback for baremetal jobs whose console lacks test groups.

    :param runner_name: str - Jenkins runner job name
    :param build_number: int/str - runner build number
    :param distros: iterable of distro names used to split job suffixes
    :return: dict {job_suffix: {'group', 'job_info', 'env_vars'}}
    """
    runner_build = Build(runner_name, build_number)
    res = {}
    # A runner without sub builds (single-job run) is processed as a
    # one-element list containing the runner's own build data.
    sub_builds = \
        runner_build.build_data.get('subBuilds', [runner_build.build_data])
    for b in sub_builds:
        if b['result'] is None:
            # BUG FIX: when 'b' is the runner's own build data it has no
            # 'jobName'/'buildNumber' keys, so direct indexing raised
            # KeyError; fall back to the runner identity instead.
            logger.debug("Skipping '{0}' job (build #{1}) because it's still "
                         "running...".format(
                             b.get('jobName', runner_name),
                             b.get('buildNumber', build_number),))
            continue

        # Get the test group from the console of the job
        # Get the job suffix
        if b.get('jobName'):
            z = Build(b['jobName'], b['buildNumber'])
            console = z.get_job_console()
            job_name = b['jobName']
            job_info = z.job_info
            env_vars = z.injected_vars
        else:
            console = runner_build.get_job_console()
            job_name = runner_build.name
            job_info = runner_build.job_info
            env_vars = runner_build.injected_vars

        groups = re.findall(TEST_GROUP_PATTERN, console)
        if not groups:
            # maybe it's failed baremetal job?
            # because of a design baremetal tests run pre-setup job
            # and when it fails there are no test groups in common meaning:
            # groups which could be parsed by TEST_GROUP_PATTERN
            baremetal_pattern = re.compile(r'Jenkins Build.*jenkins-(.*)-\d+')
            baremetal_groups = re.findall(baremetal_pattern, console)
            if not baremetal_groups:
                # BUG FIX: use the already-resolved 'job_name' — 'b' may
                # lack 'jobName'/'buildNumber' (see above).
                logger.error(
                    "No test group found in console of the job {0}/{1}".format(
                        job_name, b.get('buildNumber', build_number)))
                continue
            # we should get the group via jobName because the test group name
            # inside the log could be cut and some symbols will be changed
            # to *.  BUG FIX: split 'job_name' instead of b['jobName'],
            # which may be absent for the runner's own build.
            groups = job_name.split('.')
        # Use the last group (there can be several groups in upgrade jobs)
        test_group = groups[-1]
        for distro in distros:
            if distro in job_name:
                sep = '.' + distro + '.'
                job_suffix = job_name.split(sep)[-1]
                break
        else:
            job_suffix = job_name.split('.')[-1]
        res[job_suffix] = {'group': test_group,
                           'job_info': job_info,
                           'env_vars': env_vars}
    return res
def get_test_build(build_name, build_number, check_rebuild=False):
    """Get test data from Jenkins job build.

    :param build_name: str - Jenkins job name
    :param build_number: int - Jenkins build number
    :param check_rebuild: bool, if True then look for newer job rebuild(s)
    :return: Build - the original build or its rebuild with the same
        ISO magnet link
    """
    test_build = Build(build_name, build_number)
    last_case = test_build.test_data()['suites'][0]['cases'].pop()
    # Rebuild lookup applies only to 'jenkins' placeholder runs, and only
    # when the caller asked for it.
    if last_case['name'] != 'jenkins' or not check_rebuild:
        return test_build
    iso_magnet = get_job_parameter(test_build.build_data, 'MAGNET_LINK')
    if not iso_magnet:
        return test_build
    latest = Build(build_name, 'latest').number
    for candidate in range(build_number + 1, latest + 1):
        rebuild = Build(build_name, candidate)
        # A rebuild of the same run is identified by sharing the magnet
        if get_job_parameter(rebuild.build_data, 'MAGNET_LINK') \
                == iso_magnet:
            logger.debug("Found test job rebuild: "
                         "{0}".format(rebuild.url))
            return rebuild
    return test_build
def get_build_test_data(build_number, job_name,
                        jenkins_url=JENKINS.get('url')):
    """ Get build test data from Jenkins from nosetests.xml

    :param build_number: int - Jenkins build number
    :param job_name: str - Jenkins job_name
    :param jenkins_url: str - Jenkins http url
    :return: test_data: dict - build info or None otherwise
    """
    test_data = None
    logger.info('Getting subbuild {} {}'.format(job_name, build_number))
    runner_build = Build(job_name, build_number)
    buildinfo = runner_build.get_build_data(depth=0)
    if not buildinfo:
        logger.error('Getting subbuilds info is failed. '
                     'Job={} Build={}'.format(job_name, build_number))
        return test_data
    try:
        # Find the relative path of the nosetests XML artifact; IndexError
        # here (no such artifact) is handled by the except clause below.
        artifact_paths = [v for i in buildinfo.get('artifacts')
                          for k, v in i.items() if k == 'relativePath' and
                          v == JENKINS.get('xml_testresult_file_name')][0]
        artifact_url = "/".join([jenkins_url, 'job', job_name,
                                 str(build_number)])
        xdata = get_build_artifact(artifact_url, artifact_paths)
        test_data = xmltodict.parse(xdata, xml_attribs=True)
        test_data.update({'build_number': build_number,
                          'job_name': job_name,
                          'job_url': buildinfo.get('url'),
                          'job_description': buildinfo.get('description'),
                          'job_status': buildinfo.get('result')})
    except Exception as e:
        # BUG FIX: the bare 'except:' silently swallowed everything,
        # including SystemExit/KeyboardInterrupt; narrow it and log why
        # the test data could not be retrieved.
        logger.error('Cannot get build test data. '
                     'Job={} Build={}: {}'.format(job_name, build_number, e))
        test_data = None
    return test_data
def main():
    """Publish results of system tests from a Jenkins build to TestRail.

    Workflow: parse CLI options, collect test jobs (from a Jenkins view or
    a swarm runner build), create/find the TestPlan and its runs in
    TestRail, then publish per-OS results. See settings.py for config.
    """
    parser = OptionParser(
        description="Publish results of system tests from Jenkins build to "
                    "TestRail. See settings.py for configuration.")
    parser.add_option('-j', '--job-name', dest='job_name', default=None,
                      help='Jenkins swarm runner job name')
    parser.add_option('-N', '--build-number', dest='build_number',
                      default='latest',
                      help='Jenkins swarm runner build number')
    parser.add_option('-o', '--one-job', dest='one_job_name', default=None,
                      help=('Process only one job name from the specified '
                            'parent job or view'))
    parser.add_option("-w", "--view", dest="jenkins_view", default=False,
                      help="Get system tests jobs from Jenkins view")
    parser.add_option("-l", "--live", dest="live_report", action="store_true",
                      help="Get tests results from running swarm")
    parser.add_option("-m", "--manual", dest="manual_run", action="store_true",
                      help="Manually add tests cases to TestRun (tested only)")
    parser.add_option('-c', '--create-plan-only', action="store_true",
                      dest="create_plan_only", default=False,
                      help='Jenkins swarm runner job name')
    parser.add_option("-v", "--verbose", action="store_true", dest="verbose",
                      default=False, help="Enable debug output")

    (options, _) = parser.parse_args()

    if options.verbose:
        logger.setLevel(DEBUG)

    # A live report reads the most recently *started* (possibly unfinished)
    # build instead of the last completed one.
    if options.live_report and options.build_number == 'latest':
        options.build_number = 'latest_started'

    # STEP #1
    # Initialize TestRail Project and define configuration
    logger.info('Initializing TestRail Project configuration...')
    project = TestRailProject(url=TestRailSettings.url,
                              user=TestRailSettings.user,
                              password=TestRailSettings.password,
                              project=TestRailSettings.project)

    tests_suite = project.get_suite_by_name(TestRailSettings.tests_suite)

    # Only the operation systems whitelisted in settings are reported;
    # 'distro' is the first word of the config name, lowercased.
    operation_systems = [
        {'name': config['name'],
         'id': config['id'],
         'distro': config['name'].split()[0].lower()}
        for config in project.get_config_by_name('Operation System')['configs']
        if config['name'] in TestRailSettings.operation_systems]
    tests_results = {os['distro']: [] for os in operation_systems}

    # STEP #2
    # Get tests results from Jenkins
    logger.info('Getting tests results from Jenkins...')
    if options.jenkins_view:
        jobs = get_jobs_for_view(options.jenkins_view)
        # When only the plan should be created, skip collecting test jobs.
        tests_jobs = [{'name': j, 'number': 'latest'}
                      for j in jobs if 'system_test' in j] if \
            not options.create_plan_only else []
        runner_job = [j for j in jobs if 'runner' in j][0]
        runner_build = Build(runner_job, 'latest')
    elif options.job_name:
        runner_build = Build(options.job_name, options.build_number)
        tests_jobs = get_downstream_builds(runner_build.build_data) if \
            not options.create_plan_only else []
    else:
        logger.error("Please specify either Jenkins swarm runner job name (-j)"
                     " or Jenkins view with system tests jobs (-w). Exiting..")
        return

    for systest_build in tests_jobs:
        if (options.one_job_name and
                options.one_job_name != systest_build['name']):
            logger.debug("Skipping '{0}' because --one-job is specified"
                         .format(systest_build['name']))
            continue
        if options.job_name:
            # Jobs without a 'result' key carry no test results at all.
            if 'result' not in systest_build.keys():
                logger.debug("Skipping '{0}' job because it does't run tests "
                             "(build #{1} contains no results)".format(
                                 systest_build['name'],
                                 systest_build['number']))
                continue
            # result is None while the build is still in progress.
            if systest_build['result'] is None:
                logger.debug("Skipping '{0}' job (build #{1}) because it's sti"
                             "ll running...".format(
                                 systest_build['name'],
                                 systest_build['number'],))
                continue
        # Route each job's results to the matching distro bucket based on
        # the distro name appearing in the job name.
        for os in tests_results.keys():
            if os in systest_build['name'].lower():
                tests_results[os].extend(get_tests_results(systest_build, os))

    # STEP #3
    # Create new TestPlan in TestRail (or get existing) and add TestRuns
    milestone, iso_number, prefix = get_version(runner_build.build_data)
    milestone = project.get_milestone_by_name(name=milestone)

    test_plan_name = ' '.join(
        filter(lambda x: bool(x),
               (milestone['name'], prefix, 'iso', '#' + str(iso_number))))

    test_plan = project.get_plan_by_name(test_plan_name)
    iso_link = '/'.join([JENKINS['url'], 'job',
                         '{0}.all'.format(milestone['name']),
                         str(iso_number)])
    if not test_plan:
        test_plan = project.add_plan(test_plan_name,
                                     description=iso_link,
                                     milestone_id=milestone['id'],
                                     entries=[])
        logger.info('Created new TestPlan "{0}".'.format(test_plan_name))
    else:
        logger.info('Found existing TestPlan "{0}".'.format(test_plan_name))

    if options.create_plan_only:
        return

    plan_entries = []
    all_cases = project.get_cases(suite_id=tests_suite['id'])
    for os in operation_systems:
        cases_ids = []
        # In manual mode only the cases that actually have results are
        # included in the run.
        if options.manual_run:
            all_results_groups = [r.group for r in tests_results[os['distro']]]
            for case in all_cases:
                if case['custom_test_group'] in all_results_groups:
                    cases_ids.append(case['id'])
        plan_entries.append(
            project.test_run_struct(
                name='{suite_name}'.format(suite_name=tests_suite['name']),
                suite_id=tests_suite['id'],
                milestone_id=milestone['id'],
                description='Results of system tests ({tests_suite}) on is'
                            'o #"{iso_number}"'.format(
                                tests_suite=tests_suite['name'],
                                iso_number=iso_number),
                config_ids=[os['id']],
                include_all=True,
                case_ids=cases_ids))

    # Only add the plan entry once per suite; re-fetch the plan so it
    # contains the newly created runs.
    if not any(entry['suite_id'] == tests_suite['id']
               for entry in test_plan['entries']):
        if project.add_plan_entry(plan_id=test_plan['id'],
                                  suite_id=tests_suite['id'],
                                  config_ids=[os['id'] for os
                                              in operation_systems],
                                  runs=plan_entries):
            test_plan = project.get_plan(test_plan['id'])

    # STEP #4
    # Upload tests results to TestRail
    logger.info('Uploading tests results to TestRail...')
    for os in operation_systems:
        logger.info('Checking tests results for "{0}"...'.format(os['name']))
        results_to_publish = publish_results(
            project=project,
            milestone_id=milestone['id'],
            test_plan=test_plan,
            suite_id=tests_suite['id'],
            config_id=os['id'],
            results=tests_results[os['distro']])
        logger.debug('Added new results for tests ({os}): {tests}'.format(
            os=os['name'], tests=[r.group for r in results_to_publish]))

    logger.info('Report URL: {0}'.format(test_plan['url']))
def main():
    """Build failure-group statistics for a swarm runner build and
    optionally publish them to the TestRail TestPlan description.

    :return: None on success, or a non-zero int status code describing
        which stage failed (2..7, see the returns below)
    """
    parser = argparse.ArgumentParser(description='Get downstream build info'
                                     ' for Jenkins swarm.runner build.'
                                     ' Generate matrix statistics:'
                                     ' (failure group -> builds & tests).'
                                     ' Publish matrix to Testrail'
                                     ' if necessary.')
    parser.add_argument('-n', '--build-number', type=int, required=False,
                        dest='build_number', help='Jenkins job build number')
    parser.add_argument('-j', '--job-name', type=str,
                        dest='job_name', default='9.0.swarm.runner',
                        help='Name of Jenkins job which runs tests (runner)')
    parser.add_argument('-f', '--format', type=str, dest='formatfile',
                        default='html',
                        help='format statistics: html,json,table')
    parser.add_argument('-o', '--out', type=str, dest="fileoutput",
                        default='failure_groups_statistics',
                        help='Save statistics to file')
    parser.add_argument('-t', '--track', action="store_true",
                        help='Publish statistics to TestPlan description')
    parser.add_argument('-q', '--quiet', action="store_true",
                        help='Be quiet (disable logging except critical) '
                             'Overrides "--verbose" option.')
    parser.add_argument("-v", "--verbose", action="store_true",
                        help="Enable debug logging.")
    args = parser.parse_args()

    if args.verbose:
        logger.setLevel(DEBUG)
    if args.quiet:
        # Quiet wins over verbose (applied last).
        logger.setLevel(CRITICAL)
    if args.formatfile and \
            args.formatfile not in ['json', 'html', 'xls', 'xlsx',
                                    'yaml', 'csv']:
        logger.info('Not supported format output. Exit')
        return 2
    # Default to the latest build of the runner job when no number given.
    if not args.build_number:
        runner_build = Build(args.job_name, 'latest')
        logger.info('Latest build number is {}. Job is {}'.format(
            runner_build.number, args.job_name))
        args.build_number = runner_build.number

    logger.info('Getting subbuilds for {} {}'.format(args.job_name,
                                                     args.build_number))
    subbuilds, swarm_jenkins_info = get_sub_builds(args.build_number)
    if not subbuilds or not swarm_jenkins_info:
        logger.error('Necessary subbuilds info are absent. Exit')
        return 3
    logger.info('{} Subbuilds have been found'.format(len(subbuilds)))

    logger.info('Calculating failure groups')
    failure_gd = get_global_failure_group_list(subbuilds)[0]
    if not failure_gd:
        logger.error('Necessary failure grpoup info are absent. Exit')
        return 4
    logger.info('{} Failure groups have been found'.format(len(failure_gd)))

    logger.info('Getting TestRail data')
    testrail_testdata = get_testrail_testdata(args.job_name,
                                              args.build_number)
    if not testrail_testdata:
        logger.error('Necessary testrail info are absent. Exit')
        return 5
    logger.info('TestRail data have been downloaded')

    logger.info('Getting TestRail bugs')
    testrail_bugs = get_bugs(subbuilds, testrail_testdata)
    if not testrail_bugs:
        logger.error('Necessary testrail bugs info are absent. Exit')
        return 6
    logger.info('TestRail bugs have been got')

    # Annotate each sub build with its failure group/TestRail/bug info.
    logger.info('Update subbuilds data')
    update_subbuilds_failuregroup(subbuilds, failure_gd,
                                  testrail_testdata,
                                  testrail_bugs)
    logger.info('Subbuilds data have been updated')

    logger.info('Generating statistics across all failure groups')
    statistics = get_statistics(failure_gd, format_out=args.formatfile)
    if not statistics:
        logger.error('Necessary statistics info are absent. Exit')
        return 7
    logger.info('Statistics have been generated')

    if args.fileoutput and args.formatfile:
        logger.info('Save statistics')
        dump_statistics(statistics, args.build_number, args.job_name,
                        args.formatfile, args.fileoutput)
        logger.info('Statistics have been saved')
    if args.track:
        logger.info('Publish statistics to TestRail')
        if publish_statistics(statistics, args.build_number, args.job_name):
            logger.info('Statistics have been published')
        else:
            # NOTE(review): message is missing a space between 'published'
            # and 'due' in the original string; kept byte-identical here.
            logger.info('Statistics have not been published'
                        'due to internal issue')
def main():
    """Publish results of system tests from a Jenkins multi-configuration
    build ('runs') to TestRail, grouped by milestone and ISO number.

    See settings.py for configuration. NOTE(review): milestone/ISO are
    parsed from each downstream job's description string — assumes the
    format '<milestone>-<iso> ...'; confirm against the Jenkins jobs.
    """
    parser = OptionParser(
        description="Publish results of system tests from Jenkins build to "
                    "TestRail. See settings.py for configuration.")
    parser.add_option('-j', '--job-name', dest='job_name', default=None,
                      help='Jenkins swarm runner job name')
    parser.add_option('-N', '--build-number', dest='build_number',
                      default='latest',
                      help='Jenkins swarm runner build number')
    parser.add_option("-l", "--live", dest="live_report", action="store_true",
                      help="Get tests results from running swarm")
    parser.add_option("-v", "--verbose", action="store_true", dest="verbose",
                      default=False, help="Enable debug output")

    (options, _) = parser.parse_args()

    if options.verbose:
        logger.setLevel(DEBUG)

    # A live report reads the most recently *started* (possibly
    # unfinished) build instead of the last completed one.
    if options.live_report and options.build_number == 'latest':
        build_number = 'latest_started'
    else:
        build_number = options.build_number

    # STEP #1
    # Initialize TestRail Project and define configuration
    logger.info('Initializing TestRail Project configuration...')
    project = TestRailProject(url=TestRailSettings.url,
                              user=TestRailSettings.user,
                              password=TestRailSettings.password,
                              project=TestRailSettings.project)
    logger.info('Initializing TestRail Project configuration... done')

    operation_systems = [
        {'name': config['name'],
         'id': config['id'],
         'distro': config['name'].split()[0].lower()}
        for config in project.get_config_by_name('Operation System')['configs']]

    # Which OS configs are valid for each supported milestone.
    os_mile = {'6.1': ['Centos 6.5', 'Ubuntu 14.04'],
               '6.0.1': ['Centos 6.5', 'Ubuntu 12.04']}

    # Nested results: {milestone: {iso_number: {os_id: [results]}}}
    tests_results = {}

    # STEP #2
    # Get tests results from Jenkins
    runner_build = Build(options.job_name, build_number)
    runs = runner_build.build_data['runs']

    # Analyze each test individually
    for run_one in runs:
        if '5.1' in run_one['url']:
            continue  # Release 5.1 to skip
        tests_result = get_job_info(run_one['url'])
        if not tests_result['description']:
            continue  # Not completed results to skip
        if 'skipping' in tests_result['description']:
            continue  # Not performed tests to skip
        # Milestone and ISO number are parsed from the first word of the
        # job description, expected as '<milestone>-<iso>'.
        tests_job = {'result': tests_result['result'],
                     'name': (options.job_name + '/' +
                              tests_result['url'].split('/')[-3]),
                     'number': int(tests_result['url'].split('/')[-2]),
                     'mile': (tests_result['description'].
                              split()[0].split('-')[0]),
                     'iso': (int(tests_result['description'].
                                 split()[0].split('-')[1]))}
        if tests_job['mile'] not in tests_results:
            tests_results[tests_job['mile']] = {}
        test_mile = tests_results[tests_job['mile']]
        if tests_job['iso'] not in test_mile:
            test_mile[tests_job['iso']] = {}
        test_iso = test_mile[tests_job['iso']]
        # Bucket the job's results under every OS that both appears in the
        # job name and is valid for the job's milestone.
        for os in operation_systems:
            if os['distro'] in tests_job['name'].lower() and \
                    os['name'] in os_mile[tests_job['mile']]:
                if os['id'] not in test_iso:
                    test_iso[os['id']] = []
                test_os_id = test_iso[os['id']]
                test_os_id.extend(get_tests_results(tests_job, os['distro']))

    # STEP #3
    # Create new TestPlan in TestRail (or get existing) and add TestRuns
    for mile in tests_results:
        mile_tests_suite = '{0}{1}'.format(TestRailSettings.tests_suite, mile)
        logger.info(mile_tests_suite)
        tests_suite = project.get_suite_by_name(mile_tests_suite)
        milestone = project.get_milestone_by_name(name=mile)
        for iso_number in tests_results.get(mile, {}):
            # Create new TestPlan name check the same name in testrail
            test_plan_name = '{milestone} iso #{iso_number}'.format(
                milestone=milestone['name'],
                iso_number=iso_number)
            test_plan = project.get_plan_by_name(test_plan_name)
            if not test_plan:
                test_plan = project.add_plan(
                    test_plan_name,
                    description='/'.join([JENKINS['url'], 'job',
                                          '{0}.all'.format(milestone['name']),
                                          str(iso_number)]),
                    milestone_id=milestone['id'],
                    entries=[])
                logger.info(
                    'Created new TestPlan "{0}".'.format(test_plan_name))
            else:
                logger.info(
                    'Found existing TestPlan "{0}".'.format(test_plan_name))

            plan_entries = []
            # Create a test plan entry
            config_ids = []
            for os in operation_systems:
                if os['name'] in os_mile[mile]:
                    config_ids.append(os['id'])
                    cases_ids = []
                    plan_entries.append(
                        project.test_run_struct(
                            name=tests_suite['name'],
                            suite_id=tests_suite['id'],
                            milestone_id=milestone['id'],
                            description=('Results of system tests ({t_suite})'
                                         ' on iso #"{iso_number}"'.format(
                                             t_suite=tests_suite['name'],
                                             iso_number=iso_number)),
                            config_ids=[os['id']],
                            include_all=True,
                            case_ids=cases_ids))

            # Create a test plan entry with the test run
            run = find_run_by_name(test_plan, tests_suite['name'])
            if not run:
                logger.info('Adding a test plan entry with test run %s ...',
                            tests_suite['name'])
                entry = project.add_plan_entry(plan_id=test_plan['id'],
                                               suite_id=tests_suite['id'],
                                               config_ids=config_ids,
                                               runs=plan_entries)
                logger.info('The test plan entry has been added.')
                run = entry['runs'][0]
            test_plan = project.get_plan(test_plan['id'])

            # STEP #4
            # Upload tests results to TestRail
            logger.info('Uploading tests results to TestRail...')
            for os_id in tests_results.get(mile, {}) \
                    .get(iso_number, {}):
                logger.info('Checking tests results for %s...',
                            project.get_config(os_id)['name'])
                tests_added = publish_results(
                    project=project,
                    milestone_id=milestone['id'],
                    test_plan=test_plan,
                    suite_id=tests_suite['id'],
                    config_id=os_id,
                    results=tests_results[mile][iso_number][os_id])
                logger.debug('Added new results for tests (%s): %s',
                             project.get_config(os_id)['name'],
                             [r.group for r in tests_added])

            logger.info('Report URL: %s', test_plan['url'])
def generate_test_run_name(job_name, build_number):
    """Build a TestRun name like '[<milestone>] Swarm' from the ISO image
    version found in the Jenkins job build parameters."""
    build_data = Build(job_name, build_number).build_data
    milestone = get_version(build_data)[0]
    pieces = ('[', milestone, ']', ' Swarm')
    # A falsy milestone is simply omitted, yielding '[] Swarm'
    return ''.join(piece for piece in pieces if piece)
def get_tests_results(systest_build, os):
    """Convert one Jenkins system-test build into a list of TestResult.

    Cases are grouped by their className: a single-case class becomes one
    plain result; a multi-case class is treated as a step-based test whose
    'Step...' cases become TestRail step results.

    :param systest_build: dict with at least 'name' and 'number' keys
    :param os: str - distro name, passed through to expand_test_group
    :return: list of TestResult objects
    """
    tests_results = []
    test_build = Build(systest_build['name'], systest_build['number'])
    run_test_data = test_build.test_data()
    test_classes = {}
    # Pass 1: group cases by class name and accumulate per-class counters.
    for one in run_test_data['suites'][0]['cases']:
        class_name = one['className']
        if class_name not in test_classes:
            test_classes[class_name] = {}
            test_classes[class_name]['child'] = []
            test_classes[class_name]['duration'] = 0
            test_classes[class_name]["failCount"] = 0
            test_classes[class_name]["passCount"] = 0
            test_classes[class_name]["skipCount"] = 0
        else:
            # A repeated case whose name equals its class name is a
            # duplicate entry of the same test — skip it.
            if one['className'] == one['name']:
                logger.warning("Found duplicate test in run - {}".format(
                    one['className']))
                continue
        test_class = test_classes[class_name]
        test_class['child'].append(one)
        test_class['duration'] += float(one['duration'])
        if one['status'].lower() in ('failed', 'error'):
            test_class["failCount"] += 1
        if one['status'].lower() == 'passed':
            test_class["passCount"] += 1
        if one['status'].lower() == 'skipped':
            test_class["skipCount"] += 1

    # Pass 2: build one TestResult per class.
    for klass in test_classes:
        klass_result = test_classes[klass]
        if len(klass_result['child']) == 1:
            # Simple (non step-based) test.
            test = klass_result['child'][0]
            if check_untested(test):
                continue
            check_blocked(test)
            test_result = TestResult(
                name=test['name'],
                group=expand_test_group(test['className'],
                                        systest_build['name'],
                                        os),
                status=test['status'].lower(),
                duration='{0}s'.format(int(test['duration']) + 1),
                url='{0}testReport/(root)/{1}/'.format(test_build.url,
                                                       test['name']),
                version='_'.join(
                    [test_build.build_data["id"]] + (
                        test_build.build_data["description"] or
                        test['name']).split()),
                description=test_build.build_data["description"] or
                    test['name'],
                comments=test['skippedMessage']
            )
        else:
            # Step-based test: every child named 'Step...' is a step.
            case_steps = []
            test_duration = sum(
                [float(c['duration']) for c in klass_result['child']])
            steps = [c for c in klass_result['child']
                     if c['name'].startswith('Step')]
            # Steps are ordered lexicographically by name.
            steps = sorted(steps, key=lambda k: k['name'])
            test_name = steps[0]['className']
            test_group = steps[0]['className']
            test_comments = None
            is_test_failed = any([s['status'].lower() in ('failed', 'error')
                                  for s in steps])

            for step in steps:
                if step['status'].lower() in ('failed', 'error'):
                    case_steps.append({
                        "content": step['name'],
                        "actual": step['errorStackTrace'] or
                                  step['errorDetails'],
                        "status": step['status'].lower()})
                    # Comments keep the last failing step's error info.
                    test_comments = "{err}\n\n\n{stack}".format(
                        err=step['errorDetails'],
                        stack=step['errorStackTrace'])
                else:
                    case_steps.append({
                        "content": step['name'],
                        "actual": "pass",
                        "status": step['status'].lower()
                    })
            test_result = TestResult(
                name=test_name,
                group=expand_test_group(test_group,
                                        systest_build['name'],
                                        os),
                status='failed' if is_test_failed else 'passed',
                duration='{0}s'.format(int(test_duration) + 1),
                url='{0}testReport/(root)/{1}/'.format(test_build.url,
                                                       test_name),
                version='_'.join(
                    [test_build.build_data["id"]] + (
                        test_build.build_data["description"] or
                        test_name).split()),
                description=test_build.build_data["description"] or
                    test_name,
                comments=test_comments,
                steps=case_steps,
            )
        tests_results.append(test_result)
    return tests_results
def get_tests_results(systest_build, os):
    """Convert one Jenkins system-test build into a list of TestResult.

    Cases are grouped by their className: a single-case class becomes one
    plain result; a multi-case class is treated as a step-based test whose
    'Step...' cases become TestRail step results.

    :param systest_build: dict with at least 'name' and 'number' keys
    :param os: str - distro name, passed through to expand_test_group
    :return: list of TestResult objects
    """
    tests_results = []
    test_build = Build(systest_build['name'], systest_build['number'])
    run_test_data = test_build.test_data()
    test_classes = {}
    # Pass 1: group cases by class name and accumulate per-class counters.
    for one in run_test_data['suites'][0]['cases']:
        class_name = one['className']
        if class_name not in test_classes:
            test_classes[class_name] = {}
            test_classes[class_name]['child'] = []
            test_classes[class_name]['duration'] = 0
            test_classes[class_name]["failCount"] = 0
            test_classes[class_name]["passCount"] = 0
            test_classes[class_name]["skipCount"] = 0
        else:
            # A repeated case whose name equals its class name is a
            # duplicate entry of the same test — skip it.
            if one['className'] == one['name']:
                logger.warning("Found duplicate test in run - {}".format(
                    one['className']))
                continue
        test_class = test_classes[class_name]
        test_class['child'].append(one)
        test_class['duration'] += float(one['duration'])
        if one['status'].lower() in ('failed', 'error'):
            test_class["failCount"] += 1
        if one['status'].lower() == 'passed':
            test_class["passCount"] += 1
        if one['status'].lower() == 'skipped':
            test_class["skipCount"] += 1

    # Pass 2: build one TestResult per class.
    for klass in test_classes:
        klass_result = test_classes[klass]
        if len(klass_result['child']) == 1:
            # Simple (non step-based) test.
            test = klass_result['child'][0]
            if check_untested(test):
                continue
            check_blocked(test)
            test_result = TestResult(
                name=test['name'],
                group=expand_test_group(test['className'],
                                        systest_build['name'], os),
                status=test['status'].lower(),
                duration='{0}s'.format(int(test['duration']) + 1),
                url='{0}testReport/(root)/{1}/'.format(test_build.url,
                                                       test['name']),
                version='_'.join([test_build.build_data["id"]] +
                                 (test_build.build_data["description"] or
                                  test['name']).split()),
                description=test_build.build_data["description"] or
                    test['name'],
                comments=test['skippedMessage'])
        else:
            # Step-based test: every child named 'Step...' is a step.
            case_steps = []
            test_duration = sum(
                [float(c['duration']) for c in klass_result['child']])
            steps = [
                c for c in klass_result['child']
                if c['name'].startswith('Step')
            ]
            # Steps are ordered lexicographically by name.
            steps = sorted(steps, key=lambda k: k['name'])
            test_name = steps[0]['className']
            test_group = steps[0]['className']
            test_comments = None
            is_test_failed = any(
                [s['status'].lower() in ('failed', 'error') for s in steps])

            for step in steps:
                if step['status'].lower() in ('failed', 'error'):
                    case_steps.append({
                        "content": step['name'],
                        "actual": step['errorStackTrace'] or
                                  step['errorDetails'],
                        "status": step['status'].lower()
                    })
                    # Comments keep the last failing step's error info.
                    test_comments = "{err}\n\n\n{stack}".format(
                        err=step['errorDetails'],
                        stack=step['errorStackTrace'])
                else:
                    case_steps.append({
                        "content": step['name'],
                        "actual": "pass",
                        "status": step['status'].lower()
                    })
            test_result = TestResult(
                name=test_name,
                group=expand_test_group(test_group,
                                        systest_build['name'], os),
                status='failed' if is_test_failed else 'passed',
                duration='{0}s'.format(int(test_duration) + 1),
                url='{0}testReport/(root)/{1}/'.format(test_build.url,
                                                       test_name),
                version='_'.join([test_build.build_data["id"]] +
                                 (test_build.build_data["description"] or
                                  test_name).split()),
                description=test_build.build_data["description"] or test_name,
                comments=test_comments,
                steps=case_steps,
            )
        tests_results.append(test_result)
    return tests_results