def get_tests_groups_from_jenkins(runner_name, build_number, distros):
    """Map each sub-job's suffix to the test group it ran.

    The group is recovered from the sub-job console output: the
    ``--group=`` argument that was passed to ``run_tests.py``.
    Sub-builds that are still running are skipped.
    """
    groups_by_suffix = {}
    parent = Build(runner_name, build_number)
    for sub in parent.build_data['subBuilds']:
        if sub['result'] is None:
            logger.debug("Skipping '{0}' job (build #{1}) because it's still "
                         "running...".format(sub['jobName'],
                                             sub['buildNumber'],))
            continue
        # Scan the job console for run_tests.py invocations.
        job_console = Build(sub['jobName'],
                            sub['buildNumber']).get_job_console()
        found_groups = []
        for line in job_console:
            if 'run_tests.py' not in line:
                continue
            for token in line.split():
                if '--group=' in token:
                    found_groups.append(token.split('=')[1])
        if not found_groups:
            logger.error("No test group found in console of the job {0}/{1}"
                         .format(sub['jobName'], sub['buildNumber']))
            continue
        # Derive the job suffix: text after '.<distro>.' when a known
        # distro appears in the name, otherwise the last dot-component.
        job_name = sub['jobName']
        suffix = job_name.split('.')[-1]
        for distro in distros:
            if distro in job_name:
                suffix = job_name.split('.' + distro + '.')[-1]
                break
        # Upgrade jobs can mention several groups; the last one wins.
        groups_by_suffix[suffix] = found_groups[-1]
    return groups_by_suffix
def get_tests_groups_from_jenkins(runner_name, build_number, distros):
    """Return {job suffix: test group} for every sub-build of a runner.

    The test group is the ``--group=`` value passed to ``run_tests.py``,
    read from each sub-job's console output.
    """
    result = {}
    parent_build = Build(runner_name, build_number)
    for sub_build in parent_build.build_data['subBuilds']:
        job = sub_build['jobName']
        num = sub_build['buildNumber']
        console_lines = Build(job, num).get_job_console()
        groups = [
            token.split('=')[1]
            for line in console_lines if 'run_tests.py' in line
            for token in line.split() if '--group=' in token
        ]
        if not groups:
            logger.error(
                "No test group found in console of the job {0}/{1}".format(
                    job, num))
            continue
        # Suffix is the part after '.<distro>.' when the job name carries
        # a known distro; otherwise the last dot-separated component.
        suffix = None
        for distro in distros:
            if distro in job:
                suffix = job.split('.' + distro + '.')[-1]
                break
        if suffix is None:
            suffix = job.split('.')[-1]
        # Upgrade jobs may list several groups; keep the last one.
        result[suffix] = groups[-1]
    return result
def generate_test_plan_name(job_name, build_number):
    """Build a TestPlan name from the ISO info of a Jenkins build.

    The name is assembled from the milestone, prefix and ISO number
    taken from the runner job's build parameters; empty parts are
    dropped.
    """
    runner = Build(job_name, build_number)
    milestone, iso_number, prefix = get_version(runner.build_data)
    parts = (milestone, prefix, 'iso', '#' + str(iso_number))
    return ' '.join(part for part in parts if part)
def get_version_from_parameters(jenkins_build_data):
    """Extract (milestone/version, build number, prefix) from build params.

    Tries, in order:
      1. the 'magnet_link' parameter (fuel ISO link), parsed by regex;
      2. the upstream build referenced via 'UPSTREAM_JOB_URL';
      3. the 'CUSTOM_VERSION' parameter (paired with the build timestamp).

    Returns None when no source yields a version.
    """
    iso_link = get_job_parameter(jenkins_build_data, 'magnet_link')
    if iso_link:
        # Fix: the second literal is raw now; it used to be a plain
        # string, so '\.' and '\d' were invalid escape sequences
        # (a SyntaxWarning on modern Python).
        match = re.search(r'.*\bfuel-(?P<prefix>\w*)-?(?P<version>\d+'
                          r'(?P<version2>\.\d+)+)-(?P<buildnum>\d+)-.*',
                          iso_link)
        if match:
            return (match.group('version'),
                    int(match.group('buildnum')),
                    match.group('prefix'))

    upstream_job = get_job_parameter(jenkins_build_data, 'UPSTREAM_JOB_URL')
    if upstream_job:
        # Fix: next() with a default avoids the IndexError the old
        # [...][0] indexing raised when no action carried 'causes'.
        causes = next((a['causes'] for a in jenkins_build_data['actions']
                       if 'causes' in a), [])
        if len(causes) > 0:
            upstream_build = Build(causes[0]['upstreamProject'],
                                   causes[0]['upstreamBuild'])
            return get_version_from_artifacts(upstream_build.build_data)

    custom_version = get_job_parameter(jenkins_build_data, 'CUSTOM_VERSION')
    if custom_version:
        # Jenkins timestamps are in milliseconds since the epoch.
        swarm_timestamp = (jenkins_build_data['timestamp'] / 1000
                          if 'timestamp' in jenkins_build_data else None)
        return (TestRailSettings.milestone,
                time.strftime("%D %H:%M", time.localtime(swarm_timestamp)),
                custom_version)
def report_build(document, name, number='latest'):
    """If number='latest', report last completed build."""
    page = document.get_page(page_for_build(name))
    build = Build(name, number)
    # The 'number' argument may be the symbolic value 'latest';
    # Build.__init__() resolves it, so build.number is always concrete.
    resolved_number = build.number
    if page.build_exists(resolved_number):
        logger.debug("Build {0} exists".format(resolved_number))
        return None
    page.add_build(build.build_record())
def report_build(document, name, number="latest"):
    """If number='latest', report last completed build."""
    page = document.get_page(page_for_build(name))
    build = Build(name, number)
    # Check 'build.number' rather than the 'number' argument: the
    # argument can be 'latest', which Build.__init__() resolves to a
    # real build number.
    if not page.build_exists(build.number):
        page.add_build(build.build_record())
        return
    logger.debug("Build {0} exists".format(build.number))
    return None
def get_tests_results(systest_build, os):
    """Build TestResult objects from the JUnit report of a systest build."""
    build = Build(systest_build['name'], systest_build['number'])
    collected = []
    for case in build.test_data()['suites'][0]['cases']:
        # Fall back to the test name when the build has no description.
        text = build.build_data["description"] or case['name']
        collected.append(TestResult(
            name=case['name'],
            group=expand_test_group(case['className'],
                                    systest_build['name'], os),
            status=case['status'].lower(),
            duration='{0}s'.format(int(case['duration']) + 1),
            url='{0}testReport/(root)/{1}/'.format(build.url, case['name']),
            version='_'.join([build.build_data["id"]] + text.split()),
            description=text,
        ))
    return collected
def get_tests_results(systest_build):
    """Convert every JUnit case of a systest build into a TestResult."""
    test_build = Build(systest_build['name'], systest_build['number'])
    cases = test_build.test_data()['suites'][0]['cases']
    build_id = test_build.build_data["id"]
    build_description = test_build.build_data["description"]

    def to_result(case):
        # Use the build description when present, else the test name.
        text = build_description or case['name']
        return TestResult(
            name=case['name'],
            group=case['className'],
            status=case['status'].lower(),
            duration='{0}s'.format(int(case['duration']) + 1),
            url='{0}testReport/(root)/{1}/'.format(test_build.url,
                                                   case['name']),
            version='_'.join([build_id] + text.split()),
            description=text,
        )

    return [to_result(case) for case in cases]
def get_version_from_upstream_job(jenkins_build_data):
    """Resolve the version via the upstream build of this Jenkins build.

    Follows the 'UPSTREAM_JOB_URL' parameter to the triggering build and
    reads the version from its artifacts, falling back to its parameters.
    Returns None when there is no upstream job or no cause information.
    """
    upstream_job = get_job_parameter(jenkins_build_data, 'UPSTREAM_JOB_URL')
    if not upstream_job:
        return None
    # Fix: next() with a default avoids the IndexError the old [...][0]
    # indexing raised when no action carried a 'causes' entry.
    causes = next((a['causes'] for a in jenkins_build_data['actions']
                   if 'causes' in a), [])
    if len(causes) > 0:
        upstream_build = Build(causes[0]['upstreamProject'],
                               causes[0]['upstreamBuild'])
        return (get_version_from_artifacts(upstream_build.build_data) or
                get_version_from_parameters(upstream_build.build_data))
def run_simulation(manifest_dir: str, build: Build):
    """Run one simulation build and test."""
    results_file = default_junit_results
    quoted_success = f"\"{build.success}\""
    # expect waits (up to 3000s) for the build's success marker in the
    # simulator output; tee preserves it for the JUnit results file.
    simulate_cmd = (
        f"expect -c 'spawn ./simulate; set timeout 3000; "
        f"expect {quoted_success}' | tee {results_file}"
    )
    configure_step = ["../init-build.sh"] + build.settings_args()
    steps = [
        configure_step,
        ["ninja"],
        ["bash", "-c", simulate_cmd],
    ]
    return run_build_script(manifest_dir, build.name, steps, junit=True)
def get_version_from_parameters(jenkins_build_data):
    """Extract (version, build number, prefix) from Jenkins build params.

    Tries the 'magnet_link' ISO link first; failing that, follows the
    upstream build referenced by 'UPSTREAM_JOB_URL' and reads the
    version from its artifacts. Returns None when neither source yields
    a version.
    """
    iso_link = get_job_parameter(jenkins_build_data, 'magnet_link')
    if iso_link:
        # Fix: both literal parts are raw strings now; the second used
        # to be a plain string, making '\.' / '\d' invalid escape
        # sequences (a SyntaxWarning on modern Python).
        match = re.search(r'.*\bfuel-(?P<prefix>\w*)-?(?P<version>\d+'
                          r'(?P<version2>\.\d+)+)-(?P<buildnum>\d+)-.*',
                          iso_link)
        if match:
            return (match.group('version'),
                    int(match.group('buildnum')),
                    match.group('prefix'))

    upstream_job = get_job_parameter(jenkins_build_data, 'UPSTREAM_JOB_URL')
    if upstream_job:
        # Fix: next() avoids the IndexError the old [...][0] raised
        # when no action carried a 'causes' entry.
        causes = next((a['causes'] for a in jenkins_build_data['actions']
                       if 'causes' in a), [])
        if len(causes) > 0:
            upstream_build = Build(causes[0]['upstreamProject'],
                                   causes[0]['upstreamBuild'])
            return get_version_from_artifacts(upstream_build.build_data)
def main():
    """Publish system test results from a Jenkins swarm run to TestRail.

    Command-line entry point. Collects per-OS results from the runner's
    downstream jobs (or a Jenkins view), creates or reuses a TestPlan
    for the ISO under test, uploads the results, and optionally emits a
    bug statistics report.
    """
    parser = OptionParser(
        description="Publish results of system tests from Jenkins build to "
                    "TestRail. See settings.py for configuration.")
    parser.add_option('-j', '--job-name', dest='job_name', default=None,
                      help='Jenkins swarm runner job name')
    parser.add_option('-N', '--build-number', dest='build_number',
                      default='latest',
                      help='Jenkins swarm runner build number')
    parser.add_option('-o', '--one-job', dest='one_job_name', default=None,
                      help=('Process only one job name from the specified '
                            'parent job or view'))
    parser.add_option("-w", "--view", dest="jenkins_view", default=False,
                      help="Get system tests jobs from Jenkins view")
    parser.add_option("-l", "--live", dest="live_report", action="store_true",
                      help="Get tests results from running swarm")
    parser.add_option("-m", "--manual", dest="manual_run", action="store_true",
                      help="Manually add tests cases to TestRun (tested only)")
    parser.add_option("-s", "--statistics", action="store_true",
                      dest="bug_statistics", default=False,
                      help="Make a statistics for bugs linked to TestRail for "
                           "the test run")
    parser.add_option('-c', '--create-plan-only', action="store_true",
                      dest="create_plan_only", default=False,
                      help='Jenkins swarm runner job name')
    parser.add_option("-v", "--verbose", action="store_true", dest="verbose",
                      default=False, help="Enable debug output")

    (options, args) = parser.parse_args()

    if options.verbose:
        logger.setLevel(DEBUG)

    # A live run has no completed build yet; use the last started one.
    if options.live_report and options.build_number == 'latest':
        options.build_number = 'latest_started'

    # STEP #1
    # Initialize TestRail Project and define configuration
    logger.info('Initializing TestRail Project configuration...')
    project = TestRailProject(url=TestRailSettings.url,
                              user=TestRailSettings.user,
                              password=TestRailSettings.password,
                              project=TestRailSettings.project)

    tests_suite = project.get_suite_by_name(TestRailSettings.tests_suite)
    # Keep only the OS configurations enabled in settings; 'distro' is
    # the lowercase first word of the config name (e.g. 'ubuntu').
    operation_systems = [{'name': config['name'], 'id': config['id'],
                          'distro': config['name'].split()[0].lower()}
                         for config in
                         project.get_config_by_name(
                             'Operation System')['configs']
                         if config['name'] in
                         TestRailSettings.operation_systems]
    tests_results = {os['distro']: [] for os in operation_systems}

    # STEP #2
    # Get tests results from Jenkins
    logger.info('Getting tests results from Jenkins...')
    if options.jenkins_view:
        jobs = get_jobs_for_view(options.jenkins_view)
        tests_jobs = [{'name': j, 'number': 'latest'} for j in jobs
                      if 'system_test' in j] if \
            not options.create_plan_only else []
        runner_job = [j for j in jobs if 'runner' in j][0]
        runner_build = Build(runner_job, 'latest')
    elif options.job_name:
        runner_build = Build(options.job_name, options.build_number)
        tests_jobs = get_downstream_builds(runner_build.build_data) if \
            not options.create_plan_only else []
    else:
        logger.error("Please specify either Jenkins swarm runner job name (-j)"
                     " or Jenkins view with system tests jobs (-w). Exiting..")
        return

    is_running_builds = False

    for systest_build in tests_jobs:
        if (options.one_job_name and
                options.one_job_name != systest_build['name']):
            logger.debug("Skipping '{0}' because --one-job is specified"
                         .format(systest_build['name']))
            continue
        if options.job_name:
            if 'result' not in systest_build.keys():
                logger.debug("Skipping '{0}' job because it does't run tests "
                             "(build #{1} contains no results)".format(
                                 systest_build['name'],
                                 systest_build['number']))
                continue
            if systest_build['result'] is None:
                logger.debug("Skipping '{0}' job (build #{1}) because it's sti"
                             "ll running...".format(
                                 systest_build['name'],
                                 systest_build['number'],
                             ))
                is_running_builds = True
                continue
        # Route the build's results to the matching distro bucket by
        # substring match on the job name.
        for os in tests_results.keys():
            if os in systest_build['name'].lower():
                tests_results[os].extend(get_tests_results(systest_build, os))

    # STEP #3
    # Create new TestPlan in TestRail (or get existing) and add TestRuns
    milestone, iso_number, prefix = get_version(runner_build.build_data)
    milestone = project.get_milestone_by_name(name=milestone)

    test_plan_name = ' '.join(filter(lambda x: bool(x),
                                     (milestone['name'], prefix, 'iso',
                                      '#' + str(iso_number))))

    test_plan = project.get_plan_by_name(test_plan_name)
    if not test_plan:
        test_plan = project.add_plan(
            test_plan_name,
            description='/'.join([JENKINS['url'], 'job',
                                  '{0}.all'.format(milestone['name']),
                                  str(iso_number)]),
            milestone_id=milestone['id'],
            entries=[])
        logger.info('Created new TestPlan "{0}".'.format(test_plan_name))
    else:
        logger.info('Found existing TestPlan "{0}".'.format(test_plan_name))

    if options.create_plan_only:
        return

    plan_entries = []
    all_cases = project.get_cases(suite_id=tests_suite['id'])
    for os in operation_systems:
        cases_ids = []
        # In manual mode restrict the run to the cases that actually
        # have results in this report.
        if options.manual_run:
            all_results_groups = [r.group for r in tests_results[os['distro']]]
            for case in all_cases:
                if case['custom_test_group'] in all_results_groups:
                    cases_ids.append(case['id'])
        plan_entries.append(
            project.test_run_struct(
                name='{suite_name}'.format(suite_name=tests_suite['name']),
                suite_id=tests_suite['id'],
                milestone_id=milestone['id'],
                description='Results of system tests ({tests_suite}) on is'
                            'o #"{iso_number}"'.format(
                                tests_suite=tests_suite['name'],
                                iso_number=iso_number),
                config_ids=[os['id']],
                include_all=True,
                case_ids=cases_ids))

    # Only add the entry once per suite; refresh the plan afterwards so
    # it contains the newly created runs.
    if not any(entry['suite_id'] == tests_suite['id']
               for entry in test_plan['entries']):
        if project.add_plan_entry(plan_id=test_plan['id'],
                                  suite_id=tests_suite['id'],
                                  config_ids=[os['id'] for os
                                              in operation_systems],
                                  runs=plan_entries):
            test_plan = project.get_plan(test_plan['id'])

    # STEP #4
    # Upload tests results to TestRail
    logger.info('Uploading tests results to TestRail...')
    for os in operation_systems:
        logger.info('Checking tests results for "{0}"...'.format(os['name']))
        results_to_publish = publish_results(
            project=project,
            milestone_id=milestone['id'],
            test_plan=test_plan,
            suite_id=tests_suite['id'],
            config_id=os['id'],
            results=tests_results[os['distro']])
        logger.debug('Added new results for tests ({os}): {tests}'.format(
            os=os['name'], tests=[r.group for r in results_to_publish]))

    logger.info('Report URL: {0}'.format(test_plan['url']))

    # STEP #5
    # Provide the bugs linked in TestRail for current run as a short statistics
    if options.bug_statistics:
        if is_running_builds:
            logger.info("Some jobs are still running. "
                        "Skipping bug statistics report, please try later.")
        else:
            logger.info("Generating a bug statistics report...")
            make_bug_statistics(tests_results, operation_systems)
def get_tests_results(systest_build, os):
    """Build TestResult objects from a systest build's JUnit report.

    Cases are grouped by ``className``: a class with a single case maps
    to one plain result; a class with several "Step ..." cases becomes
    a single aggregated result whose ``steps`` carry per-step status
    and stack traces.
    """
    tests_results = []
    test_build = Build(systest_build['name'], systest_build['number'])
    run_test_data = test_build.test_data()
    test_classes = {}
    # First pass: bucket the cases per test class and accumulate
    # duration plus pass/fail/skip counters.
    for one in run_test_data['suites'][0]['cases']:
        className = one['className']
        if className not in test_classes:
            test_classes[className] = {}
            test_classes[className]['child'] = []
            test_classes[className]['duration'] = 0
            test_classes[className]["failCount"] = 0
            test_classes[className]["passCount"] = 0
            test_classes[className]["skipCount"] = 0
        else:
            # A case whose name equals its class name is the test
            # itself; seeing it again means a duplicate entry.
            if one['className'] == one['name']:
                logger.warning("Found duplicate test in run - {}".format(
                    one['className']))
                continue
        test_class = test_classes[className]
        test_class['child'].append(one)
        test_class['duration'] += float(one['duration'])
        if one['status'].lower() in ('failed', 'error'):
            test_class["failCount"] += 1
        if one['status'].lower() == 'passed':
            test_class["passCount"] += 1
        if one['status'].lower() == 'skipped':
            test_class["skipCount"] += 1

    # Second pass: convert each class bucket into a TestResult.
    for klass in test_classes:
        klass_result = test_classes[klass]
        if len(klass_result['child']) == 1:
            # Simple single-case test.
            test = klass_result['child'][0]
            # NOTE(review): check_untested() appears to filter tests that
            # never ran (result is dropped) and check_blocked() to adjust
            # blocked ones in place — confirm against their definitions.
            if check_untested(test):
                continue
            check_blocked(test)
            test_result = TestResult(
                name=test['name'],
                group=expand_test_group(test['className'],
                                        systest_build['name'],
                                        os),
                status=test['status'].lower(),
                # +1s so that sub-second tests don't round down to 0s.
                duration='{0}s'.format(int(test['duration']) + 1),
                url='{0}testReport/(root)/{1}/'.format(test_build.url,
                                                       test['name']),
                version='_'.join(
                    [test_build.build_data["id"]] + (
                        test_build.build_data["description"] or
                        test['name']).split()),
                description=test_build.build_data["description"] or
                test['name'],
                comments=test['skippedMessage']
            )
        else:
            # Multi-step test: aggregate the 'Step ...' cases into one
            # result with per-step outcomes.
            case_steps = []
            test_duration = sum(
                [float(c['duration']) for c in klass_result['child']])
            steps = [c for c in klass_result['child']
                     if c['name'].startswith('Step')]
            steps = sorted(steps, key=lambda k: k['name'])
            test_name = steps[0]['className']
            test_group = steps[0]['className']
            test_comments = None
            is_test_failed = any([s['status'].lower() in ('failed', 'error')
                                  for s in steps])

            for step in steps:
                if step['status'].lower() in ('failed', 'error'):
                    case_steps.append({
                        "content": step['name'],
                        "actual": step['errorStackTrace'] or
                        step['errorDetails'],
                        "status": step['status'].lower()})
                    # The comment keeps the details of the last failing
                    # step (later failures overwrite earlier ones).
                    test_comments = "{err}\n\n\n{stack}".format(
                        err=step['errorDetails'],
                        stack=step['errorStackTrace'])
                else:
                    case_steps.append({
                        "content": step['name'],
                        "actual": "pass",
                        "status": step['status'].lower()
                    })
            test_result = TestResult(
                name=test_name,
                group=expand_test_group(test_group,
                                        systest_build['name'],
                                        os),
                status='failed' if is_test_failed else 'passed',
                duration='{0}s'.format(int(test_duration) + 1),
                url='{0}testReport/(root)/{1}/'.format(test_build.url,
                                                       test_name),
                version='_'.join(
                    [test_build.build_data["id"]] + (
                        test_build.build_data["description"] or
                        test_name).split()),
                description=test_build.build_data["description"] or
                test_name,
                comments=test_comments,
                steps=case_steps,
            )
        tests_results.append(test_result)
    return tests_results
def main():
    """Publish multi-milestone system test results to TestRail.

    Command-line entry point. Walks the matrix runs of one Jenkins job,
    groups results by milestone and ISO number, then creates/reuses a
    TestPlan per ISO and uploads the per-OS results.
    """
    parser = OptionParser(
        description="Publish results of system tests from Jenkins build to "
                    "TestRail. See settings.py for configuration."
    )
    parser.add_option('-j', '--job-name', dest='job_name', default=None,
                      help='Jenkins swarm runner job name')
    parser.add_option('-N', '--build-number', dest='build_number',
                      default='latest',
                      help='Jenkins swarm runner build number')
    parser.add_option("-l", "--live", dest="live_report", action="store_true",
                      help="Get tests results from running swarm")
    parser.add_option("-v", "--verbose", action="store_true", dest="verbose",
                      default=False, help="Enable debug output")

    (options, args) = parser.parse_args()

    if options.verbose:
        logger.setLevel(DEBUG)

    # A live run has no completed build yet; use the last started one.
    if options.live_report and options.build_number == 'latest':
        build_number = 'latest_started'
    else:
        build_number = options.build_number

    # STEP #1
    # Initialize TestRail Project and define configuration
    logger.info('Initializing TestRail Project configuration...')
    project = TestRailProject(url=TestRailSettings.url,
                              user=TestRailSettings.user,
                              password=TestRailSettings.password,
                              project=TestRailSettings.project)
    logger.info('Initializing TestRail Project configuration... done')

    # 'distro' is the lowercase first word of the config name.
    operation_systems = [{'name': config['name'], 'id': config['id'],
                          'distro': config['name'].split()[0].lower()}
                         for config in project.get_config_by_name(
                             'Operation System')['configs']]
    # OS configurations expected per milestone.
    os_mile = {'6.1': ['Centos 6.5', 'Ubuntu 14.04'],
               '6.0.1': ['Centos 6.5', 'Ubuntu 12.04']}

    # Nested mapping: milestone -> iso number -> os config id -> results.
    tests_results = {}

    # STEP #2
    # Get tests results from Jenkins
    runner_build = Build(options.job_name, build_number)
    runs = runner_build.build_data['runs']

    # Analyze each test individually
    for run_one in runs:
        if '5.1' in run_one['url']:
            continue  # Release 5.1 to skip
        tests_result = get_job_info(run_one['url'])
        if not tests_result['description']:
            continue  # Not completed results to skip
        if 'skipping' in tests_result['description']:
            continue  # Not performed tests to skip
        # Milestone and ISO number are parsed from the first word of the
        # build description (format '<milestone>-<iso>').
        tests_job = {'result': tests_result['result'],
                     'name': (options.job_name + '/' +
                              tests_result['url'].split('/')[-3]),
                     'number': int(tests_result['url'].split('/')[-2]),
                     'mile': (tests_result['description'].
                              split()[0].split('-')[0]),
                     'iso': (int(tests_result['description'].
                                 split()[0].split('-')[1]))}
        if tests_job['mile'] not in tests_results:
            tests_results[tests_job['mile']] = {}
        test_mile = tests_results[tests_job['mile']]
        if tests_job['iso'] not in test_mile:
            test_mile[tests_job['iso']] = {}
        test_iso = test_mile[tests_job['iso']]
        for os in operation_systems:
            if os['distro'] in tests_job['name'].lower() and\
                    os['name'] in os_mile[tests_job['mile']]:
                if os['id'] not in test_iso:
                    (test_iso[os['id']]) = []
                test_os_id = test_iso[os['id']]
                test_os_id.extend(get_tests_results(tests_job))

    # STEP #3
    # Create new TestPlan in TestRail (or get existing) and add TestRuns
    for mile in tests_results:
        mile_tests_suite = '{0}{1}'.format(TestRailSettings.tests_suite, mile)
        logger.info(mile_tests_suite)
        tests_suite = project.get_suite_by_name(mile_tests_suite)
        milestone = project.get_milestone_by_name(name=mile)
        for iso_number in tests_results.get(mile, {}):
            # Create new TestPlan name check the same name in testrail
            test_plan_name = '{milestone} iso #{iso_number}'.format(
                milestone=milestone['name'],
                iso_number=iso_number)
            test_plan = project.get_plan_by_name(test_plan_name)
            if not test_plan:
                test_plan = project.add_plan(
                    test_plan_name,
                    description='/'.join([JENKINS['url'], 'job',
                                          '{0}.all'.format(
                                              milestone['name']),
                                          str(iso_number)]),
                    milestone_id=milestone['id'],
                    entries=[])
                logger.info('Created new TestPlan "{0}".'
                            .format(test_plan_name))
            else:
                logger.info('Found existing TestPlan "{0}".'
                            .format(test_plan_name))
            plan_entries = []
            # Create a test plan entry
            config_ids = []
            for os in operation_systems:
                if os['name'] in os_mile[mile]:
                    config_ids.append(os['id'])
                    cases_ids = []
                    plan_entries.append(
                        project.test_run_struct(
                            name=tests_suite['name'],
                            suite_id=tests_suite['id'],
                            milestone_id=milestone['id'],
                            description=('Results of system tests ({t_suite})'
                                         ' on iso #"{iso_number}"'
                                         .format(t_suite=tests_suite['name'],
                                                 iso_number=iso_number)),
                            config_ids=[os['id']],
                            include_all=True,
                            case_ids=cases_ids))
            # Create a test plan entry with the test run
            run = find_run_by_name(test_plan, tests_suite['name'])
            if not run:
                logger.info('Adding a test plan entry with test run %s ...',
                            tests_suite['name'])
                entry = project.add_plan_entry(plan_id=test_plan['id'],
                                               suite_id=tests_suite['id'],
                                               config_ids=config_ids,
                                               runs=plan_entries)
                logger.info('The test plan entry has been added.')
                run = entry['runs'][0]
            # Refresh the plan so it reflects any newly added entry.
            test_plan = project.get_plan(test_plan['id'])

            # STEP #4
            # Upload tests results to TestRail
            logger.info('Uploading tests results to TestRail...')
            for os_id in tests_results.get(mile, {})\
                    .get(iso_number, {}):
                logger.info('Checking tests results for %s...',
                            project.get_config(os_id)['name'])
                tests_added = publish_results(
                    project=project,
                    milestone_id=milestone['id'],
                    test_plan=test_plan,
                    suite_id=tests_suite['id'],
                    config_id=os_id,
                    results=tests_results[mile][iso_number][os_id])
                logger.debug('Added new results for tests (%s): %s',
                             project.get_config(os_id)['name'],
                             [r.group for r in tests_added])

            logger.info('Report URL: %s', test_plan['url'])