def trigger_jobs(buildername, revision, back_revisions=30, times=30, dry_run=False):
    """Trigger *times* jobs of *buildername* on the push *back_revisions*
    pushes older than *revision*.

    When back_revisions is negative the revision is used as given.
    Returns a Treeherder URL filtered to the triggered buildername/revision.
    """
    buildername = sanitize_buildername(buildername)
    repo_url = query_repo_url_from_buildername(buildername)
    repo_name = query_repo_name_from_buildername(buildername)
    if back_revisions >= 0:
        # find the revision *back_revisions* before the one we got
        push_info = query_revision_info(repo_url, revision)
        end_id = int(push_info["pushid"])  # newest revision
        start_id = end_id - back_revisions
        revlist = query_pushid_range(repo_url=repo_url,
                                     start_id=start_id,
                                     end_id=end_id)
        revision = revlist[-1]
    requests = trigger_job(revision, buildername, times=times, dry_run=dry_run)
    # 202 Accepted is what BuildAPI returns on success.
    if any(req.status_code != 202 for req in requests):
        # BUGFIX: Logger.warn is a deprecated alias of warning(); the
        # message also misspelled "succeeded".
        LOG.warning('WARNING: not all requests succeeded')
    return ('https://treeherder.mozilla.org/#/jobs?%s' %
            urllib.urlencode({'repo': repo_name,
                              'revision': revision,
                              'filter-searchStr': buildername}))
def main():
    """CLI entry point: trigger jobs for every buildername matching the
    comma-separated --includes/--exclude filters on a single revision.

    Exits -1 on invalid credentials and 1 when the user declines the prompt.
    """
    options = parse_args()
    if not valid_credentials():
        sys.exit(-1)
    # NOTE: this binds a function-local LOG (Python scoping); the
    # module-level logger object is not replaced.
    if options.debug:
        LOG = setup_logging(logging.DEBUG)
    else:
        LOG = setup_logging(logging.INFO)
    # 'tip' is resolved to the newest changeset of the repository.
    if options.rev == 'tip':
        repo_url = query_repo_url(options.repo)
        options.rev = query_repo_tip(repo_url)
        LOG.info("The tip of %s is %s", options.repo, options.rev)
    # The repo name is always an implicit include filter.
    filters_in = options.includes.split(',') + [options.repo]
    filters_out = []
    if options.exclude:
        filters_out = options.exclude.split(',')
    buildernames = filter_buildernames(
        buildernames=query_builders(repo_name=options.repo),
        include=filters_in,
        exclude=filters_out)
    if len(buildernames) == 0:
        LOG.info("0 jobs match these filters, please try again.")
        return
    # Interactive confirmation; 'd' first shows the matched buildernames.
    cont = raw_input(
        "%i jobs will be triggered, do you wish to continue? y/n/d (d=show details) "
        % len(buildernames))
    if cont.lower() == 'd':
        LOG.info("The following jobs will be triggered: \n %s" % '\n'.join(buildernames))
        cont = raw_input("Do you wish to continue? y/n ")
    if cont.lower() != 'y':
        exit(1)
    # Setting the QUERY_SOURCE global variable in mozci.py
    set_query_source(options.query_source)
    for buildername in buildernames:
        trigger_range(
            buildername=buildername,
            revisions=[options.rev],
            times=options.times,
            dry_run=options.dry_run,
        )
        # Emit a Treeherder link scoped to this buildername and revision.
        LOG.info('https://treeherder.mozilla.org/#/jobs?%s' % urllib.urlencode(
            {
                'repo': query_repo_name_from_buildername(buildername),
                'fromchange': options.rev,
                'tochange': options.rev,
                'filter-searchStr': buildername
            }))
def check_repository(buildername):
    """Validate that *buildername* belongs to a supported repository.

    Returns the repository name extracted from the buildername; raises
    Exception for any repository other than fx-team, mozilla-inbound or
    mozilla-aurora.
    """
    repo_name = query_repo_name_from_buildername(buildername)
    if repo_name in ('fx-team', 'mozilla-inbound', 'mozilla-aurora'):
        return repo_name
    raise Exception('The script supports only for fx-team, mozilla-inbound, mozilla-aurora')
def main():
    """CLI entry point: resolve the requested revision, filter buildernames
    by --includes/--exclude, confirm interactively, then trigger each job.

    Exits -1 on invalid credentials and 1 when the user declines the prompt.
    """
    options = parse_args()
    repo_url = query_repo_url(options.repo)
    if not valid_credentials():
        sys.exit(-1)
    # NOTE: binds a function-local LOG; module-level logger is untouched.
    if options.debug:
        LOG = setup_logging(logging.DEBUG)
    else:
        LOG = setup_logging(logging.INFO)
    # Resolve 'tip' to the newest revision, otherwise normalize the one given.
    if options.rev == 'tip':
        revision = query_repo_tip(repo_url)
        LOG.info("The tip of %s is %s", options.repo, revision)
    else:
        revision = query_full_revision_info(repo_url, options.rev)
    # The repo name acts as an implicit include filter.
    filters_in = options.includes.split(',') + [options.repo]
    filters_out = []
    if options.exclude:
        filters_out = options.exclude.split(',')
    buildernames = filter_buildernames(
        buildernames=query_builders(repo_name=options.repo),
        include=filters_in,
        exclude=filters_out
    )
    if len(buildernames) == 0:
        LOG.info("0 jobs match these filters, please try again.")
        return
    # Interactive confirmation; 'd' first lists the matched buildernames.
    cont = raw_input("%i jobs will be triggered, do you wish to continue? y/n/d (d=show details) "
                     % len(buildernames))
    if cont.lower() == 'd':
        LOG.info("The following jobs will be triggered: \n %s" % '\n'.join(buildernames))
        cont = raw_input("Do you wish to continue? y/n ")
    if cont.lower() != 'y':
        exit(1)
    # Setting the QUERY_SOURCE global variable in mozci.py
    set_query_source(options.query_source)
    for buildername in buildernames:
        trigger_range(
            buildername=buildername,
            revisions=[revision],
            times=options.times,
            dry_run=options.dry_run,
        )
        # Emit a Treeherder link scoped to this buildername and revision.
        LOG.info('https://treeherder.mozilla.org/#/jobs?%s' %
                 urllib.urlencode({'repo': query_repo_name_from_buildername(buildername),
                                   'fromchange': revision,
                                   'tochange': revision,
                                   'filter-searchStr': buildername}))
def check_repository(buildername):
    """Return the repository name encoded in *buildername*.

    Raises Exception unless the repository is one of the supported set
    (fx-team, mozilla-inbound, mozilla-aurora).
    """
    allowed = frozenset(['fx-team', 'mozilla-inbound', 'mozilla-aurora'])
    repo_name = query_repo_name_from_buildername(buildername)
    if repo_name not in allowed:
        raise Exception('The script supports only for fx-team, mozilla-inbound, '
                        'mozilla-aurora')
    return repo_name
def getSuccessfulJobs(revision, buildername):
    """Return how many jobs for *buildername* on *revision* finished with
    SUCCESS, according to Treeherder.

    Uses TreeherderApi.get_matching_jobs to list the jobs and
    get_job_status to classify each one.
    """
    api = TreeherderApi()
    repo = query_repo_name_from_buildername(buildername)
    jobs = api.get_matching_jobs(repo, revision, buildername)
    return sum(1 for job in jobs if api.get_job_status(job) == SUCCESS)
def query_jobs_buildername(buildername, revision):
    """Return **status** information for a buildername on a given revision."""
    # NOTE: It's unfortunate that there is scheduling and status data.
    # I think we might need to remove this distinction for the user's sake.
    repo_name = query_repo_name_from_buildername(buildername)
    scheduled = BuildApi().get_matching_jobs(repo_name, revision, buildername)
    # The user wants the status data rather than the scheduling data.
    return [_status_info(job) for job in scheduled]
def main():
    """CLI entry point: trigger jobs for every buildername that matches the
    space-separated include/exclude word filters on the given revision.

    Exits with status 1 when more buildernames match than --limit allows.
    """
    options = parse_args()
    if options.debug:
        LOG.setLevel(logging.DEBUG)
        logging.getLogger("requests").setLevel(logging.DEBUG)
        LOG.info("Setting DEBUG level")
    else:
        LOG.setLevel(logging.INFO)
        # requests is too noisy and adds no value
        logging.getLogger("requests").setLevel(logging.WARNING)
    # The repo name acts as an implicit include word.
    filters_in = options.includes.split(' ') + [options.repo]
    buildernames = query_builders()
    # Keep only buildernames containing every include word
    # (comprehensions instead of filter(lambda ...) for clarity).
    for word in filters_in:
        buildernames = [b for b in buildernames if word in b]
    if options.exclude:
        filters_out = options.exclude.split(' ')
        # Drop buildernames containing any exclude word.
        for word in filters_out:
            buildernames = [b for b in buildernames if word not in b]
    if len(buildernames) > options.lim:
        # BUGFIX: the two adjacent string literals used to concatenate as
        # "really wantto trigger", and the sentence was missing "are".
        LOG.info('There are %i matching buildernames, the limit is %i. If you really want '
                 'to trigger everything, try again with --limit %i.' %
                 (len(buildernames), options.lim, options.lim))
        exit(1)
    for buildername in buildernames:
        trigger_range(
            buildername=buildername,
            revisions=[options.rev],
            times=options.times,
            dry_run=options.dry_run,
        )
        # Emit a Treeherder link scoped to this buildername and revision.
        LOG.info('https://treeherder.mozilla.org/#/jobs?%s' %
                 urllib.urlencode({'repo': query_repo_name_from_buildername(buildername),
                                   'fromchange': options.rev,
                                   'tochange': options.rev,
                                   'filter-searchStr': buildername}))
def main():
    """CLI entry point: trigger jobs for buildernames matching the
    comma-separated --includes/--exclude filters on the given revision.

    Exits 1 when the user declines the confirmation prompt.
    """
    options = parse_args()
    if options.debug:
        LOG.setLevel(logging.DEBUG)
        logging.getLogger("requests").setLevel(logging.DEBUG)
        LOG.info("Setting DEBUG level")
    else:
        LOG.setLevel(logging.INFO)
        # requests is too noisy and adds no value
        logging.getLogger("requests").setLevel(logging.WARNING)
    # The repo name acts as an implicit include filter.
    filters_in = options.includes.split(',') + [options.repo]
    filters_out = []
    if options.exclude:
        filters_out = options.exclude.split(',')
    buildernames = filter_buildernames(filters_in, filters_out, query_builders())
    # Interactive confirmation; 'd' first lists the matched buildernames.
    cont = raw_input("%i jobs will be triggered, do you wish to continue? y/n/d (d=show details) "
                     % len(buildernames))
    if cont.lower() == 'd':
        LOG.info("The following jobs will be triggered: \n %s" % '\n'.join(buildernames))
        cont = raw_input("Do you wish to continue? y/n ")
    if cont.lower() != 'y':
        exit(1)
    for buildername in buildernames:
        trigger_range(
            buildername=buildername,
            revisions=[options.rev],
            times=options.times,
            dry_run=options.dry_run,
        )
        # Emit a Treeherder link scoped to this buildername and revision.
        LOG.info('https://treeherder.mozilla.org/#/jobs?%s' %
                 urllib.urlencode({'repo': query_repo_name_from_buildername(buildername),
                                   'fromchange': options.rev,
                                   'tochange': options.rev,
                                   'filter-searchStr': buildername}))
def main():
    """CLI entry point supporting several trigger modes:

    1. --coalesced: retrigger coalesced jobs on the revision.
    2. --fill-revision / --trigger-tests-only: fill in missing jobs.
    3. Explicit buildernames (no include/exclude/failed-jobs modifiers).
    4. --includes/--exclude: schedule every matching builder.
    5. --failed-jobs: retrigger jobs that ended in WARNING.

    Modes 3-5 then expand each buildername over a revision list
    (backfill/delta/etc.) and trigger the range.
    """
    options = parse_args()
    # NOTE: binds a function-local LOG; module-level logger is untouched.
    if options.debug:
        LOG = setup_logging(logging.DEBUG)
    else:
        LOG = setup_logging(logging.INFO)
    validate_options(options)
    if not valid_credentials():
        sys.exit(-1)
    # Setting the QUERY_SOURCE global variable in mozci.py
    set_query_source(options.query_source)
    if options.buildernames:
        options.buildernames = sanitize_buildernames(options.buildernames)
        repo_url = query_repo_url_from_buildername(options.buildernames[0])
    if not options.repo_name:
        repo_name = query_repo_name_from_buildername(options.buildernames[0])
    else:
        repo_name = options.repo_name
        repo_url = query_repo_url(repo_name)
    if options.rev == 'tip':
        revision = query_repo_tip(repo_url).changesets[0].node
        LOG.info("The tip of %s is %s", repo_name, revision)
    else:
        revision = query_push_by_revision(repo_url, options.rev,
                                          return_revision_list=True)
    # Schedule jobs through TaskCluster if --taskcluster option has been set to true
    if options.taskcluster:
        mgr = TaskClusterBuildbotManager()
    else:
        mgr = BuildAPIManager()
    trigger_build_if_missing = options.trigger_build_if_missing
    # On try, missing builds are never triggered implicitly.
    if repo_name == 'try':
        trigger_build_if_missing = False
    # Mode 1: Trigger coalesced jobs
    if options.coalesced:
        query_api = BuildApi()
        request_ids = query_api.find_all_jobs_by_status(repo_name, revision, COALESCED)
        if len(request_ids) == 0:
            LOG.info('We did not find any coalesced job')
        for request_id in request_ids:
            make_retrigger_request(repo_name=repo_name,
                                   request_id=request_id,
                                   auth=get_credentials(),
                                   dry_run=options.dry_run)
        return
    # Mode #2: Fill-in a revision or trigger_test_jobs_only
    if options.fill_revision or options.trigger_tests_only:
        mgr.trigger_missing_jobs_for_revision(
            repo_name=repo_name,
            revision=revision,
            dry_run=options.dry_run,
            trigger_build_if_missing=not options.trigger_tests_only
        )
        return
    # Mode #3: Trigger jobs based on revision list modifiers
    if not (options.includes or options.exclude or options.failed_jobs):
        job_names = options.buildernames
    # Mode 4 - Schedule every builder matching --includes and does not match --exclude.
    elif options.includes or options.exclude:
        filters_in = options.includes.split(',') + [repo_name]
        filters_out = []
        if options.exclude:
            filters_out = options.exclude.split(',')
        job_names = filter_buildernames(
            buildernames=query_builders(repo_name=repo_name),
            include=filters_in,
            exclude=filters_out
        )
        if len(job_names) == 0:
            LOG.info("0 jobs match these filters. please try again.")
            return
        if options.existing_only:
            # We query all succesful jobs for a given revision and filter
            # them by include/exclude filters.
            trigger_build_if_missing = False
            successful_jobs = TreeherderApi().find_all_jobs_by_status(
                repo_name=repo_name,
                revision=revision,
                status=SUCCESS)
            # We will filter out all the existing job from those successful job we have.
            job_names = [buildername for buildername in successful_jobs
                         if buildername in job_names]
            cont = raw_input("The ones which have existing builds out of %i jobs will be triggered,\
 do you wish to continue? y/n/d (d=show details) " % len(job_names))
        else:
            cont = raw_input("%i jobs will be triggered, do you wish to continue? \
y/n/d (d=show details) " % len(job_names))
        if cont.lower() == 'd':
            LOG.info("The following jobs will be triggered: \n %s" % '\n'.join(job_names))
            cont = raw_input("Do you wish to continue? y/n ")
        if cont.lower() != 'y':
            exit(1)
    # Mode 5: Use --failed-jobs to trigger jobs for particular revision
    elif options.failed_jobs:
        job_names = TreeherderApi().find_all_jobs_by_status(
            repo_name=repo_name,
            revision=revision,
            status=WARNING)
    for buildername in job_names:
        # Expand the single revision into the requested revision list.
        revlist = determine_revlist(
            repo_url=repo_url,
            buildername=buildername,
            rev=revision,
            back_revisions=options.back_revisions,
            delta=options.delta,
            from_rev=options.from_rev,
            backfill=options.backfill,
            skips=options.skips,
            max_revisions=options.max_revisions)
        _print_treeherder_link(
            revlist=revlist,
            repo_name=repo_name,
            buildername=buildername,
            revision=revision,
            log=LOG,
            includes=options.includes,
            exclude=options.exclude)
        try:
            mgr.trigger_range(
                buildername=buildername,
                repo_name=repo_name,
                revisions=revlist,
                times=options.times,
                dry_run=options.dry_run,
                files=options.files,
                trigger_build_if_missing=trigger_build_if_missing
            )
        except Exception, e:
            LOG.exception(e)
            exit(1)
def main():
    """Alert-processing state machine.

    Each alert carries a 'stage' field:
      0: new alert (merge/pgo alerts are parked at -1 for manual handling)
      1: backfill jobs around the alerting revision
      2: verify backfill completed, then bisect with compare()
      3: trigger all talos jobs on the bad and previous revisions
      4: verify all talos jobs completed
      5: done; a sheriff takes over
     -1: needs manual inspection
    Stage transitions are persisted via updateAlert().
    """
    alerts = getAlerts()
    for alert in alerts:
        # new alert
        if alert['stage'] == 0:
            if checkMerge(alert['revision'], alert['buildername']) or 'pgo' in alert['buildername']:
                LOG.info("We are ignoring this alert since it is either a merge or a pgo job.")
                alert['stage'] = -1  # We need to have manual inspection in this case.
                alert['user'] = '******'
                updateAlert(alert['id'], alert['revision'], alert['buildername'],
                            alert['test'], alert['stage'], alert['loop'], alert['user'])
            else:
                alert['stage'] = 1
        # trigger jobs for backfill
        if alert['stage'] == 1:
            LOG.info("We are in stage 1, and going to backfill jobs.")
            revisionList = getRevisions(alert['revision'], alert['buildername'],
                                        start=-2, end=2)
            trigger_range(alert['buildername'], revisionList, times=6, dry_run=DRY_RUN)
            alert['stage'] = 2
            # We want some time interval between stage 1 and 2, so we exit.
            updateAlert(alert['id'], alert['revision'], alert['buildername'],
                        alert['test'], alert['stage'], alert['loop'], alert['user'])
            continue
        # verify jobs for backfill
        if alert['stage'] == 2:
            LOG.info("We are in stage 2, and going to verify if jobs are backfilled.")
            revisionList = getRevisions(alert['revision'], alert['buildername'],
                                        start=-2, end=2)
            for revision in revisionList:
                dataPoints = getSuccessfulJobs(revision, alert['buildername'])
                # If dataPoints are less than 6, it means that builds/jobs are still running.
                if dataPoints < 6:
                    # We wait for 6 hours for all triggered tests to complete,
                    # And if they don't then we mark them for manual intervention/
                    alert['loop'] += 1
                    if alert['loop'] > (TIME_TO_BUILD + TIME_TO_TEST + PENDING_TIME) / CYCLE_TIME:
                        alert['stage'] = -1
                        alert['user'] = '******'
                    else:
                        alert['stage'] = 1
                    break
            if alert['stage'] != 2:
                updateAlert(alert['id'], alert['revision'], alert['buildername'],
                            alert['test'], alert['stage'], alert['loop'], alert['user'])
                continue
            badRevisions = []
            # Reset the loop for upcoming stages
            alert['loop'] = 0
            # Bisect: compare each consecutive pair of revisions.
            for i in range(1, len(revisionList)):
                results = compare(alert['test'], alert['buildername'],
                                  revisionList[i], revisionList[i-1])
                if results < -2.0:
                    badRevisions.append(revisionList[i])
            if len(badRevisions) != 1:
                alert['stage'] = -1  # too noisy, something bad happened
                alert['user'] = '******'
                updateAlert(alert['id'], alert['revision'], alert['buildername'],
                            alert['test'], alert['stage'], alert['loop'], alert['user'])
                continue
            if checkMerge(badRevisions[0], alert['buildername']):
                alert['stage'] = -1  # A merge revision is a bad revision, manually inspect
                alert['user'] = '******'
            if alert['revision'] != badRevisions[0]:
                # we misreported initially, change the actual regression revision
                alert['revision'] = badRevisions[0]
            # NOTE(review): this unconditionally advances to stage 3, which
            # overwrites the -1 set just above for merge revisions — confirm
            # the intended indentation/ordering in the original source.
            alert['stage'] = 3
        # Trigger all talos stage
        if alert['stage'] == 3:
            LOG.info("We are in stage 3, and going to trigger all_talos jobs.")
            repo_name = query_repo_name_from_buildername(alert['buildername'])
            trigger_all_talos_jobs(repo_name, alert['revision'], times=6, dry_run=DRY_RUN)
            previousRevision = getRevisions(alert['revision'], alert['buildername'],
                                            start=-1, end=-1)[0]
            trigger_all_talos_jobs(repo_name, previousRevision, times=6, dry_run=DRY_RUN)
            alert['stage'] = 4
            updateAlert(alert['id'], alert['revision'], alert['buildername'],
                        alert['test'], alert['stage'], alert['loop'], alert['user'])
            continue
        # Verify All talos stage is completed
        if alert['stage'] == 4:
            LOG.info("We are in stage 4, and going to verify if all_talos ran successfully.")
            previousRevision = getRevisions(alert['revision'], alert['buildername'],
                                            start=-1, end=-1)[0]
            repo_name = query_repo_name_from_buildername(alert['buildername'])
            all_buildernames = build_talos_buildernames_for_repo(repo_name)
            for revision in [alert['revision'], previousRevision]:
                for buildername in all_buildernames:
                    dataPoints = getSuccessfulJobs(revision, buildername)
                    if dataPoints < 6:
                        # We wait for 8 hours for all talos tests to complete,
                        # And if they don't then we mark them for manual intervention
                        alert['loop'] += 1
                        if alert['loop'] > (TIME_TO_BUILD + TIME_TO_TEST +
                                            PENDING_TIME + TIME_TO_WAIT) / CYCLE_TIME:
                            alert['stage'] = -1
                            alert['user'] = '******'
                        else:
                            alert['stage'] = 3
                        break
                if alert['stage'] != 4:
                    break
            if alert['stage'] != 4:
                updateAlert(alert['id'], alert['revision'], alert['buildername'],
                            alert['test'], alert['stage'], alert['loop'], alert['user'])
                continue
            alert['stage'] = 5  # final stage, sheriff will check for this.
            updateAlert(alert['id'], alert['revision'], alert['buildername'],
                        alert['test'], alert['stage'], alert['loop'], alert['user'])
def main():
    """CLI entry point with three modes: retrigger coalesced jobs
    (--coalesced), fill in a revision (--fill-revision), or trigger a
    revision range per buildername (default).

    Exits -1 on invalid credentials and 1 when triggering raises.
    """
    options = parse_args()
    validate_options(options)
    repo_url = query_repo_url(options.repo_name)
    if not valid_credentials():
        sys.exit(-1)
    # NOTE: binds a function-local LOG; module-level logger is untouched.
    if options.debug:
        LOG = setup_logging(logging.DEBUG)
    else:
        LOG = setup_logging(logging.INFO)
    # Setting the QUERY_SOURCE global variable in mozci.py
    set_query_source(options.query_source)
    if options.buildernames:
        options.buildernames = sanitize_buildernames(options.buildernames)
        # Prefer the repo URL implied by the first buildername.
        repo_url = query_repo_url_from_buildername(options.buildernames[0])
    if not options.repo_name:
        options.repo_name = query_repo_name_from_buildername(
            options.buildernames[0])
    if options.rev == 'tip':
        revision = query_repo_tip(repo_url)
        LOG.info("The tip of %s is %s", options.repo_name, revision)
    else:
        revision = query_full_revision_info(repo_url, options.rev)
    # Mode 1: Trigger coalesced jobs
    if options.coalesced:
        query_api = BuildApi()
        request_ids = query_api.find_all_jobs_by_status(
            options.repo_name, revision, COALESCED)
        if len(request_ids) == 0:
            LOG.info('We did not find any coalesced job')
        for request_id in request_ids:
            make_retrigger_request(repo_name=options.repo_name,
                                   request_id=request_id,
                                   dry_run=options.dry_run)
        return
    # Mode #2: Fill-in a revision
    if options.fill_revision:
        trigger_missing_jobs_for_revision(repo_name=options.repo_name,
                                          revision=revision,
                                          dry_run=options.dry_run)
        return
    # Mode #3: Trigger jobs based on revision list modifiers
    for buildername in options.buildernames:
        revlist = determine_revlist(repo_url=repo_url,
                                    buildername=buildername,
                                    rev=revision,
                                    back_revisions=options.back_revisions,
                                    delta=options.delta,
                                    from_rev=options.from_rev,
                                    backfill=options.backfill,
                                    skips=options.skips,
                                    max_revisions=options.max_revisions)
        try:
            trigger_range(
                buildername=buildername,
                revisions=revlist,
                times=options.times,
                dry_run=options.dry_run,
                files=options.files,
                trigger_build_if_missing=options.trigger_build_if_missing)
        except Exception, e:
            LOG.exception(e)
            exit(1)
        if revlist:
            # Emit a Treeherder link covering the whole triggered range.
            LOG.info('https://treeherder.mozilla.org/#/jobs?%s' %
                     urllib.urlencode({
                         'repo': options.repo_name,
                         'fromchange': revlist[-1],
                         'tochange': revlist[0],
                         'filter-searchStr': buildername
                     }))
def main():
    """Alert-processing state machine (verbose variant with extra logging).

    Each alert carries a 'stage' field:
      0: new alert (merge/pgo alerts are parked at -1 for manual handling)
      1: backfill jobs around the alerting revision (Treeherder as source)
      2: verify backfill completed, then bisect with compare()
      3: trigger all talos jobs on the bad and previous revisions
      4: verify all talos jobs completed
      5: done; a sheriff takes over
     -1: needs manual inspection
    Stage transitions are persisted via updateAlert().
    """
    alerts = getAlerts()
    for alert in alerts:
        # new alert
        LOG.info("Running alert for: [%s, %s, %s]" %
                 (alert['test'], alert['buildername'], alert['revision']))
        if alert['stage'] == 0:
            LOG.info("We are in stage 0.")
            if checkMerge(alert['revision'], alert['buildername']) or 'pgo' in alert['buildername']:
                LOG.info("We are ignoring alert: %s since it is either a merge or a pgo job."
                         % alert['test'])
                alert['stage'] = -1  # We need to have manual inspection in this case.
                alert['user'] = '******'
                updateAlert(alert['id'], alert['revision'], alert['buildername'],
                            alert['test'], alert['stage'], alert['loop'], alert['user'])
            else:
                alert['stage'] = 1
        # trigger jobs for backfill
        if alert['stage'] == 1:
            LOG.info("We are in stage 1, and going to backfill jobs.")
            revisionList = getRevisions(alert['revision'], alert['buildername'],
                                        start=-2, end=2)
            # Setting Treeherder as the source for querying.
            set_query_source("treeherder")
            trigger_range(alert['buildername'], revisionList, times=6, dry_run=DRY_RUN)
            alert['stage'] = 2
            # We want some time interval between stage 1 and 2, so we exit.
            updateAlert(alert['id'], alert['revision'], alert['buildername'],
                        alert['test'], alert['stage'], alert['loop'], alert['user'])
            continue
        # verify jobs for backfill
        if alert['stage'] == 2:
            LOG.info("We are in stage 2, and going to verify if jobs are backfilled.")
            revisionList = getRevisions(alert['revision'], alert['buildername'],
                                        start=-2, end=2)
            for revision in revisionList:
                dataPoints = getSuccessfulJobs(revision, alert['buildername'])
                # If dataPoints are less than 6, it means that builds/jobs are still running.
                if dataPoints < 6:
                    print "data points <6 for revision: %s" % revision
                    # We wait for 6 hours for all triggered tests to complete,
                    # And if they don't then we mark them for manual intervention/
                    alert['loop'] += 1
                    if alert['loop'] > (TIME_TO_BUILD + TIME_TO_TEST + PENDING_TIME) / CYCLE_TIME:
                        LOG.info("The jobs did not complete backfilling in time, "
                                 "assigning for human inspection.")
                        alert['stage'] = -1
                        alert['user'] = '******'
                    else:
                        LOG.info("The jobs have not completed backfilling. Looping back to stage 1.")
                        alert['stage'] = 1
                    break
            if alert['stage'] != 2:
                print "updating alert and then continue, not stage 2"
                updateAlert(alert['id'], alert['revision'], alert['buildername'],
                            alert['test'], alert['stage'], alert['loop'], alert['user'])
                continue
            badRevisions = []
            # Reset the loop for upcoming stages
            alert['loop'] = 0
            # Bisect: compare each consecutive pair of revisions.
            for i in range(1, len(revisionList)):
                print "getting results for revision number: %s" % i
                results = compare(alert['test'], alert['buildername'],
                                  revisionList[i], revisionList[i-1])
                print "compare returned: %s" % results
                if results < -2.0:
                    print "appending bad revision to list: %s"% revisionList[i]
                    badRevisions.append(revisionList[i])
            if len(badRevisions) != 1:
                LOG.info("There are too many bad revisions: %s for alert %s on buildername %s, "
                         "assigning for human inspection."
                         % (badRevisions, alert['test'], alert['buildername']))
                alert['stage'] = -1  # too noisy, something bad happened
                alert['user'] = '******'
                print "too many bad revisions, update alert to human"
                updateAlert(alert['id'], alert['revision'], alert['buildername'],
                            alert['test'], alert['stage'], alert['loop'], alert['user'])
                continue
            if checkMerge(badRevisions[0], alert['buildername']):
                LOG.info("The bad revision %s identified for alert %s on buildername %s is a merge, "
                         "assigning for human inspection"
                         % (badRevisions[0], alert['test'], alert['buildername']))
                alert['stage'] = -1  # A merge revision is a bad revision, manually inspect
                alert['user'] = '******'
            if alert['revision'] != badRevisions[0]:
                LOG.info("Alert_Manager misreported the bad revision. The actual bad revision is %s "
                         "for alert %s on %s buildername."
                         % (badRevisions[0], alert['test'], alert['buildername']))
                # we misreported initially, change the actual regression revision
                alert['revision'] = badRevisions[0]
            # NOTE(review): this unconditionally advances to stage 3, which
            # overwrites the -1 set just above for merge revisions — confirm
            # the intended indentation/ordering in the original source.
            print "setting stage = 3!"
            alert['stage'] = 3
        # Trigger all talos stage
        if alert['stage'] == 3:
            LOG.info("We are in stage 3, and going to trigger all_talos jobs.")
            repo_name = query_repo_name_from_buildername(alert['buildername'])
            # Setting Treeherder as the source for querying.
            set_query_source("treeherder")
            trigger_all_talos_jobs(repo_name, alert['revision'], times=6, dry_run=DRY_RUN)
            previousRevision = getRevisions(alert['revision'], alert['buildername'],
                                            start=-1, end=-1)[0]
            trigger_all_talos_jobs(repo_name, previousRevision, times=6, dry_run=DRY_RUN)
            alert['stage'] = 4
            updateAlert(alert['id'], alert['revision'], alert['buildername'],
                        alert['test'], alert['stage'], alert['loop'], alert['user'])
            continue
        # Verify All talos stage is completed
        if alert['stage'] == 4:
            LOG.info("We are in stage 4, and going to verify if all_talos ran successfully.")
            previousRevision = getRevisions(alert['revision'], alert['buildername'],
                                            start=-1, end=-1)[0]
            repo_name = query_repo_name_from_buildername(alert['buildername'])
            all_buildernames = build_talos_buildernames_for_repo(repo_name)
            for revision in [alert['revision'], previousRevision]:
                for buildername in all_buildernames:
                    dataPoints = getSuccessfulJobs(revision, buildername)
                    if dataPoints < 6:
                        # We wait for 8 hours for all talos tests to complete,
                        # And if they don't then we mark them for manual intervention
                        alert['loop'] += 1
                        if alert['loop'] > (TIME_TO_BUILD + TIME_TO_TEST +
                                            PENDING_TIME + TIME_TO_WAIT) / CYCLE_TIME:
                            LOG.info("The all talos jobs for alert %s on %s revision did not complete in time, "
                                     " assigning for human inspection."
                                     % (alert['test'], alert['revision']))
                            alert['stage'] = -1
                            alert['user'] = '******'
                        else:
                            alert['stage'] = 3
                        break
                if alert['stage'] != 4:
                    break
            if alert['stage'] != 4:
                updateAlert(alert['id'], alert['revision'], alert['buildername'],
                            alert['test'], alert['stage'], alert['loop'], alert['user'])
                continue
            alert['stage'] = 5  # final stage, sheriff will check for this.
            alert['user'] = '******'
            LOG.info("All automated parts are complete.")
            updateAlert(alert['id'], alert['revision'], alert['buildername'],
                        alert['test'], alert['stage'], alert['loop'], alert['user'])
def test_query_repo_name_from_buildername_b2g(self, query_repositories):
    """Test query_repo_name_from_buildername with a b2g job."""
    repo = query_repo_name_from_buildername("b2g_real-repo_win32_gecko build")
    assert repo == "real-repo"
def main():
    """CLI entry point supporting several trigger modes:

    0. --backfill: manual backfill for the first buildername.
    1. --coalesced: retrigger coalesced jobs on the revision.
    2. --fill-revision / --trigger-tests-only: fill in missing jobs.
    3. Explicit buildernames (no include/exclude/failed-jobs modifiers).
    4. --includes/--exclude: schedule every matching builder.
    5. --failed-jobs: retrigger jobs that ended in WARNING.

    Modes 3-5 then expand each buildername over a revision list
    (backfill/delta/etc.) and trigger the range.
    """
    options = parse_args()
    # NOTE: binds a function-local LOG; module-level logger is untouched.
    if options.debug:
        LOG = setup_logging(logging.DEBUG)
    else:
        LOG = setup_logging(logging.INFO)
    validate_options(options)
    # Credentials are not needed for a dry run.
    if not options.dry_run and not valid_credentials():
        sys.exit(-1)
    # Setting the QUERY_SOURCE global variable in mozci.py
    set_query_source(options.query_source)
    if options.buildernames:
        options.buildernames = sanitize_buildernames(options.buildernames)
        repo_url = query_repo_url_from_buildername(options.buildernames[0])
    if not options.repo_name:
        repo_name = query_repo_name_from_buildername(options.buildernames[0])
    else:
        repo_name = options.repo_name
        repo_url = query_repo_url(repo_name)
    if options.rev == 'tip':
        revision = query_repo_tip(repo_url).changesets[0].node
        LOG.info("The tip of %s is %s", repo_name, revision)
    else:
        revision = query_push_by_revision(repo_url, options.rev,
                                          return_revision_list=True)
    # Schedule jobs through TaskCluster if --taskcluster option has been set to true
    if options.taskcluster:
        mgr = TaskClusterBuildbotManager()
    else:
        mgr = BuildAPIManager()
    trigger_build_if_missing = options.trigger_build_if_missing
    # On try, missing builds are never triggered implicitly.
    if repo_name == 'try':
        trigger_build_if_missing = False
    # Mode 0: Backfill
    if options.backfill:
        manual_backfill(revision, options.buildernames[0], dry_run=options.dry_run)
        return
    # Mode 1: Trigger coalesced jobs
    if options.coalesced:
        query_api = BuildApi()
        request_ids = query_api.find_all_jobs_by_status(repo_name, revision, COALESCED)
        if len(request_ids) == 0:
            LOG.info('We did not find any coalesced job')
        for request_id in request_ids:
            make_retrigger_request(repo_name=repo_name,
                                   request_id=request_id,
                                   auth=get_credentials(),
                                   dry_run=options.dry_run)
        return
    # Mode #2: Fill-in a revision or trigger_test_jobs_only
    if options.fill_revision or options.trigger_tests_only:
        mgr.trigger_missing_jobs_for_revision(
            repo_name=repo_name,
            revision=revision,
            dry_run=options.dry_run,
            trigger_build_if_missing=not options.trigger_tests_only
        )
        return
    # Mode #3: Trigger jobs based on revision list modifiers
    if not (options.includes or options.exclude or options.failed_jobs):
        job_names = options.buildernames
    # Mode 4 - Schedule every builder matching --includes and does not match --exclude.
    elif options.includes or options.exclude:
        filters_in = options.includes.split(',') + [repo_name]
        filters_out = []
        if options.exclude:
            filters_out = options.exclude.split(',')
        job_names = filter_buildernames(
            buildernames=query_builders(repo_name=repo_name),
            include=filters_in,
            exclude=filters_out
        )
        if len(job_names) == 0:
            LOG.info("0 jobs match these filters. please try again.")
            return
        if options.existing_only:
            # We query all successful jobs for a given revision and filter
            # them by include/exclude filters.
            trigger_build_if_missing = False
            successful_jobs = TreeherderApi().find_all_jobs_by_status(
                repo_name=repo_name,
                revision=revision,
                status=SUCCESS)
            # We will filter out all the existing job from those successful job we have.
            job_names = [buildername for buildername in successful_jobs
                         if buildername in job_names]
            cont = raw_input("The ones which have existing builds out of %i jobs will be triggered,\
 do you wish to continue? y/n/d (d=show details) " % len(job_names))
        else:
            cont = raw_input("%i jobs will be triggered, do you wish to continue? \
y/n/d (d=show details) " % len(job_names))
        if cont.lower() == 'd':
            LOG.info("The following jobs will be triggered: \n %s" % '\n'.join(job_names))
            cont = raw_input("Do you wish to continue? y/n ")
        if cont.lower() != 'y':
            exit(1)
    # Mode 5: Use --failed-jobs to trigger jobs for particular revision
    elif options.failed_jobs:
        job_names = TreeherderApi().find_all_jobs_by_status(
            repo_name=repo_name,
            revision=revision,
            status=WARNING)
    for buildername in job_names:
        # Expand the single revision into the requested revision list.
        revlist = determine_revlist(
            repo_url=repo_url,
            buildername=buildername,
            rev=revision,
            back_revisions=options.back_revisions,
            delta=options.delta,
            from_rev=options.from_rev,
            backfill=options.backfill,
            skips=options.skips,
            max_revisions=options.max_revisions)
        _print_treeherder_link(
            revlist=revlist,
            repo_name=repo_name,
            buildername=buildername,
            revision=revision,
            log=LOG,
            includes=options.includes,
            exclude=options.exclude)
        try:
            mgr.trigger_range(
                buildername=buildername,
                repo_name=repo_name,
                revisions=revlist,
                times=options.times,
                dry_run=options.dry_run,
                files=options.files,
                trigger_build_if_missing=trigger_build_if_missing
            )
        except Exception, e:
            LOG.exception(e)
            exit(1)
def compare(test, buildername, revision, previous_revision):
    """Compare mean performance of *test* between two revisions.

    Looks up the Perfherder signature matching the test/platform/build
    type implied by *buildername*, fetches its performance data over the
    last TWO_WEEKS, and returns the regression as a percentage relative
    to *previous_revision* (positive means regression; direction is
    flipped for tests listed in REVERSE_TESTS). Returns 0 when either
    revision has no data points.
    """
    repo_name = query_repo_name_from_buildername(buildername)
    # Using TWO_WEEKS as interval, may change it afterwards
    signature_request_url = SIGNATURE_URL % (repo_name, TWO_WEEKS)
    signatures = fetch_json(signature_request_url)
    options_collection_hash_list = fetch_json(OPTION_COLLECTION_HASH)
    for signature, value in signatures.iteritems():
        # Skip processing subtests. They are identified by 'test' key in the dictionary.
        if 'test' in value:
            continue
        # Ignoring e10s here.
        # TODO: Revisit this later
        if TBPL_TESTS[test]['testname'].lower() == value['suite'].lower() and \
           TREEHERDER_PLATFORM[value["machine_platform"]] in buildername and \
           'test_options' not in value:
            test_signature = signature
        else:
            continue
        hash_signature = value['option_collection_hash']
        # NOTE(review): typeOfTest is only bound when a hash matches below;
        # an unmatched hash would raise NameError (or reuse a stale value)
        # on the next line — confirm the input data guarantees a match.
        for key in options_collection_hash_list:
            if hash_signature == key["option_collection_hash"]:
                typeOfTest = key["options"][0]["name"]
                break
        if typeOfTest == 'pgo' and typeOfTest not in buildername:
            # if pgo, it should be present in buildername
            continue
        elif typeOfTest == 'opt':
            # if opt, nothing present in buildername
            break
        else:
            # We do not run talos on any branch other than pgo and opt.
            # NOTE(review): a pgo signature whose buildername *does* contain
            # 'pgo' falls through to this branch and is skipped — confirm a
            # `break` was not intended for that case.
            continue
    # Using TWO_WEEKS as interval, may change it afterwards
    req = fetch_json(PERFORMANCE_DATA % (repo_name, TWO_WEEKS, test_signature))
    performance_data = req[test_signature]
    treeherder_client = TreeherderClient()
    # Map each revision to its Treeherder resultset id.
    revision_resultset_id = \
        treeherder_client.get_resultsets(repo_name, revision=revision)[0]["id"]
    previous_revision_resultset_id = \
        treeherder_client.get_resultsets(repo_name, revision=previous_revision)[0]["id"]
    revision_perfdata = []
    previous_revision_perfdata = []
    for data in performance_data:
        if data["result_set_id"] == revision_resultset_id:
            revision_perfdata.append(data["value"])
        elif data["result_set_id"] == previous_revision_resultset_id:
            previous_revision_perfdata.append(data["value"])
    if revision_perfdata and previous_revision_perfdata:
        mean_revision_perfdata = \
            sum(revision_perfdata) / float(len(revision_perfdata))
        mean_previous_revision_perfdata = \
            sum(previous_revision_perfdata) / float(len(previous_revision_perfdata))
    else:
        print "previous_revision_perfdata: %s" % previous_revision_perfdata
        print "revision_perfdata: %s" % revision_perfdata
        return 0
    if test in REVERSE_TESTS:
        # lower value results in regression
        return (mean_revision_perfdata - mean_previous_revision_perfdata) * 100.0 \
            / mean_previous_revision_perfdata
    else:
        # higher value results in regression
        return (mean_previous_revision_perfdata - mean_revision_perfdata) * 100.0 \
            / mean_previous_revision_perfdata
revlist = backfill_revlist( options.buildername, revlist, options.times, options.dry_run ) else: revlist = [options.rev] if options.skips: revlist = revlist[::options.skips] try: trigger_range( buildername=options.buildername, revisions=revlist, times=options.times, dry_run=options.dry_run ) except Exception, e: LOG.exception(e) exit(1) if revlist: LOG.info('https://treeherder.mozilla.org/#/jobs?%s' % urllib.urlencode({'repo': query_repo_name_from_buildername(options.buildername), 'fromchange': revlist[-1], 'tochange': revlist[0], 'filter-searchStr': options.buildername}))
def test_query_repo_name_form_buildername_normal(self, query_repositories):
    """A well-formed buildername should yield its embedded repo name."""
    normal_job = "Linux real-repo opt build"
    extracted = query_repo_name_from_buildername(normal_job)
    assert extracted == "real-repo"
def main():
    """CLI entry point: trigger buildbot/TaskCluster jobs in one of several modes.

    Modes (mutually exclusive, checked in order): trigger-all-talos action,
    backfill, retrigger coalesced jobs, fill-in a revision / tests-only,
    then builder-list based triggering (explicit names, include/exclude
    filters, failed jobs, or talos-for-build).
    """
    options = parse_args()
    if options.debug:
        LOG = setup_logging(logging.DEBUG)
    else:
        LOG = setup_logging(logging.INFO)

    if options.action == 'trigger-all-talos':
        trigger_all_talos_jobs(options.repo_name, options.rev, options.times,
                               dry_run=options.dry_run)
        sys.exit(0)

    validate_options(options)
    # Credentials are only needed when we will actually talk to the scheduler.
    if not options.dry_run and not valid_credentials():
        sys.exit(-1)

    # Setting the QUERY_SOURCE global variable in mozci.py
    set_query_source(options.query_source)

    if options.buildernames:
        options.buildernames = sanitize_buildernames(options.buildernames)
        repo_url = query_repo_url_from_buildername(options.buildernames[0])

    # Derive the repo either from the first buildername or from --repo-name.
    if not options.repo_name:
        repo_name = query_repo_name_from_buildername(options.buildernames[0])
    else:
        repo_name = options.repo_name
        repo_url = query_repo_url(repo_name)

    if options.rev == 'tip':
        revision = query_repo_tip(repo_url).changesets[0].node
        LOG.info("The tip of %s is %s", repo_name, revision)
    else:
        revision = query_push_by_revision(repo_url, options.rev,
                                          return_revision_list=True)

    # Schedule jobs through TaskCluster if --taskcluster option has been set to true
    if options.taskcluster:
        mgr = TaskClusterBuildbotManager(web_auth=True)
    else:
        mgr = BuildAPIManager()

    trigger_build_if_missing = options.trigger_build_if_missing
    # On try, missing builds are never triggered implicitly.
    if repo_name == 'try':
        trigger_build_if_missing = False

    # Mode 0: Backfill
    if options.backfill:
        manual_backfill(revision, options.buildernames[0], dry_run=options.dry_run)
        return

    # Mode 1: Trigger coalesced jobs
    if options.coalesced:
        query_api = BuildApi()
        request_ids = query_api.find_all_jobs_by_status(repo_name, revision, COALESCED)
        if len(request_ids) == 0:
            LOG.info('We did not find any coalesced job')
        for request_id in request_ids:
            make_retrigger_request(repo_name=repo_name,
                                   request_id=request_id,
                                   auth=get_credentials(),
                                   dry_run=options.dry_run)
        return

    # Mode #2: Fill-in a revision or trigger_test_jobs_only
    if options.fill_revision or options.trigger_tests_only:
        mgr.trigger_missing_jobs_for_revision(
            repo_name=repo_name,
            revision=revision,
            dry_run=options.dry_run,
            trigger_build_if_missing=not options.trigger_tests_only
        )
        return

    # Mode #3: Trigger jobs based on revision list modifiers
    if not (options.includes or options.exclude or options.failed_jobs or
            options.trigger_talos_for_build):
        job_names = options.buildernames
    # Mode 4 - Schedule every builder matching --includes and does not match --exclude.
    elif options.includes or options.exclude:
        # NOTE(review): job_names is not assigned on this branch — presumably
        # _includes_excludes() exits or triggers on its own; confirm, otherwise
        # the loop below raises NameError.
        _includes_excludes(options)
    # Mode 5: Use --failed-jobs to trigger jobs for particular revision
    elif options.failed_jobs:
        job_names = TreeherderApi().find_all_jobs_by_status(
            repo_name=repo_name,
            revision=revision,
            status=WARNING)
    elif options.trigger_talos_for_build:
        trigger_talos_jobs_for_build(
            buildername=options.buildernames[0],
            revision=revision,
            times=2,
            dry_run=options.dry_run,
        )
        exit(0)

    for buildername in job_names:
        revlist = determine_revlist(
            repo_url=repo_url,
            buildername=buildername,
            rev=revision,
            back_revisions=options.back_revisions,
            delta=options.delta,
            from_rev=options.from_rev,
            backfill=options.backfill,
            skips=options.skips,
            max_revisions=options.max_revisions)

        _print_treeherder_link(
            revlist=revlist,
            repo_name=repo_name,
            buildername=buildername,
            revision=revision,
            log=LOG,
            includes=options.includes,
            exclude=options.exclude)

        try:
            mgr.trigger_range(
                buildername=buildername,
                repo_name=repo_name,
                revisions=revlist,
                times=options.times,
                dry_run=options.dry_run,
                files=options.files,
                trigger_build_if_missing=trigger_build_if_missing
            )
        except Exception as e:
            LOG.exception(e)
            exit(1)
def test_query_repo_name_from_buildername_invalid(self, query_repositories):
    """A buildername with an unknown repo must make the lookup raise."""
    bad_job = "Linux not-a-repo opt build"
    with pytest.raises(Exception):
        query_repo_name_from_buildername(bad_job)
action="store_true", dest="debug", help="set debug for logging.") options = parser.parse_args() if options.debug: LOG.setLevel(logging.DEBUG) LOG.info("Setting DEBUG level") else: LOG.setLevel(logging.INFO) # requests is too noisy and adds no value logging.getLogger("requests").setLevel(logging.WARNING) repo_name = query_repo_name_from_buildername(options.buildername) all_jobs = query_jobs_schedule(repo_name, options.rev) jobs = _matching_jobs(options.buildername, all_jobs) import pprint for schedule_info in jobs: status = query_job_status(schedule_info) if status == COALESCED: print "%d %s %s/%s/build/%s" % \ (schedule_info["requests"][0]["request_id"], RESULTS[status], HOST_ROOT, repo_name, schedule_info["build_id"]) status_info = _status_info(schedule_info) pprint.pprint(status_info) revision = status_info["properties"]["revision"] # Print the job that was coalesced with print 'https://treeherder.mozilla.org/#/jobs?%s' % \
def main():
    """CLI entry point: trigger buildbot/TaskCluster jobs in one of several modes.

    Near-duplicate of the other trigger ``main``: trigger-all-talos action,
    backfill, coalesced retriggers, fill-in/tests-only, then builder-list
    based triggering (explicit names, include/exclude filters, failed jobs,
    or talos-for-build).
    """
    options = parse_args()
    if options.debug:
        LOG = setup_logging(logging.DEBUG)
    else:
        LOG = setup_logging(logging.INFO)

    if options.action == 'trigger-all-talos':
        trigger_all_talos_jobs(options.repo_name, options.rev, options.times,
                               dry_run=options.dry_run)
        sys.exit(0)

    validate_options(options)
    # Credentials are only needed when we will actually talk to the scheduler.
    if not options.dry_run and not valid_credentials():
        sys.exit(-1)

    # Setting the QUERY_SOURCE global variable in mozci.py
    set_query_source(options.query_source)

    if options.buildernames:
        options.buildernames = sanitize_buildernames(options.buildernames)
        repo_url = query_repo_url_from_buildername(options.buildernames[0])

    # Derive the repo either from the first buildername or from --repo-name.
    if not options.repo_name:
        repo_name = query_repo_name_from_buildername(options.buildernames[0])
    else:
        repo_name = options.repo_name
        repo_url = query_repo_url(repo_name)

    if options.rev == 'tip':
        revision = query_repo_tip(repo_url).changesets[0].node
        LOG.info("The tip of %s is %s", repo_name, revision)
    else:
        revision = query_push_by_revision(repo_url, options.rev,
                                          return_revision_list=True)

    # Schedule jobs through TaskCluster if --taskcluster option has been set to true
    if options.taskcluster:
        mgr = TaskClusterBuildbotManager(web_auth=True)
    else:
        mgr = BuildAPIManager()

    trigger_build_if_missing = options.trigger_build_if_missing
    # On try, missing builds are never triggered implicitly.
    if repo_name == 'try':
        trigger_build_if_missing = False

    # Mode 0: Backfill
    if options.backfill:
        manual_backfill(revision, options.buildernames[0], dry_run=options.dry_run)
        return

    # Mode 1: Trigger coalesced jobs
    if options.coalesced:
        query_api = BuildApi()
        request_ids = query_api.find_all_jobs_by_status(
            repo_name, revision, COALESCED)
        if len(request_ids) == 0:
            LOG.info('We did not find any coalesced job')
        for request_id in request_ids:
            make_retrigger_request(repo_name=repo_name,
                                   request_id=request_id,
                                   auth=get_credentials(),
                                   dry_run=options.dry_run)
        return

    # Mode #2: Fill-in a revision or trigger_test_jobs_only
    if options.fill_revision or options.trigger_tests_only:
        mgr.trigger_missing_jobs_for_revision(
            repo_name=repo_name,
            revision=revision,
            dry_run=options.dry_run,
            trigger_build_if_missing=not options.trigger_tests_only)
        return

    # Mode #3: Trigger jobs based on revision list modifiers
    if not (options.includes or options.exclude or options.failed_jobs or
            options.trigger_talos_for_build):
        job_names = options.buildernames
    # Mode 4 - Schedule every builder matching --includes and does not match --exclude.
    elif options.includes or options.exclude:
        # NOTE(review): job_names is not assigned on this branch — presumably
        # _includes_excludes() exits or triggers on its own; confirm, otherwise
        # the loop below raises NameError.
        _includes_excludes(options)
    # Mode 5: Use --failed-jobs to trigger jobs for particular revision
    elif options.failed_jobs:
        job_names = TreeherderApi().find_all_jobs_by_status(
            repo_name=repo_name,
            revision=revision,
            status=WARNING)
    elif options.trigger_talos_for_build:
        trigger_talos_jobs_for_build(
            buildername=options.buildernames[0],
            revision=revision,
            times=2,
            dry_run=options.dry_run,
        )
        exit(0)

    for buildername in job_names:
        revlist = determine_revlist(repo_url=repo_url,
                                    buildername=buildername,
                                    rev=revision,
                                    back_revisions=options.back_revisions,
                                    delta=options.delta,
                                    from_rev=options.from_rev,
                                    backfill=options.backfill,
                                    skips=options.skips,
                                    max_revisions=options.max_revisions)

        _print_treeherder_link(revlist=revlist,
                               repo_name=repo_name,
                               buildername=buildername,
                               revision=revision,
                               log=LOG,
                               includes=options.includes,
                               exclude=options.exclude)

        try:
            mgr.trigger_range(
                buildername=buildername,
                repo_name=repo_name,
                revisions=revlist,
                times=options.times,
                dry_run=options.dry_run,
                files=options.files,
                trigger_build_if_missing=trigger_build_if_missing)
        except Exception as e:
            LOG.exception(e)
            exit(1)
def main():
    """CLI entry point (older variant): trigger jobs over a revision range.

    Validates options and credentials, optionally retriggers coalesced jobs,
    then triggers each requested builder over the revlist derived from the
    revision-range options, printing a Treeherder link per builder.
    """
    options = parse_args()
    validate_options(options)
    # BUG FIX: the return value of valid_credentials() was previously ignored,
    # so runs proceeded (and later failed) with bad credentials. Exit early
    # instead, matching the behavior of the other trigger entry points.
    if not valid_credentials():
        exit(-1)

    if options.debug:
        LOG = setup_logging(logging.DEBUG)
    else:
        LOG = setup_logging(logging.INFO)

    # Setting the QUERY_SOURCE global variable in mozci.py
    set_query_source(options.query_source)

    if options.buildernames:
        options.buildernames = sanitize_buildernames(options.buildernames)
        repo_url = query_repo_url_from_buildername(options.buildernames[0])

    if not options.repo_name:
        options.repo_name = query_repo_name_from_buildername(options.buildernames[0])

    if options.rev == 'tip':
        repo_url = query_repo_url(options.repo_name)
        options.rev = query_repo_tip(repo_url)
        LOG.info("The tip of %s is %s", options.repo_name, options.rev)

    # Retrigger coalesced jobs instead of scheduling a revision range.
    if options.coalesced:
        query_api = BuildApi()
        request_ids = query_api.find_all_jobs_by_status(options.repo_name,
                                                        options.rev, COALESCED)
        if len(request_ids) == 0:
            LOG.info('We did not find any coalesced job')
        for request_id in request_ids:
            make_retrigger_request(repo_name=options.repo_name,
                                   request_id=request_id,
                                   dry_run=options.dry_run)
        return

    for buildername in options.buildernames:
        revlist = determine_revlist(
            repo_url=repo_url,
            buildername=buildername,
            rev=options.rev,
            back_revisions=options.back_revisions,
            delta=options.delta,
            from_rev=options.from_rev,
            backfill=options.backfill,
            skips=options.skips,
            max_revisions=options.max_revisions)

        try:
            trigger_range(
                buildername=buildername,
                revisions=revlist,
                times=options.times,
                dry_run=options.dry_run,
                files=options.files,
                trigger_build_if_missing=options.trigger_build_if_missing
            )
        except Exception as e:
            LOG.exception(e)
            exit(1)

        if revlist:
            # Handy Treeherder link covering the whole triggered range.
            LOG.info('https://treeherder.mozilla.org/#/jobs?%s' %
                     urllib.urlencode({'repo': options.repo_name,
                                       'fromchange': revlist[-1],
                                       'tochange': revlist[0],
                                       'filter-searchStr': buildername}))
def main():
    """Alert-manager entry point: advance each performance alert through stages.

    Stage machine (persisted via updateAlert between runs):
      0  new alert: discard merges/pgo (-1) or move to backfill (1)
      1  trigger backfill jobs around the alert revision, then wait
      2  verify backfill finished, compare revisions, pin the bad one
      3  trigger all-talos jobs on the bad revision and its parent
      4  verify all-talos runs finished
      5  done — handed over to a sheriff
     -1  needs manual inspection
    """
    alerts = getAlerts()
    for alert in alerts:
        # new alert
        if alert['stage'] == 0:
            if checkMerge(alert['revision'], alert['buildername']) or \
                    'pgo' in alert['buildername']:
                LOG.info("We are ignoring this alert since it is either a merge or a pgo job.")
                alert['stage'] = -1  # We need to have manual inspection in this case.
                alert['user'] = '******'
                updateAlert(alert['id'], alert['revision'], alert['buildername'],
                            alert['test'], alert['stage'], alert['loop'], alert['user'])
            else:
                alert['stage'] = 1

        # trigger jobs for backfill
        if alert['stage'] == 1:
            LOG.info("We are in stage 1, and going to backfill jobs.")
            revisionList = getRevisions(alert['revision'], alert['buildername'],
                                        start=-2, end=2)
            trigger_range(alert['buildername'], revisionList, times=6, dry_run=DRY_RUN)
            alert['stage'] = 2
            # We want some time interval between stage 1 and 2, so we exit.
            updateAlert(alert['id'], alert['revision'], alert['buildername'],
                        alert['test'], alert['stage'], alert['loop'], alert['user'])
            continue

        # verify jobs for backfill
        if alert['stage'] == 2:
            LOG.info("We are in stage 2, and going to verify if jobs are backfilled.")
            revisionList = getRevisions(alert['revision'], alert['buildername'],
                                        start=-2, end=2)
            for revision in revisionList:
                dataPoints = getSuccessfulJobs(revision, alert['buildername'])
                # If dataPoints are less than 6, it means that builds/jobs are still running.
                if dataPoints < 6:
                    # We wait for 6 hours for all triggered tests to complete,
                    # And if they don't then we mark them for manual intervention/
                    alert['loop'] += 1
                    if alert['loop'] > (TIME_TO_BUILD + TIME_TO_TEST +
                                        PENDING_TIME) / CYCLE_TIME:
                        alert['stage'] = -1
                        alert['user'] = '******'
                    else:
                        alert['stage'] = 1  # retry backfill on the next cycle
                    break
            # Stage changed above -> persist and handle this alert next cycle.
            if alert['stage'] != 2:
                updateAlert(alert['id'], alert['revision'], alert['buildername'],
                            alert['test'], alert['stage'], alert['loop'], alert['user'])
                continue

            badRevisions = []
            # Reset the loop for upcoming stages
            alert['loop'] = 0
            # Pairwise-compare consecutive revisions; < -2.0% is a regression.
            for i in range(1, len(revisionList)):
                results = compare(alert['test'], alert['buildername'],
                                  revisionList[i], revisionList[i - 1])
                if results < -2.0:
                    badRevisions.append(revisionList[i])
            if len(badRevisions) != 1:
                alert['stage'] = -1  # too noisy, something bad happened
                alert['user'] = '******'
                updateAlert(alert['id'], alert['revision'], alert['buildername'],
                            alert['test'], alert['stage'], alert['loop'], alert['user'])
                continue
            if checkMerge(badRevisions[0], alert['buildername']):
                # A merge revision is a bad revision, manually inspect
                alert['stage'] = -1
                alert['user'] = '******'
            if alert['revision'] != badRevisions[0]:
                # we misreported initially, change the actual regression revision
                alert['revision'] = badRevisions[0]
            # NOTE(review): this unconditionally overwrites the -1 set by the
            # merge check two blocks above — confirm that is intended.
            alert['stage'] = 3

        # Trigger all talos stage
        if alert['stage'] == 3:
            LOG.info("We are in stage 3, and going to trigger all_talos jobs.")
            repo_name = query_repo_name_from_buildername(alert['buildername'])
            trigger_all_talos_jobs(repo_name, alert['revision'], times=6,
                                   dry_run=DRY_RUN)
            previousRevision = getRevisions(alert['revision'], alert['buildername'],
                                            start=-1, end=-1)[0]
            trigger_all_talos_jobs(repo_name, previousRevision, times=6,
                                   dry_run=DRY_RUN)
            alert['stage'] = 4
            updateAlert(alert['id'], alert['revision'], alert['buildername'],
                        alert['test'], alert['stage'], alert['loop'], alert['user'])
            continue

        # Verify All talos stage is completed
        if alert['stage'] == 4:
            LOG.info("We are in stage 4, and going to verify if all_talos ran successfully.")
            previousRevision = getRevisions(alert['revision'], alert['buildername'],
                                            start=-1, end=-1)[0]
            repo_name = query_repo_name_from_buildername(alert['buildername'])
            all_buildernames = build_talos_buildernames_for_repo(repo_name)
            for revision in [alert['revision'], previousRevision]:
                for buildername in all_buildernames:
                    dataPoints = getSuccessfulJobs(revision, buildername)
                    if dataPoints < 6:
                        # We wait for 8 hours for all talos tests to complete,
                        # And if they don't then we mark them for manual intervention
                        alert['loop'] += 1
                        if alert['loop'] > (TIME_TO_BUILD + TIME_TO_TEST +
                                            PENDING_TIME + TIME_TO_WAIT) / CYCLE_TIME:
                            alert['stage'] = -1
                            alert['user'] = '******'
                        else:
                            alert['stage'] = 3  # re-trigger on the next cycle
                        break
                if alert['stage'] != 4:
                    break
            if alert['stage'] != 4:
                updateAlert(alert['id'], alert['revision'], alert['buildername'],
                            alert['test'], alert['stage'], alert['loop'], alert['user'])
                continue
            alert['stage'] = 5  # final stage, sheriff will check for this.
            updateAlert(alert['id'], alert['revision'], alert['buildername'],
                        alert['test'], alert['stage'], alert['loop'], alert['user'])