def main():
    """CLI entry point: retrigger each of the given task ids via TaskCluster."""
    parser = ArgumentParser()
    parser.add_argument("--debug",
                        action="store_true",
                        dest="debug",
                        help="set debug for logging.")
    parser.add_argument("--dry-run",
                        action="store_true",
                        dest="dry_run",
                        help="Dry run. No real actions are taken.")
    parser.add_argument('task_ids',
                        metavar='task_id',
                        type=str,
                        nargs='+',
                        help='Task IDs to work with.')
    options = parser.parse_args()

    # Verbosity follows the --debug flag; setup_logging() picks its own default.
    LOG = setup_logging(logging.DEBUG) if options.debug else setup_logging()

    scheduler = TaskClusterBuildbotManager()
    for task_id in options.task_ids:
        # retrigger() signals failure with a negative return code.
        if scheduler.retrigger(uuid=task_id, dry_run=options.dry_run) < 0:
            LOG.warning("We could not retrigger task %s" % task_id)
def main():
    """CLI entry point: schedule a graph of Buildbot builders via TaskCluster."""
    parser = ArgumentParser()
    parser.add_argument("--debug",
                        action="store_true",
                        dest="debug",
                        help="set debug for logging.")
    parser.add_argument("--dry-run",
                        action="store_true",
                        dest="dry_run",
                        help="Dry run. No real actions are taken.")
    parser.add_argument("--repo-name",
                        action="store",
                        dest="repo_name",
                        type=str,
                        help="Repository name, e.g. mozilla-inbound.")
    parser.add_argument("--revision",
                        action="store",
                        dest="revision",
                        type=str,
                        help="12-char representing a push.")
    parser.add_argument('builders_graph',
                        metavar='builders_graph',
                        help='Graph of builders in the form of: '
                             'dict(builder: [dep_builders].')
    options = parser.parse_args()

    # Verbosity follows the --debug flag.
    setup_logging(logging.DEBUG) if options.debug else setup_logging()

    manager = TaskClusterBuildbotManager()
    # The graph arrives as a string literal on the command line;
    # literal_eval parses it safely (no arbitrary code execution).
    graph = ast.literal_eval(options.builders_graph)
    # XXX: test what happens when we have a bad graph
    manager.schedule_graph(
        repo_name=options.repo_name,
        revision=options.revision,
        builders_graph=graph,
        # dry_run=options.dry_run
    )
def main(): parser = ArgumentParser() parser.add_argument("--debug", action="store_true", dest="debug", help="set debug for logging.") parser.add_argument("--dry-run", action="store_true", dest="dry_run", help="Dry run. No real actions are taken.") parser.add_argument("--repo-name", action="store", dest="repo_name", type=str, help="Repository name, e.g. mozilla-inbound.") parser.add_argument("--revision", action="store", dest="revision", type=str, help="12-char representing a push.") parser.add_argument("--trigger-from-task-id", action="store", dest="trigger_from_task_id", type=str, help="Trigger builders based on build task (use with " "--builders).") parser.add_argument("--builders", action="store", dest="builders", type=str, help="Use this if you want to pass a list of builders " "(e.g. \"['builder 1']\".") parser.add_argument( "--children-of", action="store", dest="children_of", type=str, help="This allows you to request a list of all the associated " "test jobs to a build job.") parser.add_argument("-g", "--graph", action="store", dest="builders_graph", help='Graph of builders in the form of: ' 'dict(builder: [dep_builders].') options = parser.parse_args() if options.debug: setup_logging(logging.DEBUG) else: setup_logging() assert options.repo_name and options.revision, \ "Make sure you specify --repo-name and --revion" if not options.dry_run and not credentials_available(): sys.exit(1) mgr = TaskClusterBuildbotManager() builders = None if options.builders: builders = ast.literal_eval(options.builders) else: builders = get_downstream_jobs(options.children_of) if options.trigger_from_task_id and builders: trigger_builders_based_on_task_id(repo_name=options.repo_name, revision=options.revision, task_id=options.trigger_from_task_id, builders=builders, dry_run=options.dry_run) elif builders and len(builders) == 1: mgr.schedule_arbitrary_job(repo_name=options.repo_name, revision=options.revision, uuid=builders[0], dry_run=options.dry_run) elif 
options.builders_graph: mgr.schedule_graph(repo_name=options.repo_name, revision=options.revision, builders_graph=ast.literal_eval( options.builders_graph), dry_run=options.dry_run) else: print "Please read the help menu to know what options are available to you."
def on_runnable_job_event(data, message, dry_run, treeherder_host, acknowledge):
    """Handle a Treeherder "runnable jobs" pulse event.

    Schedules the requested builders (as a TaskCluster graph where possible,
    via Buildapi otherwise), publishes a summary pulse message, and acks the
    incoming message when ``acknowledge`` is set.

    :param data: pulse payload with project/requester/resultset_id/buildernames.
    :param message: the pulse message (only ``.ack()`` is used here).
    :param dry_run: when set, scheduling calls take no real action.
    :param treeherder_host: host used to build the Treeherder link.
    :param acknowledge: when set, ack the pulse message off the queue.
    """
    # Cleaning mozci caches
    buildjson.BUILDS_CACHE = {}
    query_jobs.JOBS_CACHE = {}
    treeherder_client = TreeherderClient(host='treeherder.allizom.org')  # XXX:

    # Grabbing data received over pulse
    repo_name = data["project"]
    requester = data["requester"]
    resultset_id = data["resultset_id"]
    buildernames = data["buildernames"]

    resultset = treeherder_client.get_resultsets(repo_name, id=resultset_id)[0]
    revision = resultset["revision"]
    author = resultset["author"]
    status = None
    treeherder_link = TREEHERDER % {
        'host': treeherder_host,
        'repo': repo_name,
        'revision': resultset['revision']
    }

    message_sender = MessageHandler()

    if not (requester.endswith('@mozilla.com') or author == requester or
            whitelisted_users(requester)):
        # We want to see this in the alerts
        LOG.error("Notice that we're letting %s schedule jobs for %s." %
                  (requester, treeherder_link))

    # NOTE(review): dead code kept on purpose — authorization is currently
    # disabled (see the TODO inside); remove once LDAP identification lands.
    '''
    # Everyone can press the button, but only authorized users can trigger jobs
    # TODO: remove this when proper LDAP identication is set up on TH
    if not (requester.endswith('@mozilla.com') or author == requester or
            whitelisted_users(requester)):
        if acknowledge:
            # Remove message from pulse queue
            message.ack()

        # We publish a message saying we will not trigger the job
        pulse_message = {
            'resultset_id': resultset_id,
            'requester': requester,
            'status': "Could not determine if the user is authorized, nothing was triggered."}
        routing_key = '{}.{}'.format(repo_name, 'runnable')
        try:
            message_sender.publish_message(pulse_message, routing_key)
        except:
            LOG.warning("Failed to publish message over pulse stream.")

        LOG.error("Requester %s is not allowed to trigger jobs on %s." %
                  (requester, treeherder_link))
        return  # Raising an exception adds too much noise
    '''

    LOG.info("New jobs requested by %s for %s" % (requester, treeherder_link))
    LOG.info("List of builders:")
    for b in buildernames:
        LOG.info("- %s" % b)

    buildernames = filter_invalid_builders(buildernames)

    # Treeherder can send us invalid builder names
    # https://bugzilla.mozilla.org/show_bug.cgi?id=1242038
    if buildernames is None:
        if acknowledge:
            # We need to ack the message to remove it from our queue
            message.ack()
        return

    builders_graph, other_builders_to_schedule = buildbot_bridge.buildbot_graph_builder(
        builders=buildernames,
        revision=revision,
        complete=False  # XXX: This can be removed when BBB is in use
    )

    if builders_graph != {}:
        mgr = TaskClusterBuildbotManager()
        mgr.schedule_graph(
            repo_name=repo_name,
            revision=revision,
            metadata={
                'name': 'pulse_actions_graph',
                'description':
                    'Adding new jobs to push via pulse_actions/treeherder for %s.' % requester,
                'owner': requester,
                'source': treeherder_link,
            },
            builders_graph=builders_graph,
            dry_run=dry_run)
    else:
        LOG.info("We don't have anything to schedule through TaskCluster")

    if other_builders_to_schedule:
        # XXX: We should be able to replace this once all Buildbot jobs run through BBB
        # XXX: There might be a work around with
        # https://github.com/mozilla/mozilla_ci_tools/issues/424
        LOG.info("We're going to schedule these builders via Buildapi: %s" %
                 str(other_builders_to_schedule))
        # This is used for test jobs which need an existing Buildbot job to be scheduled
        for buildername in other_builders_to_schedule:
            trigger_job(revision, buildername, dry_run=dry_run)
    else:
        LOG.info("We don't have anything to schedule through Buildapi")

    # Send a pulse message showing what we did
    message_sender = MessageHandler()
    pulse_message = {
        'resultset_id': resultset_id,
        'graph': builders_graph,
        'requester': requester,
        'status': status
    }
    routing_key = '{}.{}'.format(repo_name, 'runnable')
    try:
        message_sender.publish_message(pulse_message, routing_key)
    except Exception:
        # Bug fix: narrowed from a bare ``except:`` so KeyboardInterrupt and
        # SystemExit are no longer swallowed; publishing stays best-effort.
        LOG.warning("Failed to publish message over pulse stream.")

    if acknowledge:
        # We need to ack the message to remove it from our queue
        message.ack()
def main(): options = parse_args() if options.debug: LOG = setup_logging(logging.DEBUG) else: LOG = setup_logging(logging.INFO) validate_options(options) if not valid_credentials(): sys.exit(-1) # Setting the QUERY_SOURCE global variable in mozci.py set_query_source(options.query_source) if options.buildernames: options.buildernames = sanitize_buildernames(options.buildernames) repo_url = query_repo_url_from_buildername(options.buildernames[0]) if not options.repo_name: repo_name = query_repo_name_from_buildername(options.buildernames[0]) else: repo_name = options.repo_name repo_url = query_repo_url(repo_name) if options.rev == 'tip': revision = query_repo_tip(repo_url).changesets[0].node LOG.info("The tip of %s is %s", repo_name, revision) else: revision = query_push_by_revision(repo_url, options.rev, return_revision_list=True) # Schedule jobs through TaskCluster if --taskcluster option has been set to true if options.taskcluster: mgr = TaskClusterBuildbotManager() else: mgr = BuildAPIManager() trigger_build_if_missing = options.trigger_build_if_missing if repo_name == 'try': trigger_build_if_missing = False # Mode 1: Trigger coalesced jobs if options.coalesced: query_api = BuildApi() request_ids = query_api.find_all_jobs_by_status(repo_name, revision, COALESCED) if len(request_ids) == 0: LOG.info('We did not find any coalesced job') for request_id in request_ids: make_retrigger_request(repo_name=repo_name, request_id=request_id, auth=get_credentials(), dry_run=options.dry_run) return # Mode #2: Fill-in a revision or trigger_test_jobs_only if options.fill_revision or options.trigger_tests_only: mgr.trigger_missing_jobs_for_revision( repo_name=repo_name, revision=revision, dry_run=options.dry_run, trigger_build_if_missing=not options.trigger_tests_only ) return # Mode #3: Trigger jobs based on revision list modifiers if not (options.includes or options.exclude or options.failed_jobs): job_names = options.buildernames # Mode 4 - Schedule every builder matching 
--includes and does not match --exclude. elif options.includes or options.exclude: filters_in = options.includes.split(',') + [repo_name] filters_out = [] if options.exclude: filters_out = options.exclude.split(',') job_names = filter_buildernames( buildernames=query_builders(repo_name=repo_name), include=filters_in, exclude=filters_out ) if len(job_names) == 0: LOG.info("0 jobs match these filters. please try again.") return if options.existing_only: # We query all succesful jobs for a given revision and filter # them by include/exclude filters. trigger_build_if_missing = False successful_jobs = TreeherderApi().find_all_jobs_by_status( repo_name=repo_name, revision=revision, status=SUCCESS) # We will filter out all the existing job from those successful job we have. job_names = [buildername for buildername in successful_jobs if buildername in job_names] cont = raw_input("The ones which have existing builds out of %i jobs will be triggered,\ do you wish to continue? y/n/d (d=show details) " % len(job_names)) else: cont = raw_input("%i jobs will be triggered, do you wish to continue? \ y/n/d (d=show details) " % len(job_names)) if cont.lower() == 'd': LOG.info("The following jobs will be triggered: \n %s" % '\n'.join(job_names)) cont = raw_input("Do you wish to continue? 
y/n ") if cont.lower() != 'y': exit(1) # Mode 5: Use --failed-jobs to trigger jobs for particular revision elif options.failed_jobs: job_names = TreeherderApi().find_all_jobs_by_status( repo_name=repo_name, revision=revision, status=WARNING) for buildername in job_names: revlist = determine_revlist( repo_url=repo_url, buildername=buildername, rev=revision, back_revisions=options.back_revisions, delta=options.delta, from_rev=options.from_rev, backfill=options.backfill, skips=options.skips, max_revisions=options.max_revisions) _print_treeherder_link( revlist=revlist, repo_name=repo_name, buildername=buildername, revision=revision, log=LOG, includes=options.includes, exclude=options.exclude) try: mgr.trigger_range( buildername=buildername, repo_name=repo_name, revisions=revlist, times=options.times, dry_run=options.dry_run, files=options.files, trigger_build_if_missing=trigger_build_if_missing ) except Exception, e: LOG.exception(e) exit(1)
def on_runnable_job_event(data, message, dry_run, stage):
    """Handle a Treeherder "runnable jobs" pulse event.

    Verifies the requester, schedules the requested builders (TaskCluster
    graph where possible, Buildapi otherwise), publishes a summary pulse
    message, and acks the incoming message unless ``dry_run`` is set.

    :param data: pulse payload with project/requester/resultset_id/buildernames.
    :param message: the pulse message (only ``.ack()`` is used here).
    :param dry_run: when set, no scheduling happens and the message is not acked.
    :param stage: when set, talk to the staging Treeherder instance.
    """
    # Cleaning mozci caches
    buildjson.BUILDS_CACHE = {}
    query_jobs.JOBS_CACHE = {}

    if stage:
        treeherder_client = TreeherderClient(host='treeherder.allizom.org')
    else:
        treeherder_client = TreeherderClient()

    # Grabbing data received over pulse
    repo_name = data["project"]
    requester = data["requester"]
    resultset_id = data["resultset_id"]
    buildernames = data["buildernames"]

    resultset = treeherder_client.get_resultsets(repo_name, id=resultset_id)[0]
    revision = resultset["revision"]
    author = resultset["author"]
    status = None
    treeherder_link = TREEHERDER % {'repo': repo_name,
                                    'revision': resultset['revision']}

    message_sender = MessageHandler()

    # Everyone can press the button, but only authorized users can trigger jobs
    # TODO: remove this when proper LDAP identication is set up on TH
    if not (requester.endswith('@mozilla.com') or author == requester or
            whitelisted_users(requester)):
        if not dry_run:
            # Remove message from pulse queue
            message.ack()

        # We publish a message saying we will not trigger the job
        pulse_message = {
            'resultset_id': resultset_id,
            'requester': requester,
            'status': "Could not determine if the user is authorized, nothing was triggered."}
        routing_key = '{}.{}'.format(repo_name, 'runnable')
        try:
            message_sender.publish_message(pulse_message, routing_key)
        except Exception:
            # Bug fix: narrowed from a bare ``except:`` so KeyboardInterrupt
            # and SystemExit are no longer swallowed.
            LOG.warning("Failed to publish message over pulse stream.")

        LOG.error("Requester %s is not allowed to trigger jobs on %s." %
                  (requester, treeherder_link))
        return  # Raising an exception adds too much noise

    LOG.info("New jobs requested by %s for %s" % (requester, treeherder_link))
    LOG.info("List of builders:")
    for b in buildernames:
        LOG.info("- %s" % b)

    buildernames = filter_invalid_builders(buildernames)

    # Treeherder can send us invalid builder names
    # https://bugzilla.mozilla.org/show_bug.cgi?id=1242038
    if buildernames is None:
        if not dry_run:
            # We need to ack the message to remove it from our queue
            message.ack()
        return

    builders_graph, other_builders_to_schedule = buildbot_bridge.buildbot_graph_builder(
        builders=buildernames,
        revision=revision,
        complete=False  # XXX: This can be removed when BBB is in use
    )

    if builders_graph != {}:
        mgr = TaskClusterBuildbotManager()
        mgr.schedule_graph(
            repo_name=repo_name,
            revision=revision,
            metadata={
                'name': 'pulse_actions_graph',
                'description':
                    'Adding new jobs to push via pulse_actions/treeherder for %s.' % requester,
                'owner': requester,
                'source': treeherder_link,
            },
            builders_graph=builders_graph,
            dry_run=dry_run)
    else:
        LOG.info("We don't have anything to schedule through TaskCluster")

    if other_builders_to_schedule:
        # XXX: We should be able to replace this once all Buildbot jobs run through BBB
        # XXX: There might be a work around with
        # https://github.com/mozilla/mozilla_ci_tools/issues/424
        LOG.info("We're going to schedule these builders via Buildapi.")
        # This is used for test jobs which need an existing Buildbot job to be scheduled
        for buildername in other_builders_to_schedule:
            trigger_job(revision, buildername, dry_run=dry_run)
    else:
        LOG.info("We don't have anything to schedule through Buildapi")

    # Send a pulse message showing what we did
    message_sender = MessageHandler()
    pulse_message = {
        'resultset_id': resultset_id,
        'graph': builders_graph,
        'requester': requester,
        'status': status}
    routing_key = '{}.{}'.format(repo_name, 'runnable')
    try:
        message_sender.publish_message(pulse_message, routing_key)
    except Exception:
        # Bug fix: narrowed from a bare ``except:``; publishing stays best-effort.
        LOG.warning("Failed to publish message over pulse stream.")

    if not dry_run:
        # We need to ack the message to remove it from our queue
        message.ack()
def main(): parser = ArgumentParser() parser.add_argument("--debug", action="store_true", dest="debug", help="set debug for logging.") parser.add_argument("--dry-run", action="store_true", dest="dry_run", help="Dry run. No real actions are taken.") parser.add_argument("--repo-name", action="store", dest="repo_name", type=str, help="Repository name, e.g. mozilla-inbound.") parser.add_argument("--revision", action="store", dest="revision", type=str, help="12-char representing a push.") parser.add_argument("--trigger-from-task-id", action="store", dest="trigger_from_task_id", type=str, help="Trigger builders based on build task (use with " "--builders).") parser.add_argument("--builders", action="store", dest="builders", type=str, help="Use this if you want to pass a list of builders " "(e.g. \"['builder 1']\".") parser.add_argument("--children-of", action="store", dest="children_of", type=str, help="This allows you to request a list of all the associated " "test jobs to a build job.") parser.add_argument("-g", "--graph", action="store", dest="builders_graph", help='Graph of builders in the form of: ' 'dict(builder: [dep_builders].') options = parser.parse_args() if options.debug: setup_logging(logging.DEBUG) else: setup_logging() assert options.repo_name and options.revision, \ "Make sure you specify --repo-name and --revision" if not options.dry_run and not credentials_available(): sys.exit(1) mgr = TaskClusterBuildbotManager() builders = None if options.builders: builders = ast.literal_eval(options.builders) else: builders = get_downstream_jobs(options.children_of) if options.trigger_from_task_id and builders: trigger_builders_based_on_task_id( repo_name=options.repo_name, revision=options.revision, task_id=options.trigger_from_task_id, builders=builders, dry_run=options.dry_run ) elif builders and len(builders) == 1: mgr.schedule_arbitrary_job( repo_name=options.repo_name, revision=options.revision, uuid=builders[0], dry_run=options.dry_run ) elif 
options.builders_graph: mgr.schedule_graph( repo_name=options.repo_name, revision=options.revision, builders_graph=ast.literal_eval(options.builders_graph), dry_run=options.dry_run ) else: print "Please read the help menu to know what options are available to you."