def on_event(data, message, dry_run, **kwargs):
    """
    Whenever PGO builds are completed in mozilla-inbound or fx-team,
    we trigger the corresponding talos jobs twice.
    """
    if ignored(data):
        LOG.debug("'%s' with status %i. Nothing to be done.",
                  data['payload']['buildername'], data['payload']['status'])
        return 0  # SUCCESS

    # Cleaning mozci caches
    buildjson.BUILDS_CACHE = {}
    query_jobs.JOBS_CACHE = {}
    payload = data["payload"]
    buildername = payload["buildername"]
    revision = payload["revision"]

    # Treeherder can send us invalid builder names
    # https://bugzilla.mozilla.org/show_bug.cgi?id=1242038
    buildername = filter_invalid_builders(buildername)

    if buildername is None:
        return -1  # FAILURE

    status = trigger_talos_jobs_for_build(buildername=buildername,
                                          revision=revision,
                                          times=2,
                                          dry_run=dry_run)

    LOG.info('We triggered talos jobs for the build.')
    return status
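# A minimal sketch of how the handler above could be exercised in dry-run mode.
# The payload keys (buildername, revision, status) mirror the ones the function
# reads; the concrete values and the no-op message object are illustrative
# assumptions, not real pulse data.
def _example_dry_run_talos_trigger():
    class _NoOpMessage(object):
        def ack(self):
            pass

    data = {
        'payload': {
            'buildername': 'WINNT 6.1 x86-64 mozilla-inbound pgo-build',  # assumed builder name
            'revision': '4e030c8cf8c3',  # assumed revision hash
            'status': 0,
        }
    }
    # With dry_run=True nothing should actually be triggered
    return on_event(data=data, message=_NoOpMessage(), dry_run=True)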
def on_buildbot_event(data, message, dry_run, stage=False):
    """Act upon buildbot events."""
    # Pulse gives us a job_id and a job_guid; we need the request_id.
    LOG.info(
        "%s action requested by %s on repo_name %s with job_id: %s"
        % (data["action"], data["requester"], data["project"], data["job_id"])
    )
    # Cleaning mozci caches
    buildjson.BUILDS_CACHE = {}
    query_jobs.JOBS_CACHE = {}

    if stage:
        treeherder_client = TreeherderClient(host="treeherder.allizom.org")
    else:
        treeherder_client = TreeherderClient()
    repo_name = data["project"]
    job_id = data["job_id"]
    result = treeherder_client.get_jobs(repo_name, id=job_id)
    # If result not found, ignore
    if not result:
        LOG.info("We could not find any result for repo_name: %s and " "job_id: %s" % (repo_name, job_id))
        message.ack()
        return

    result = result[0]
    buildername = result["ref_data_name"]
    resultset_id = result["result_set_id"]
    result_sets = treeherder_client.get_resultsets(repo_name, id=resultset_id)
    revision = result_sets[0]["revision"]
    action = data["action"]
    status = None

    buildername = filter_invalid_builders(buildername)

    # Treeherder can send us invalid builder names
    # https://bugzilla.mozilla.org/show_bug.cgi?id=1242038
    if buildername is None:
        status = "Builder %s was invalid." % buildername[0]

    # Backfill action
    elif action == "backfill":
        manual_backfill(revision, buildername, max_revisions=get_maxRevisions(buildername), dry_run=dry_run)
        if not dry_run:
            status = "Backfill request sent"
        else:
            status = "Dry-run mode, nothing was backfilled"

    # Send a pulse message showing what we did
    message_sender = MessageHandler()
    pulse_message = {"job_id": job_id, "action": action, "requester": data["requester"], "status": status}
    routing_key = "{}.{}".format(repo_name, action)
    try:
        message_sender.publish_message(pulse_message, routing_key)
    except Exception:
        LOG.warning("Failed to publish message over pulse stream.")

    if not dry_run:
        # We need to ack the message to remove it from our queue
        message.ack()
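# For reference, a sketch of the pulse message the handler above publishes after
# acting on a backfill request. The keys and the "{repo_name}.{action}" routing
# key pattern come from the code; the concrete values are made-up examples.
_example_backfill_pulse_message = {
    "job_id": 12345678,                  # assumed Treeherder job id
    "action": "backfill",
    "requester": "someone@example.com",  # assumed requester
    "status": "Backfill request sent",
}
_example_backfill_routing_key = "mozilla-inbound.backfill"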
def on_event(data, message, dry_run):
    """Automatically backfill failed jobs."""
    # Cleaning mozci caches
    buildjson.BUILDS_CACHE = {}
    query_jobs.JOBS_CACHE = {}
    payload = data["payload"]
    status = payload["status"]
    buildername = payload["buildername"]

    # Backfill a failed job
    if status in [FAILURE, WARNING]:
        buildername = filter_invalid_builders(buildername)

        # Treeherder can send us invalid builder names
        # https://bugzilla.mozilla.org/show_bug.cgi?id=1242038
        if buildername is None:
            if not dry_run:
                # We need to ack the message to remove it from our queue
                message.ack()
            return

        revision = payload["revision"]
        LOG.info("**")  # visual separator
        LOG.info("Failed job found at revision %s. Buildername: %s",
                 revision, buildername)

        try:
            # We want to ensure 1 appearance of the job on every revision
            revlist = find_backfill_revlist(
                revision=revision,
                max_revisions=get_maxRevisions(buildername),
                buildername=buildername)

            trigger_range(
                buildername=buildername,
                revisions=revlist[1:],
                times=1,
                dry_run=dry_run,
                trigger_build_if_missing=False
            )

            if not dry_run:
                # We need to ack the message to remove it from our queue
                message.ack()

        except ConnectionError:
            # The message has not been acked so we will try again
            LOG.warning("Connection error. Trying again")

        except PushlogError as e:
            # Unable to retrieve pushlog data. Please check repo_url and revision specified.
            LOG.warning(str(e))

        except Exception as e:
            # The message has not been acked so we will try again
            LOG.warning(str(e))
            raise
def on_event(data, message, dry_run, acknowledge):
    """Automatically backfill failed jobs."""
    # Cleaning mozci caches
    buildjson.BUILDS_CACHE = {}
    query_jobs.JOBS_CACHE = {}
    payload = data["payload"]
    status = payload["status"]
    buildername = payload["buildername"]

    # Backfill a failed job
    if status in [FAILURE, WARNING]:
        buildername = filter_invalid_builders(buildername)

        # Treeherder can send us invalid builder names
        # https://bugzilla.mozilla.org/show_bug.cgi?id=1242038
        if buildername is None:
            if acknowledge:
                # We need to ack the message to remove it from our queue
                message.ack()
            return

        revision = payload["revision"]
        LOG.info("**")  # visual separator
        LOG.info("Failed job found at revision %s. Buildername: %s", revision,
                 buildername)

        try:
            # We want to ensure 1 appearance of the job on every revision
            revlist = find_backfill_revlist(buildername=buildername,
                                            revision=revision)

            trigger_range(buildername=buildername,
                          revisions=revlist[1:],
                          times=1,
                          dry_run=dry_run,
                          trigger_build_if_missing=False)

            if acknowledge:
                # We need to ack the message to remove it from our queue
                message.ack()

        except ConnectionError:
            # The message has not been acked so we will try again
            LOG.warning("Connection error. Trying again")

        except PushlogError as e:
            # Unable to retrieve pushlog data. Please check repo_url and revision specified.
            LOG.warning(str(e))

        except Exception as e:
            # The message has not been acked so we will try again
            LOG.warning(str(e))
            raise
def on_event(data, message, dry_run, acknowledge, **kwargs):
    """
    Whenever PGO builds are completed in mozilla-inbound or fx-team,
    we trigger the corresponding talos jobs twice.
    """
    if ignored(data):
        if acknowledge:
            # We need to ack the message to remove it from our queue
            message.ack()
        LOG.debug("'%s' with status %i. Nothing to be done.", data["payload"]["buildername"], data["payload"]["status"])
        return 0  # SUCCESS

    # Cleaning mozci caches
    buildjson.BUILDS_CACHE = {}
    query_jobs.JOBS_CACHE = {}
    payload = data["payload"]
    buildername = payload["buildername"]
    revision = payload["revision"]

    # Treeherder can send us invalid builder names
    # https://bugzilla.mozilla.org/show_bug.cgi?id=1242038
    buildername = filter_invalid_builders(buildername)

    if buildername is None:
        if acknowledge:
            # We need to ack the message to remove it from our queue
            message.ack()
        return -1  # FAILURE

    status = trigger_talos_jobs_for_build(buildername=buildername, revision=revision, times=2, dry_run=dry_run)

    if acknowledge:
        # We need to ack the message to remove it from our queue
        message.ack()

    LOG.info("We triggered talos jobs for the build.")
    return status
def on_event(data, message, dry_run, treeherder_server_url, acknowledge, **kwargs):
    """Act upon Treeherder job events.

    Return if the outcome was successful or not
    """
    exit_code = 0  # SUCCESS

    if ignored(data):
        if acknowledge:
            # We need to ack the message to remove it from our queue
            message.ack()
        return exit_code

    # Cleaning mozci caches
    buildjson.BUILDS_CACHE = {}
    query_jobs.JOBS_CACHE = {}

    treeherder_client = TreeherderClient(server_url=treeherder_server_url)

    action = data["action"].capitalize()
    job_id = data["job_id"]
    repo_name = data["project"]
    status = None

    # We want to know the status of the job we're processing
    try:
        job_info = treeherder_client.get_jobs(repo_name, id=job_id)[0]
    except IndexError:
        LOG.info("We could not find any job_info for repo_name: %s and " "job_id: %s" % (repo_name, job_id))
        return exit_code

    buildername = job_info["ref_data_name"]

    # We want to know the revision associated for this job
    result_sets = treeherder_client.get_resultsets(repo_name, id=job_info["result_set_id"])
    revision = result_sets[0]["revision"]

    link_to_job = "{}/#/jobs?repo={}&revision={}&selectedJob={}".format(
        treeherder_server_url, repo_name, revision, job_id
    )

    LOG.info("{} action requested by {} for '{}'".format(action, data["requester"], buildername))
    LOG.info("Request for {}".format(link_to_job))

    buildername = filter_invalid_builders(buildername)

    if buildername is None:
        LOG.info("Treeherder can send us invalid builder names.")
        LOG.info("See https://bugzilla.mozilla.org/show_bug.cgi?id=1242038.")
        LOG.warning('Requested job name "%s" is invalid.' % job_info["ref_data_name"])
        exit_code = -1  # FAILURE

    # There are various actions that can be taken on a job; however, we currently
    # only process the backfill one.
    elif action == "Backfill":
        exit_code = manual_backfill(revision=revision, buildername=buildername, dry_run=dry_run)
        if not dry_run:
            status = "Backfill request sent"
        else:
            status = "Dry-run mode, nothing was backfilled."
        LOG.debug(status)

    else:
        LOG.error('We were not aware of the "{}" action. Please file an issue'.format(action))
        exit_code = -1  # FAILURE

    if acknowledge:
        # We need to ack the message to remove it from our queue
        message.ack()

    return exit_code
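# A worked example of the Treeherder job link the handler above builds and logs.
# The server URL, repo name, revision and job id below are illustrative
# assumptions; only the URL pattern itself comes from the code.
def _example_link_to_job():
    treeherder_server_url = "https://treeherder.mozilla.org"  # assumed deployment
    return "{}/#/jobs?repo={}&revision={}&selectedJob={}".format(
        treeherder_server_url, "mozilla-inbound", "4e030c8cf8c3", 12345678)
    # -> "https://treeherder.mozilla.org/#/jobs?repo=mozilla-inbound&revision=4e030c8cf8c3&selectedJob=12345678"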
def on_event(data, message, dry_run, treeherder_server_url, acknowledge,
             **kwargs):
    """Act upon Treeherder job events.

    Return if the outcome was successful or not
    """
    LOG.info('Acknowledge value: {}'.format(acknowledge))

    exit_code = 0  # SUCCESS

    if ignored(data):
        if acknowledge:
            # We need to ack the message to remove it from our queue
            LOG.info('Message acknowledged')
            message.ack()
        return exit_code

    # Cleaning mozci caches
    buildjson.BUILDS_CACHE = {}
    query_jobs.JOBS_CACHE = {}

    treeherder_client = TreeherderClient(server_url=treeherder_server_url)

    action = data['action'].capitalize()
    job_id = data['job_id']
    repo_name = data['project']
    status = None

    # We want to know the status of the job we're processing
    try:
        job_info = treeherder_client.get_jobs(repo_name, id=job_id)[0]
    except IndexError:
        LOG.info("We could not find any job_info for repo_name: %s and "
                 "job_id: %s" % (repo_name, job_id))
        return exit_code

    buildername = job_info["ref_data_name"]

    # We want to know the revision associated for this job
    result_sets = treeherder_client.get_resultsets(
        repo_name, id=job_info["result_set_id"])
    revision = result_sets[0]["revision"]

    link_to_job = '{}/#/jobs?repo={}&revision={}&selectedJob={}'.format(
        treeherder_server_url, repo_name, revision, job_id)

    LOG.info("{} action requested by {} for '{}'".format(
        action,
        data['requester'],
        buildername,
    ))
    LOG.info('Request for {}'.format(link_to_job))

    buildername = filter_invalid_builders(buildername)

    if buildername is None:
        LOG.info('Treeherder can send us invalid builder names.')
        LOG.info('See https://bugzilla.mozilla.org/show_bug.cgi?id=1242038.')
        LOG.warning('Requested job name "%s" is invalid.' %
                    job_info['ref_data_name'])
        exit_code = -1  # FAILURE

    # There are various actions that can be taken on a job; however, we currently
    # only process the backfill one.
    elif action == "Backfill":
        exit_code = manual_backfill(
            revision=revision,
            buildername=buildername,
            dry_run=dry_run,
        )
        if not dry_run:
            status = 'Backfill request sent'
        else:
            status = 'Dry-run mode, nothing was backfilled.'
        LOG.debug(status)

    else:
        LOG.error('We were not aware of the "{}" action. Please file an issue'.
                  format(action))
        exit_code = -1  # FAILURE

    if acknowledge:
        # We need to ack the message to remove it from our queue
        LOG.info('Message acknowledged')
        message.ack()

    return exit_code
def on_runnable_job_event(data, message, dry_run, treeherder_host,
                          acknowledge):
    # Cleaning mozci caches
    buildjson.BUILDS_CACHE = {}
    query_jobs.JOBS_CACHE = {}

    treeherder_client = TreeherderClient(host='treeherder.allizom.org')

    # XXX:
    # Grabbing data received over pulse
    repo_name = data["project"]
    requester = data["requester"]
    resultset_id = data["resultset_id"]
    buildernames = data["buildernames"]

    resultset = treeherder_client.get_resultsets(repo_name, id=resultset_id)[0]
    revision = resultset["revision"]
    author = resultset["author"]
    status = None

    treeherder_link = TREEHERDER % {
        'host': treeherder_host,
        'repo': repo_name,
        'revision': resultset['revision']
    }

    message_sender = MessageHandler()
    if not (requester.endswith('@mozilla.com') or author == requester
            or whitelisted_users(requester)):
        # We want to see this in the alerts
        LOG.error("Notice that we're letting %s schedule jobs for %s." %
                  (requester, treeherder_link))
    '''
    # Everyone can press the button, but only authorized users can trigger jobs
    # TODO: remove this when proper LDAP identification is set up on TH
    if not (requester.endswith('@mozilla.com') or author == requester or
            whitelisted_users(requester)):

        if acknowledge:
            # Remove message from pulse queue
            message.ack()

        # We publish a message saying we will not trigger the job
        pulse_message = {
            'resultset_id': resultset_id,
            'requester': requester,
            'status': "Could not determine if the user is authorized, nothing was triggered."}
        routing_key = '{}.{}'.format(repo_name, 'runnable')
        try:
            message_sender.publish_message(pulse_message, routing_key)
        except Exception:
            LOG.warning("Failed to publish message over pulse stream.")

        LOG.error("Requester %s is not allowed to trigger jobs on %s." %
                  (requester, treeherder_link))
        return  # Raising an exception adds too much noise
    '''

    LOG.info("New jobs requested by %s for %s" % (requester, treeherder_link))
    LOG.info("List of builders:")
    for b in buildernames:
        LOG.info("- %s" % b)

    buildernames = filter_invalid_builders(buildernames)

    # Treeherder can send us invalid builder names
    # https://bugzilla.mozilla.org/show_bug.cgi?id=1242038
    if buildernames is None:
        if acknowledge:
            # We need to ack the message to remove it from our queue
            message.ack()
        return

    builders_graph, other_builders_to_schedule = buildbot_bridge.buildbot_graph_builder(
        builders=buildernames,
        revision=revision,
        complete=False  # XXX: This can be removed when BBB is in use
    )

    if builders_graph != {}:
        mgr = TaskClusterBuildbotManager()
        mgr.schedule_graph(
            repo_name=repo_name,
            revision=revision,
            metadata={
                'name': 'pulse_actions_graph',
                'description': 'Adding new jobs to push via '
                               'pulse_actions/treeherder for %s.' % requester,
                'owner': requester,
                'source': treeherder_link,
            },
            builders_graph=builders_graph,
            dry_run=dry_run)
    else:
        LOG.info("We don't have anything to schedule through TaskCluster")

    if other_builders_to_schedule:
        # XXX: We should be able to replace this once all Buildbot jobs run through BBB
        # XXX: There might be a workaround with
        #      https://github.com/mozilla/mozilla_ci_tools/issues/424
        LOG.info("We're going to schedule these builders via Buildapi: %s" %
                 str(other_builders_to_schedule))
        # This is used for test jobs which need an existing Buildbot job to be scheduled
        for buildername in other_builders_to_schedule:
            trigger_job(revision, buildername, dry_run=dry_run)
    else:
        LOG.info("We don't have anything to schedule through Buildapi")

    # Send a pulse message showing what we did
    message_sender = MessageHandler()
    pulse_message = {
        'resultset_id': resultset_id,
        'graph': builders_graph,
        'requester': requester,
        'status': status
    }
    routing_key = '{}.{}'.format(repo_name, 'runnable')
    try:
        message_sender.publish_message(pulse_message, routing_key)
    except Exception:
        LOG.warning("Failed to publish message over pulse stream.")

    if acknowledge:
        # We need to ack the message to remove it from our queue
        message.ack()
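# A sketch of the pulse message the runnable-jobs handler above publishes once
# scheduling is done. The keys and the "<repo_name>.runnable" routing key mirror
# the code; the values are illustrative, and the graph is simply whatever
# buildbot_bridge.buildbot_graph_builder returned (left empty here for brevity).
_example_runnable_pulse_message = {
    'resultset_id': 4242,                # assumed resultset id
    'graph': {},                         # builders_graph from buildbot_graph_builder
    'requester': 'someone@example.com',  # assumed requester
    'status': None,
}
_example_runnable_routing_key = 'mozilla-inbound.runnable'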
def on_event(data, message, dry_run, treeherder_server_url, **kwargs):
    """Act upon Treeherder job events.

    Return if the outcome was successful or not
    """
    exit_code = 0  # SUCCESS

    if ignored(data):
        return exit_code

    # Cleaning mozci caches
    buildjson.BUILDS_CACHE = {}
    query_jobs.JOBS_CACHE = {}

    treeherder_client = TreeherderClient(server_url=treeherder_server_url)

    action = data['action'].capitalize()
    job_id = data['job_id']
    repo_name = data['project']
    status = None

    # We want to know the status of the job we're processing
    try:
        job_info = treeherder_client.get_jobs(repo_name, id=job_id)[0]
    except IndexError:
        LOG.info("We could not find any job_info for repo_name: %s and "
                 "job_id: %s" % (repo_name, job_id))
        return exit_code

    # We want to know the revision associated for this job
    result_set = treeherder_client.get_resultsets(
        repo_name, id=job_info["result_set_id"])[0]
    revision = result_set["revision"]

    link_to_job = '{}/#/jobs?repo={}&revision={}&selectedJob={}'.format(
        treeherder_server_url, repo_name, revision, job_id)

    # There are various actions that can be taken on a job; however, we currently
    # only process the backfill one.
    if action == "Backfill":
        if job_info["build_system_type"] == "taskcluster":
            jobs = []
            jobs_per_call = 250
            offset = 0
            while True:
                results = treeherder_client.get_jobs(
                    repo_name,
                    push_id=job_info["result_set_id"],
                    count=jobs_per_call,
                    offset=offset)
                jobs += results
                if len(results) < jobs_per_call:
                    break
                offset += jobs_per_call

            decision = [
                t for t in jobs if t["job_type_name"] == "Gecko Decision Task"
            ][0]
            details = treeherder_client.get_job_details(
                job_guid=decision["job_guid"])
            inspect = [
                detail["url"] for detail in details
                if detail["value"] == "Inspect Task"
            ][0]
            # Pull out the taskId from the URL e.g.
            # oN1NErz_Rf2DZJ1hi7YVfA from <tc_tools_site>/task-inspector/#oN1NErz_Rf2DZJ1hi7YVfA/
            decision_id = inspect.partition("#")[-1].rpartition("/")[0]
            mgr = TaskClusterManager(dry_run=dry_run)
            mgr.schedule_action_task(decision_id=decision_id,
                                     action="backfill",
                                     action_args={
                                         "project": repo_name,
                                         "job": job_info["id"]
                                     })

        else:
            buildername = job_info["ref_data_name"]

            LOG.info("{} action requested by {} for '{}'".format(
                action,
                data['requester'],
                buildername,
            ))
            LOG.info('Request for {}'.format(link_to_job))

            buildername = filter_invalid_builders(buildername)

            if buildername is None:
                LOG.info('Treeherder can send us invalid builder names.')
                LOG.info(
                    'See https://bugzilla.mozilla.org/show_bug.cgi?id=1242038.'
                )
                LOG.warning('Requested job name "%s" is invalid.' %
                            job_info['ref_data_name'])
                exit_code = -1  # FAILURE
            else:
                exit_code = manual_backfill(
                    revision=revision,
                    buildername=buildername,
                    dry_run=dry_run,
                )
                if not dry_run:
                    status = 'Backfill request sent'
                else:
                    status = 'Dry-run mode, nothing was backfilled.'
                LOG.debug(status)

    else:
        LOG.error('We were not aware of the "{}" action. Please file an issue'.
                  format(action))
        exit_code = -1  # FAILURE

    return exit_code
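# A worked example of the taskId extraction used above. Given an "Inspect Task"
# URL of the form <tc_tools_site>/task-inspector/#<taskId>/, the partition /
# rpartition pair isolates the bare taskId. The host below is an illustrative
# stand-in for <tc_tools_site>; the taskId comes from the comment in the code.
def _example_decision_id_extraction():
    inspect = "https://tc-tools.example.org/task-inspector/#oN1NErz_Rf2DZJ1hi7YVfA/"
    decision_id = inspect.partition("#")[-1].rpartition("/")[0]
    assert decision_id == "oN1NErz_Rf2DZJ1hi7YVfA"
    return decision_id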
def on_runnable_job_event(data, message, dry_run, stage):
    # Cleaning mozci caches
    buildjson.BUILDS_CACHE = {}
    query_jobs.JOBS_CACHE = {}

    if stage:
        treeherder_client = TreeherderClient(host='treeherder.allizom.org')
    else:
        treeherder_client = TreeherderClient()

    # Grabbing data received over pulse
    repo_name = data["project"]
    requester = data["requester"]
    resultset_id = data["resultset_id"]
    buildernames = data["buildernames"]

    resultset = treeherder_client.get_resultsets(repo_name, id=resultset_id)[0]
    revision = resultset["revision"]
    author = resultset["author"]
    status = None

    treeherder_link = TREEHERDER % {'repo': repo_name, 'revision': resultset['revision']}

    message_sender = MessageHandler()
    # Everyone can press the button, but only authorized users can trigger jobs
    # TODO: remove this when proper LDAP identification is set up on TH
    if not (requester.endswith('@mozilla.com') or author == requester or
            whitelisted_users(requester)):

        if not dry_run:
            # Remove message from pulse queue
            message.ack()

        # We publish a message saying we will not trigger the job
        pulse_message = {
            'resultset_id': resultset_id,
            'requester': requester,
            'status': "Could not determine if the user is authorized, nothing was triggered."}
        routing_key = '{}.{}'.format(repo_name, 'runnable')
        try:
            message_sender.publish_message(pulse_message, routing_key)
        except Exception:
            LOG.warning("Failed to publish message over pulse stream.")

        LOG.error("Requester %s is not allowed to trigger jobs on %s." %
                  (requester, treeherder_link))
        return  # Raising an exception adds too much noise

    LOG.info("New jobs requested by %s for %s" % (requester, treeherder_link))
    LOG.info("List of builders:")
    for b in buildernames:
        LOG.info("- %s" % b)

    buildernames = filter_invalid_builders(buildernames)

    # Treeherder can send us invalid builder names
    # https://bugzilla.mozilla.org/show_bug.cgi?id=1242038
    if buildernames is None:
        if not dry_run:
            # We need to ack the message to remove it from our queue
            message.ack()
        return

    builders_graph, other_builders_to_schedule = buildbot_bridge.buildbot_graph_builder(
        builders=buildernames,
        revision=revision,
        complete=False  # XXX: This can be removed when BBB is in use
    )

    if builders_graph != {}:
        mgr = TaskClusterBuildbotManager()
        mgr.schedule_graph(
            repo_name=repo_name,
            revision=revision,
            metadata={
                'name': 'pulse_actions_graph',
                'description':
                    'Adding new jobs to push via pulse_actions/treeherder for %s.' % requester,
                'owner': requester,
                'source': treeherder_link,
            },
            builders_graph=builders_graph,
            dry_run=dry_run)
    else:
        LOG.info("We don't have anything to schedule through TaskCluster")

    if other_builders_to_schedule:
        # XXX: We should be able to replace this once all Buildbot jobs run through BBB
        # XXX: There might be a workaround with
        #      https://github.com/mozilla/mozilla_ci_tools/issues/424
        LOG.info("We're going to schedule these builders via Buildapi.")
        # This is used for test jobs which need an existing Buildbot job to be scheduled
        for buildername in other_builders_to_schedule:
            trigger_job(revision, buildername, dry_run=dry_run)
    else:
        LOG.info("We don't have anything to schedule through Buildapi")

    # Send a pulse message showing what we did
    message_sender = MessageHandler()
    pulse_message = {
        'resultset_id': resultset_id,
        'graph': builders_graph,
        'requester': requester,
        'status': status}
    routing_key = '{}.{}'.format(repo_name, 'runnable')
    try:
        message_sender.publish_message(pulse_message, routing_key)
    except Exception:
        LOG.warning("Failed to publish message over pulse stream.")

    if not dry_run:
        # We need to ack the message to remove it from our queue
        message.ack()
def on_event(data, message, dry_run, treeherder_server_url, **kwargs):
    if ignored(data):
        return 0  # SUCCESS

    # Grabbing data received over pulse
    repo_name = data["project"]
    requester = data["requester"]
    resultset_id = data["resultset_id"]

    if "requested_jobs" in data:
        requested_jobs = data["requested_jobs"]
    else:
        LOG.error("Appropriate job requests not found in the pulse message.")
        return -1

    treeherder_client = TreeherderClient(server_url=treeherder_server_url)
    resultset = treeherder_client.get_resultsets(repo_name, id=resultset_id)[0]
    revision = resultset["revision"]
    author = resultset["author"]

    treeherder_link = TREEHERDER % {
        'treeherder_server_url': treeherder_server_url,
        'repo': repo_name,
        'revision': resultset['revision']
    }
    metadata = {
        'name': 'pulse_actions_graph',
        'description': 'Adding new jobs to push via '
                       'pulse_actions/treeherder for %s.' % requester,
        'owner': requester,
        'source': treeherder_link,
    }

    if not (requester.endswith('@mozilla.com') or author == requester
            or whitelisted_users(requester)):
        # We want to see this in the alerts
        LOG.warning(
            "Notice that we're letting {} schedule jobs for {}.".format(
                requester, treeherder_link))

    LOG.info("New jobs requested by %s for %s" % (requester, treeherder_link))
    LOG.info("List of requested jobs:")
    for job in requested_jobs:
        LOG.info("- {}".format(job))

    # This is an empty string in non-try pulse messages.
    # Remove support for `decisionTaskID` once bug 1286897 is fixed.
    decision_task_id = data.get('decision_task_id', data.get('decisionTaskID'))

    # Separate Buildbot buildernames from TaskCluster task labels
    if decision_task_id:
        task_labels = [
            x for x in requested_jobs
            if is_taskcluster_label(x, decision_task_id)
        ]
    else:
        task_labels = []

    # Treeherder can send us invalid builder names
    # https://bugzilla.mozilla.org/show_bug.cgi?id=1242038
    buildernames = filter_invalid_builders(
        list(set(requested_jobs) - set(task_labels)))

    # XXX: In the future handle return codes
    add_taskcluster_jobs(task_labels, decision_task_id, repo_name, dry_run)
    add_buildbot_jobs(repo_name, revision, buildernames, metadata, dry_run)

    return 0  # SUCCESS
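# A small sketch of the split the handler above performs between TaskCluster
# task labels and Buildbot buildernames. The job names are made up; the set
# arithmetic mirrors the code: anything that is not recognised as a task label
# is treated as a buildername and then passed through filter_invalid_builders.
def _example_split_requested_jobs():
    requested_jobs = [
        "test-linux64/opt-mochitest-1",  # assumed TaskCluster task label
        "Windows 7 32-bit mozilla-inbound opt test mochitest-1",  # assumed Buildbot name
    ]
    task_labels = ["test-linux64/opt-mochitest-1"]
    buildernames = list(set(requested_jobs) - set(task_labels))
    return task_labels, buildernames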