def on_buildbot_event(data, message, dry_run):
    """Act upon buildbot events."""
    # Pulse gives us a job_id and a job_guid; we need the request_id.
    repo_name = data['project']
    job_id = data['job_id']
    request_id = get_request_id_from_job_id(repo_name, job_id)
    action = data['action']
    status = None

    # Re-trigger action
    if action == 'retrigger':
        make_retrigger_request(repo_name, request_id, dry_run=dry_run)
        if not dry_run:
            status = 'Retrigger request sent'
        else:
            status = 'Dry-run mode, nothing was retriggered'
    # Cancel action
    elif action == 'cancel':
        make_cancel_request(repo_name, request_id, dry_run=dry_run)
        if not dry_run:
            status = 'Cancel request sent'
        else:
            status = 'Dry-run mode, nothing was cancelled'
    # Send a pulse message showing what we did
    message_sender = MessageHandler()
    pulse_message = {
        'job_id': job_id,
        'request_id': request_id,
        'action': action,
        'requester': data['requester'],
        'status': status}
    routing_key = '{}.{}'.format(repo_name, action)
    message_sender.publish_message(pulse_message, routing_key)
    # We need to ack the message to remove it from our queue
    message.ack()
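For reference, this is a minimal, hypothetical way to exercise the handler locally in dry-run mode; the payload keys are only the ones the function reads above, and FakeMessage is a stand-in for the real Pulse message object.

class FakeMessage(object):
    """Stand-in for a Pulse message; only ack() is needed by the handler."""
    def ack(self):
        print("message acked")

sample_data = {
    'project': 'mozilla-inbound',        # placeholder repo name
    'job_id': 12345678,                  # placeholder Treeherder job id
    'action': 'retrigger',               # or 'cancel'
    'requester': 'someone@example.com',  # placeholder
}

# dry_run=True, so no retrigger or cancel request is actually sent.
on_buildbot_event(sample_data, FakeMessage(), dry_run=True)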
Example #2
def trigger_range(buildername, revisions, times=1, dry_run=False, files=None):
    """
    Schedule the job named "buildername" ("times" times) on every revision in
    "revisions".
    """
    repo_name = query_repo_name_from_buildername(buildername)
    LOG.info("We want to have %s job(s) of %s on revisions %s" %
             (times, buildername, str(revisions)))
    for rev in revisions:
        LOG.info("")
        LOG.info("=== %s ===" % rev)
        if not buildapi.valid_revision(repo_name, rev):
            LOG.info("We can't trigger anything on pushes that the revision is not valid for "
                     "buildapi.")
            continue

        LOG.info("We want to have %s job(s) of %s on revision %s" %
                 (times, buildername, rev))

        # 1) How many potentially completed jobs can we get for this buildername?
        jobs = query_jobs(repo_name, rev)
        matching_jobs = _matching_jobs(buildername, jobs)
        successful_jobs, pending_jobs, running_jobs = _status_summary(matching_jobs)[0:3]

        potential_jobs = pending_jobs + running_jobs + successful_jobs
        LOG.debug("We found %d pending jobs, %d running jobs and %d successful_jobs." %
                  (pending_jobs, running_jobs, successful_jobs))

        if potential_jobs >= times:
            LOG.info("We have %d job(s) for '%s' which is enough for the %d job(s) we want." %
                     (potential_jobs, buildername, times))

        else:
            # 2) If we have fewer potential jobs than 'times' instances then
            #    we need to fill in the difference.
            LOG.info("We have found %d potential job(s) matching '%s' on %s. "
                     "We need to trigger more." % (potential_jobs, buildername, rev))

            # If a job matching what we want already exists, we can
            # use the retrigger API in self-serve to retrigger that
            # instead of creating a new arbitrary job
            if len(matching_jobs) > 0:
                request_id = matching_jobs[0]["requests"][0]["request_id"]
                buildapi.make_retrigger_request(
                    repo_name,
                    request_id,
                    count=(times - potential_jobs),
                    dry_run=dry_run)

            # If no matching job exists, we have to trigger a new arbitrary job
            else:
                list_of_requests = trigger_job(
                    revision=rev,
                    buildername=buildername,
                    times=(times - potential_jobs),
                    dry_run=dry_run,
                    files=files)

                if list_of_requests and any(req.status_code != 202 for req in list_of_requests):
                    LOG.warning("Not all requests succeeded.")
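As a usage sketch, the call below would ask for three jobs of a hypothetical builder on a hypothetical revision; dry_run=True keeps it side-effect free.

trigger_range(
    buildername='Ubuntu VM 12.04 mozilla-inbound opt test mochitest-1',  # hypothetical
    revisions=['4e030c8cf8c3'],                                          # hypothetical
    times=3,
    dry_run=True)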
 def test_call_with_different_count(self, get_credentials, post):
     """make_retrigger_request should call requests.post with the right count."""
     buildapi.make_retrigger_request("repo", "1234567", count=10, dry_run=False)
     post.assert_called_once_with(
         '%s/%s/request' % (buildapi.HOST_ROOT, "repo"),
         headers={'Accept': 'application/json'},
         data={'count': 10, 'priority': 0, 'request_id': '1234567'},
         auth=get_credentials())
 def test_call_without_dry_run(self, get_credentials, post):
     """trigger_arbitrary_job should call requests.post."""
     buildapi.make_retrigger_request("repo", "1234567", dry_run=False)
     # We expect that make_retrigger_request will call requests.post
     # once with the following arguments
     post.assert_called_once_with('%s/%s/request' %
                                  (buildapi.HOST_ROOT, "repo"),
                                  headers={'Accept': 'application/json'},
                                  data={'request_id': '1234567'},
                                  auth=get_credentials())
 def test_call_with_dry_run(self, get_credentials, post):
     """make_retrigger_request should return None when dry_run is True."""
     self.assertEqual(
         buildapi.make_retrigger_request("repo", "1234567", dry_run=True),
         None)
     # make_retrigger_request should not call requests.post when dry_run is True
     assert post.call_count == 0
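Taken together, these tests pin down the POST contract: the endpoint path, the Accept header, the payload fields, and the dry-run short circuit. The following is a minimal sketch of a function that would satisfy them; the HOST_ROOT value and the get_credentials helper are placeholders rather than the actual mozci code.

import requests

HOST_ROOT = 'https://secure.pub.build.mozilla.org/buildapi/self-serve'  # assumed endpoint


def get_credentials():
    """Placeholder for the real credential helper patched in the tests."""
    return ('user@example.com', 'secret')


def make_retrigger_request(repo_name, request_id, count=1, priority=0, dry_run=False):
    """Sketch of a self-serve retrigger request matching the tests above."""
    if dry_run:
        # The dry-run test expects None and no POST at all.
        return None

    data = {'request_id': request_id}
    if count > 1:
        # The count test expects 'count' and 'priority' to be sent together.
        data.update({'count': count, 'priority': priority})

    return requests.post(
        '%s/%s/request' % (HOST_ROOT, repo_name),
        headers={'Accept': 'application/json'},
        data=data,
        auth=get_credentials())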
 def retrigger(self, uuid, *args, **kwargs):
     return buildapi.make_retrigger_request(request_id=uuid,
                                            *args,
                                            **kwargs)
Example #9
def trigger_range(buildername,
                  revisions,
                  times=1,
                  dry_run=False,
                  files=None,
                  extra_properties=None,
                  trigger_build_if_missing=True):
    """Schedule the job named "buildername" ("times" times) in every revision on 'revisions'."""
    repo_name = query_repo_name_from_buildername(buildername)
    repo_url = buildapi.query_repo_url(repo_name)

    if revisions != []:
        LOG.info("We want to have %s job(s) of %s on revisions %s" %
                 (times, buildername, str(revisions)))

    for rev in revisions:
        LOG.info("")
        LOG.info("=== %s ===" % rev)
        if VALIDATE and not pushlog.valid_revision(repo_url, rev):
            LOG.info(
                "We can't trigger anything on pushes without a valid revision."
            )
            continue

        LOG.info("We want to have %s job(s) of %s on revision %s" %
                 (times, buildername, rev))

        # 1) How many potentially completed jobs can we get for this buildername?
        matching_jobs = QUERY_SOURCE.get_matching_jobs(repo_name, rev,
                                                       buildername)
        successful_jobs, pending_jobs, running_jobs, _, failed_jobs = \
            _status_summary(matching_jobs)

        potential_jobs = pending_jobs + running_jobs + successful_jobs + failed_jobs
        # TODO: change this debug message when we have a less hardcoded _status_summary
        LOG.debug("We found %d pending/running jobs, %d successful jobs and "
                  "%d failed jobs" %
                  (pending_jobs + running_jobs, successful_jobs, failed_jobs))

        if potential_jobs >= times:
            LOG.info(
                "We have %d job(s) for '%s' which is enough for the %d job(s) we want."
                % (potential_jobs, buildername, times))

        else:
            # 2) If we have fewer potential jobs than 'times' instances then
            #    we need to fill in the difference.
            LOG.info("We have found %d potential job(s) matching '%s' on %s. "
                     "We need to trigger more." %
                     (potential_jobs, buildername, rev))

            # If a job matching what we want already exists, we can
            # use the retrigger API in self-serve to retrigger that
            # instead of creating a new arbitrary job
            if len(matching_jobs) > 0 and files is None:
                request_id = QUERY_SOURCE.get_buildapi_request_id(
                    repo_name, matching_jobs[0])
                buildapi.make_retrigger_request(repo_name,
                                                request_id,
                                                count=(times - potential_jobs),
                                                dry_run=dry_run)

            # If no matching job exists, we have to trigger a new arbitrary job
            else:
                list_of_requests = trigger_job(
                    revision=rev,
                    buildername=buildername,
                    times=(times - potential_jobs),
                    dry_run=dry_run,
                    files=files,
                    extra_properties=extra_properties,
                    trigger_build_if_missing=trigger_build_if_missing)

                if list_of_requests and any(req.status_code != 202
                                            for req in list_of_requests):
                    LOG.warning("Not all requests succeeded.")
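For context, _status_summary is unpacked above into five counters (successful, pending, running, an ignored fourth value, and failed). A simplified, hypothetical version over jobs that carry a plain 'state' string might look like the sketch below; the real buildapi payloads use numeric status/result codes instead, and the fourth slot is labeled 'coalesced' here purely as an assumption.

def _status_summary_sketch(jobs):
    """Hypothetical counter over simplified job dicts with a 'state' key."""
    counts = {'success': 0, 'pending': 0, 'running': 0, 'coalesced': 0, 'failed': 0}
    for job in jobs:
        state = job.get('state')
        if state in counts:
            counts[state] += 1
    # Same order as the unpacking in trigger_range above.
    return (counts['success'], counts['pending'], counts['running'],
            counts['coalesced'], counts['failed'])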
Example #11
def main():
    options = parse_args()
    validate_options(options)
    valid_credentials()

    if options.debug:
        LOG = setup_logging(logging.DEBUG)
    else:
        LOG = setup_logging(logging.INFO)

    # Setting the QUERY_SOURCE global variable in mozci.py
    set_query_source(options.query_source)

    if options.buildernames:
        options.buildernames = sanitize_buildernames(options.buildernames)
        repo_url = query_repo_url_from_buildername(options.buildernames[0])

    if not options.repo_name:
        options.repo_name = query_repo_name_from_buildername(options.buildernames[0])

    if options.rev == 'tip':
        repo_url = query_repo_url(options.repo_name)
        options.rev = query_repo_tip(repo_url)
        LOG.info("The tip of %s is %s", options.repo_name, options.rev)

    if options.coalesced:
        query_api = BuildApi()
        request_ids = query_api.find_all_jobs_by_status(options.repo_name,
                                                        options.rev, COALESCED)
        if len(request_ids) == 0:
            LOG.info('We did not find any coalesced job')
        for request_id in request_ids:
            make_retrigger_request(repo_name=options.repo_name,
                                   request_id=request_id,
                                   dry_run=options.dry_run)

        return

    for buildername in options.buildernames:
        revlist = determine_revlist(
            repo_url=repo_url,
            buildername=buildername,
            rev=options.rev,
            back_revisions=options.back_revisions,
            delta=options.delta,
            from_rev=options.from_rev,
            backfill=options.backfill,
            skips=options.skips,
            max_revisions=options.max_revisions)

        try:
            trigger_range(
                buildername=buildername,
                revisions=revlist,
                times=options.times,
                dry_run=options.dry_run,
                files=options.files,
                trigger_build_if_missing=options.trigger_build_if_missing
            )
        except Exception as e:
            LOG.exception(e)
            exit(1)

        if revlist:
            LOG.info('https://treeherder.mozilla.org/#/jobs?%s' %
                     urllib.urlencode({'repo': options.repo_name,
                                       'fromchange': revlist[-1],
                                       'tochange': revlist[0],
                                       'filter-searchStr': buildername}))
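The final log line composes a Treeherder filter URL from the revision range. With hypothetical values, the encoding step looks like this (Python 2's urllib.urlencode, matching the code above):

import urllib

params = urllib.urlencode({'repo': 'mozilla-inbound',      # hypothetical
                           'fromchange': '4e030c8cf8c3',   # hypothetical
                           'tochange': '9b2a99adc05e',     # hypothetical
                           'filter-searchStr': 'mozilla-inbound opt test mochitest-1'})
print('https://treeherder.mozilla.org/#/jobs?%s' % params)
# e.g. https://treeherder.mozilla.org/#/jobs?repo=mozilla-inbound&fromchange=...&tochange=... (parameter order may vary)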
Example #12
def trigger_range(buildername, revisions, times=1, dry_run=False,
                  files=None, extra_properties=None, trigger_build_if_missing=True):
    """Schedule the job named "buildername" ("times" times) in every revision on 'revisions'."""
    repo_name = query_repo_name_from_buildername(buildername)
    repo_url = buildapi.query_repo_url(repo_name)

    if revisions != []:
        LOG.info("We want to have %s job(s) of %s on revisions %s" %
                 (times, buildername, str(revisions)))

    for rev in revisions:
        LOG.info("")
        LOG.info("=== %s ===" % rev)
        if VALIDATE and not pushlog.valid_revision(repo_url, rev):
            LOG.info("We can't trigger anything on pushes without a valid revision.")
            continue

        LOG.info("We want to have %s job(s) of %s on revision %s" %
                 (times, buildername, rev))

        # 1) How many potentially completed jobs can we get for this buildername?
        matching_jobs = QUERY_SOURCE.get_matching_jobs(repo_name, rev, buildername)
        successful_jobs, pending_jobs, running_jobs, _, failed_jobs = \
            _status_summary(matching_jobs)

        potential_jobs = pending_jobs + running_jobs + successful_jobs + failed_jobs
        # TODO: change this debug message when we have a less hardcoded _status_summary
        LOG.debug("We found %d pending/running jobs, %d successful jobs and "
                  "%d failed jobs" % (pending_jobs + running_jobs, successful_jobs, failed_jobs))

        if potential_jobs >= times:
            LOG.info("We have %d job(s) for '%s' which is enough for the %d job(s) we want." %
                     (potential_jobs, buildername, times))

        else:
            # 2) If we have fewer potential jobs than 'times' instances then
            #    we need to fill in the difference.
            LOG.info("We have found %d potential job(s) matching '%s' on %s. "
                     "We need to trigger more." % (potential_jobs, buildername, rev))

            # If a job matching what we want already exists, we can
            # use the retrigger API in self-serve to retrigger that
            # instead of creating a new arbitrary job
            if len(matching_jobs) > 0 and files is None:
                request_id = QUERY_SOURCE.get_buildapi_request_id(repo_name, matching_jobs[0])
                buildapi.make_retrigger_request(
                    repo_name,
                    request_id,
                    count=(times - potential_jobs),
                    dry_run=dry_run)

            # If no matching job exists, we have to trigger a new arbitrary job
            else:
                list_of_requests = trigger_job(
                    revision=rev,
                    buildername=buildername,
                    times=(times - potential_jobs),
                    dry_run=dry_run,
                    files=files,
                    extra_properties=extra_properties,
                    trigger_build_if_missing=trigger_build_if_missing)

                if list_of_requests and any(req.status_code != 202 for req in list_of_requests):
                    LOG.warning("Not all requests succeeded.")
Example #14
def main():
    options = parse_args()
    validate_options(options)

    if not valid_credentials():
        sys.exit(-1)

    if options.debug:
        LOG = setup_logging(logging.DEBUG)
    else:
        LOG = setup_logging(logging.INFO)

    # Setting the QUERY_SOURCE global variable in mozci.py
    set_query_source(options.query_source)

    if options.buildernames:
        options.buildernames = sanitize_buildernames(options.buildernames)
        repo_url = query_repo_url_from_buildername(options.buildernames[0])

    if not options.repo_name:
        options.repo_name = query_repo_name_from_buildername(options.buildernames[0])

    if options.rev == 'tip':
        repo_url = query_repo_url(options.repo_name)
        options.rev = query_repo_tip(repo_url)
        LOG.info("The tip of %s is %s", options.repo_name, options.rev)

    # Mode 1: Trigger coalesced jobs
    if options.coalesced:
        query_api = BuildApi()
        request_ids = query_api.find_all_jobs_by_status(options.repo_name,
                                                        options.rev, COALESCED)
        if len(request_ids) == 0:
            LOG.info('We did not find any coalesced job')
        for request_id in request_ids:
            make_retrigger_request(repo_name=options.repo_name,
                                   request_id=request_id,
                                   dry_run=options.dry_run)

        return

    # Mode #2: Fill-in a revision
    if options.fill_revision:
        trigger_missing_jobs_for_revision(
            repo_name=options.repo_name,
            revision=options.rev,
            dry_run=options.dry_run
        )
        return

    # Mode #3: Trigger jobs based on revision list modifiers
    for buildername in options.buildernames:
        revlist = determine_revlist(
            repo_url=repo_url,
            buildername=buildername,
            rev=options.rev,
            back_revisions=options.back_revisions,
            delta=options.delta,
            from_rev=options.from_rev,
            backfill=options.backfill,
            skips=options.skips,
            max_revisions=options.max_revisions)

        try:
            trigger_range(
                buildername=buildername,
                revisions=revlist,
                times=options.times,
                dry_run=options.dry_run,
                files=options.files,
                trigger_build_if_missing=options.trigger_build_if_missing
            )
        except Exception as e:
            LOG.exception(e)
            exit(1)

        if revlist:
            LOG.info('https://treeherder.mozilla.org/#/jobs?%s' %
                     urllib.urlencode({'repo': options.repo_name,
                                       'fromchange': revlist[-1],
                                       'tochange': revlist[0],
                                       'filter-searchStr': buildername}))