Esempio n. 1
0
def manual_backfill(revision, buildername, max_pushes=None, dry_run=False):
    """Trigger jobs on the pushes preceding *revision* for *buildername*.

    Used when a user clicks the backfill icon for a job on Treeherder;
    it backfills to the last known job on Treeherder.
    """
    if max_pushes is None:
        max_pushes = get_max_pushes(buildername)

    repo_url = query_repo_url_from_buildername(buildername)
    # Long term we want Treeherder as the data source for manual backfilling.
    set_query_source("treeherder")

    # after=-1 keeps the requested revision itself out of the range.
    push_range = query_pushes_by_specified_revision_range(
        repo_url=repo_url,
        revision=revision,
        before=max_pushes,
        after=-1,
        return_revision_list=True)

    revisions_to_trigger = _filter_backfill_revlist(
        buildername, push_range, only_successful=False)

    trigger_range(
        buildername=buildername,
        revisions=revisions_to_trigger,
        times=1,
        dry_run=dry_run,
        extra_properties={
            'mozci_request': {
                'type': 'manual_backfill',
                'builders': [buildername],
            }
        },
    )
Esempio n. 2
0
def manual_backfill(revision, buildername, max_pushes=None, dry_run=False):
    """Trigger jobs on the pushes preceding *revision* for *buildername*.

    Used when a user clicks the backfill icon for a job on Treeherder;
    it backfills to the last known job on Treeherder.
    """
    if max_pushes is None:
        max_pushes = get_max_pushes(buildername)

    repo_url = query_repo_url_from_buildername(buildername)
    # Long term we want Treeherder as the data source for manual backfilling.
    set_query_source("treeherder")

    # after=-1 keeps the requested revision itself out of the range.
    push_range = query_pushes_by_specified_revision_range(
        repo_url=repo_url,
        revision=revision,
        before=max_pushes,
        after=-1,
        return_revision_list=True)

    if 'talos' in buildername:
        # Talos jobs are generally always green; fill every hole in the range.
        revisions_to_trigger = push_range
    else:
        revisions_to_trigger = _filter_backfill_revlist(
            buildername, push_range, only_successful=False)

    trigger_range(
        buildername=buildername,
        revisions=revisions_to_trigger,
        times=1,
        dry_run=dry_run,
        extra_properties={
            'mozci_request': {
                'type': 'manual_backfill',
                'builders': [buildername],
            }
        },
    )
Esempio n. 3
0
def manual_backfill(revision, buildername, dry_run=False):
    """
    This function is used to trigger jobs for a range of revisions
    when a user clicks the backfill icon for a job on Treeherder.

    It backfills to the last known job on Treeherder.

    :param revision: push revision to backfill from (itself excluded via after=-1).
    :param buildername: buildbot builder whose jobs should be backfilled.
    :param dry_run: forwarded to trigger_range; when True nothing is triggered.
    :returns: None
    """
    factor = 1.5
    seta_skip = get_max_pushes(buildername)
    # Use SETA's skip pushes times factor.
    # NOTE(review): this product is a float; presumably the query layer
    # accepts a non-integer push count -- confirm, otherwise wrap in int().
    max_pushes = seta_skip * factor
    repo_url = query_repo_url_from_buildername(buildername)
    # We want to use data from treeherder for manual backfilling for long term.
    set_query_source("treeherder")

    revlist = query_pushes_by_specified_revision_range(
        repo_url=repo_url,
        revision=revision,
        before=max_pushes,
        after=-1,  # We don't want the current job in the revision to be included.
        return_revision_list=True)

    # Fix: bail out before indexing revlist[-1]/revlist[0] below -- the
    # original raised IndexError when the query returned no pushes.
    if not revlist:
        LOG.info("We don't have a revision list to work with.")
        return

    LOG.info("We're *aiming* to backfill; note that we ignore the revision that you request "
             "to backfill from ({}) up to {} pushes (seta skip: {}; factor: {}) "
             "and we backfill up to the last green if found.".format(
                 revision[:12], max_pushes, seta_skip, factor))
    LOG.info("https://treeherder.mozilla.org/#/jobs?repo={}&filter-searchStr={}"
             "&tochange={}&fromchange={}".format(
                 repo_url.split('/')[-1],
                 buildername,
                 revlist[-1],
                 revlist[0],
             ))

    filtered_revlist = revlist
    # Talos jobs are generally always green and we want to fill in all holes in a range
    if 'talos' not in buildername:
        filtered_revlist = _filter_backfill_revlist(buildername, list(reversed(revlist)),
                                                    only_successful=True)

    if len(filtered_revlist) == 0:
        LOG.info("We don't have a revision list to work with.")
        return

    if len(revlist) != len(filtered_revlist):
        LOG.info("NOTICE: We were aiming for a revlist of {}, however, we only "
                 "need to backfill {} revisions".format(len(revlist), len(filtered_revlist)))

    trigger_range(
        buildername=buildername,
        revisions=filtered_revlist,
        times=1,
        dry_run=dry_run,
        extra_properties={
            'mozci_request': {
                'type': 'manual_backfill',
                'builders': [buildername]}
            }
    )
Esempio n. 4
0
def manual_backfill(revision, buildername, dry_run=False):
    """
    This function is used to trigger jobs for a range of revisions
    when a user clicks the backfill icon for a job on Treeherder.

    It backfills to the last known job on Treeherder.

    :param revision: push revision to backfill from (itself excluded via after=-1).
    :param buildername: buildbot builder whose jobs should be backfilled.
    :param dry_run: forwarded to trigger_range; when True nothing is triggered.
    :returns: None
    """
    factor = 1.5
    seta_skip = get_max_pushes(buildername)
    # Use SETA's skip pushes times factor.
    # NOTE(review): this product is a float; presumably the query layer
    # accepts a non-integer push count -- confirm, otherwise wrap in int().
    max_pushes = seta_skip * factor
    repo_url = query_repo_url_from_buildername(buildername)
    # We want to use data from treeherder for manual backfilling for long term.
    set_query_source("treeherder")

    revlist = query_pushes_by_specified_revision_range(
        repo_url=repo_url,
        revision=revision,
        before=max_pushes,
        after=-1,  # We don't want the current job in the revision to be included.
        return_revision_list=True)

    # Fix: bail out before indexing revlist[-1]/revlist[0] below -- the
    # original raised IndexError when the query returned no pushes.
    if not revlist:
        LOG.info("We don't have a revision list to work with.")
        return

    LOG.info("We're *aiming* to backfill; note that we ignore the revision that you request "
             "to backfill from ({}) up to {} pushes (seta skip: {}; factor: {}) "
             "and we backfill up to the last green if found.".format(
                 revision[:12], max_pushes, seta_skip, factor))
    LOG.info("https://treeherder.mozilla.org/#/jobs?repo={}&filter-searchStr={}"
             "&tochange={}&fromchange={}".format(
                 repo_url.split('/')[-1],
                 buildername,
                 revlist[-1],
                 revlist[0],
             ))

    filtered_revlist = revlist
    # Talos jobs are generally always green and we want to fill in all holes in a range
    if 'talos' not in buildername:
        filtered_revlist = _filter_backfill_revlist(buildername, list(reversed(revlist)),
                                                    only_successful=True)

    if len(filtered_revlist) == 0:
        LOG.info("We don't have a revision list to work with.")
        return

    if len(revlist) != len(filtered_revlist):
        LOG.info("NOTICE: We were aiming for a revlist of {}, however, we only "
                 "need to backfill {} revisions".format(len(revlist), len(filtered_revlist)))

    trigger_range(
        buildername=buildername,
        revisions=filtered_revlist,
        times=1,
        dry_run=dry_run,
        extra_properties={
            'mozci_request': {
                'type': 'manual_backfill',
                'builders': [buildername]}
            }
    )
Esempio n. 5
0
def find_backfill_revlist(buildername, revision, max_pushes=None):
    """Determine which revisions we need to trigger in order to backfill.

    This function is generally called by automatic backfilling on pulse_actions.
    A job may not run for many pushes due to SETA, and a permanent failure can
    appear after a reconfiguration (a new job is added).

    When a permanent failure appears we would keep adding load unnecessarily
    by triggering coalesced jobs in between pushes -- a long lived failing job
    (possibly hidden) can be failed/coalesced from push N all the way back to
    push N-max_pushes-1.

    If the list of revisions to trigger reaches max_pushes, we either have not
    had that job scheduled beyond max_pushes or it has been failing forever,
    so we return an empty list instead.
    """
    if max_pushes is None:
        max_pushes = get_max_pushes(buildername)

    # XXX: There is a chance that a green job has run in a newer push (the priority was higher),
    # however, this is unlikely.
    # XXX: We might need to consider when a backout has already landed and stop backfilling
    LOG.info("BACKFILL-START:%s_%s begins." % (revision[0:8], buildername))

    candidates = query_pushes_by_specified_revision_range(
        repo_url=query_repo_url_from_buildername(buildername),
        revision=revision,
        before=max_pushes - 1,
        after=0,
        return_revision_list=True)
    pending = _filter_backfill_revlist(buildername, candidates, only_successful=True)

    if len(pending) >= max_pushes:
        # It is likely that we are facing a long lived permanent failure
        LOG.debug(
            "We're not going to backfill %s since it is likely to be a permanent "
            "failure." % buildername)
        LOG.info("BACKFILL-END:%s_%s will not backfill." %
                 (revision[0:8], buildername))
        return []

    LOG.info("BACKFILL-END:%s_%s will backfill %s." %
             (revision[0:8], buildername, pending))
    return pending
Esempio n. 6
0
def find_backfill_revlist(buildername, revision, max_pushes=None):
    """Determine which revisions we need to trigger in order to backfill.

    This function is generally called by automatic backfilling on pulse_actions.
    A job may not run for many pushes due to SETA, and a permanent failure can
    appear after a reconfiguration (a new job is added).

    When a permanent failure appears we would keep adding load unnecessarily
    by triggering coalesced jobs in between pushes -- a long lived failing job
    (possibly hidden) can be failed/coalesced from push N all the way back to
    push N-max_pushes-1.

    If the list of revisions to trigger reaches max_pushes, we either have not
    had that job scheduled beyond max_pushes or it has been failing forever,
    so we return an empty list instead.
    """
    effective_max = get_max_pushes(buildername) if max_pushes is None else max_pushes
    # XXX: There is a chance that a green job has run in a newer push (the priority was higher),
    # however, this is unlikely.
    # XXX: We might need to consider when a backout has already landed and stop backfilling
    LOG.info("BACKFILL-START:%s_%s begins." % (revision[0:8], buildername))

    repo_url = query_repo_url_from_buildername(buildername)
    push_revisions = query_pushes_by_specified_revision_range(
        repo_url=repo_url,
        revision=revision,
        before=effective_max - 1,
        after=0,
        return_revision_list=True
    )
    to_trigger = _filter_backfill_revlist(buildername, push_revisions, only_successful=True)

    if len(to_trigger) < effective_max:
        LOG.info("BACKFILL-END:%s_%s will backfill %s." %
                 (revision[0:8], buildername, to_trigger))
        return to_trigger
    else:
        # It is likely that we are facing a long lived permanent failure
        LOG.debug("We're not going to backfill %s since it is likely to be a permanent "
                  "failure." % buildername)
        LOG.info("BACKFILL-END:%s_%s will not backfill." % (revision[0:8], buildername))
        return []
Esempio n. 7
0
 def test_get_max_pushes_with_no_seta(self, fetch_allthethings_data):
     """get_max_pushes should return the number of pushes associated to the SETA scheduler."""
     fetch_allthethings_data.return_value = ALLTHETHINGS
     builder = "Platform2 mozilla-beta talos tp5o-e10s"
     assert get_max_pushes(builder) == MAX_PUSHES
Esempio n. 8
0
 def test_get_max_pushes_with_seta(self, fetch_allthethings_data):
     """get_max_pushes should return the number of pushes associated to the SETA scheduler."""
     fetch_allthethings_data.return_value = ALLTHETHINGS
     builder = "Windows 7 VM 32-bit mozilla-inbound opt test mochitest-2"
     assert get_max_pushes(builder) == 8
Esempio n. 9
0
 def test_get_max_pushes_with_no_seta(self, fetch_allthethings_data):
     """get_max_pushes should return the number of pushes associated to the SETA scheduler."""
     fetch_allthethings_data.return_value = ALLTHETHINGS
     result = get_max_pushes("Platform2 mozilla-beta talos tp5o")
     assert result == MAX_PUSHES
Esempio n. 10
0
 def test_get_max_pushes_with_seta(self, fetch_allthethings_data):
     """get_max_pushes should return the number of pushes associated to the SETA scheduler."""
     fetch_allthethings_data.return_value = ALLTHETHINGS
     result = get_max_pushes("Windows 7 VM 32-bit mozilla-inbound opt test mochitest-2")
     assert result == 8
Esempio n. 11
0
 def test_get_max_pushes_with_seta(self, fetch_allthethings_data):
     """get_max_pushes should return the number of pushes associated to the SETA scheduler."""
     fetch_allthethings_data.return_value = ALLTHETHINGS
     assert get_max_pushes("Ubuntu VM 12.04 x64 fx-team opt test mochitest-2") == 7
Esempio n. 12
0
 def test_get_max_pushes_with_seta(self, fetch_allthethings_data):
     """get_max_pushes should return the number of pushes associated to the SETA scheduler."""
     fetch_allthethings_data.return_value = MOCK_ALLTHETHINGS
     # assertEquals is a deprecated alias (removed in Python 3.12); use assertEqual.
     self.assertEqual(
         get_max_pushes("Rev4 MacOSX Snow Leopard 10.6 fx-team debug test cppunit"), 7)
Esempio n. 13
0
 def test_get_max_pushes_with_seta(self, fetch_allthethings_data):
     """get_max_pushes should return the number of pushes associated to the SETA scheduler."""
     fetch_allthethings_data.return_value = ALLTHETHINGS
     builder = "Ubuntu VM 12.04 x64 fx-team opt test mochitest-2"
     assert get_max_pushes(builder) == 7