Example 1
def new_wpt_pr(git_gecko, git_wpt, pr_data, raise_on_error=True, repo_update=True):
    """ Start a new downstream sync """
    if pr_data["user"]["login"] == env.config["web-platform-tests"]["github"]["user"]:
        raise ValueError("Tried to create a downstream sync for a PR created "
                         "by the wpt bot")
    if repo_update:
        update_repositories(git_gecko, git_wpt)
    pr_id = pr_data["number"]
    if DownstreamSync.for_pr(git_gecko, git_wpt, pr_id):
        return
    wpt_base = "origin/%s" % pr_data["base"]["ref"]

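    # Take the per-PR downstream lock before creating the sync and mutating it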
    with SyncLock("downstream", str(pr_id)) as lock:
        sync = DownstreamSync.new(lock,
                                  git_gecko,
                                  git_wpt,
                                  wpt_base,
                                  pr_id,
                                  pr_data["title"],
                                  pr_data["body"] or "")
        with sync.as_mut(lock):
            try:
                sync.update_commits()
                sync.update_github_check()
            except Exception as e:
                sync.error = e
                if raise_on_error:
                    raise
                traceback.print_exc()
                logger.error(e)
Example 2
def do_status(git_gecko, git_wpt, obj_type, sync_type, obj_id, *args,
              **kwargs):
    import upstream
    import downstream
    import landing
    import trypush
    if obj_type == "try":
        objs = trypush.TryPush.load_by_obj(git_gecko,
                                           sync_type,
                                           obj_id,
                                           seq_id=kwargs["seq_id"])
    else:
        if sync_type == "upstream":
            cls = upstream.UpstreamSync
        if sync_type == "downstream":
            cls = downstream.DownstreamSync
        if sync_type == "landing":
            cls = landing.LandingSync
        objs = cls.load_by_obj(git_gecko,
                               git_wpt,
                               obj_id,
                               seq_id=kwargs["seq_id"])

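    # Optionally filter to objects whose current status matches old_status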
    if kwargs["old_status"] is not None:
        objs = {item for item in objs if item.status == kwargs["old_status"]}

    if not objs:
        logger.error("No matching syncs found")

    for obj in objs:
        logger.info("Setting status of %s to %s" %
                    (obj.process_name, kwargs["new_status"]))
        with SyncLock.for_process(obj.process_name) as lock:
            with obj.as_mut(lock):
                obj.status = kwargs["new_status"]
Example 3
def handle_status(git_gecko, git_wpt, event):
    newrelic.agent.set_transaction_name("handle_status")
    if event["context"] == "upstream/gecko":
        # Never handle changes to our own status
        return

    repo_update = event.get("_wptsync", {}).get("repo_update", True)
    if repo_update:
        update_repositories(None, git_wpt, False)

    rev = event["sha"]
    # First check whether this commit is the current head of any pull request
    pr_id = pr_for_commit(git_wpt, rev)

    newrelic.agent.add_custom_parameter("rev", rev)
    newrelic.agent.add_custom_parameter("context", event["context"])
    newrelic.agent.add_custom_parameter("state", event["state"])

    if not pr_id:
        # This usually happens if we got behind, so the commit is no longer the latest one
        # There are a few possibilities for what happened:
        # * Something new was pushed. In that case ignoring this message is fine
        # * The PR got merged in a way that changed the SHAs. In that case we assume that
        #   the sync will get triggered later, e.g. when there's a push for the commit
        logger.warning(
            "Got status for commit %s which is not the current HEAD of any PR\n"
            "context: %s url: %s state: %s" %
            (rev, event["context"], event["target_url"], event["state"]))
        return
    else:
        logger.info("Got status for commit %s from PR %s\n"
                    "context: %s url: %s state: %s" %
                    (rev, pr_id, event["context"], event["target_url"],
                     event["state"]))

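    # Look up any existing sync associated with this PR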
    sync = get_pr_sync(git_gecko, git_wpt, pr_id)

    if not sync:
        # Presumably this is a thing we ought to be downstreaming, but missed somehow
        logger.info(
            "Got a status update for PR %s which is unknown to us; starting downstreaming"
            % pr_id)
        from update import schedule_pr_task
        schedule_pr_task("opened", env.gh_wpt.get_pull(pr_id))

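    # Dispatch to the handler that matches the sync type, if there is one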
    update_func = None
    if isinstance(sync, upstream.UpstreamSync):
        update_func = upstream.commit_status_changed
    elif isinstance(sync, downstream.DownstreamSync):
        update_func = downstream.commit_status_changed
    if update_func is not None:
        with SyncLock.for_process(sync.process_name) as lock:
            with sync.as_mut(lock):
                update_func(git_gecko, git_wpt, sync, event["context"],
                            event["state"], event["target_url"], event["sha"])
Example 4
def do_landing(git_gecko, git_wpt, *args, **kwargs):
    import errors
    import landing
    import update
    current_landing = landing.current(git_gecko, git_wpt)

    accept_failures = kwargs["accept_failures"]

    def update_landing():
        landing.update_landing(git_gecko,
                               git_wpt,
                               kwargs["prev_wpt_head"],
                               kwargs["wpt_head"],
                               kwargs["include_incomplete"],
                               retry=kwargs["retry"],
                               accept_failures=accept_failures)

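    # If the current landing has a try push, decide whether to wait for results, retry, or complete it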
    if current_landing and current_landing.latest_try_push:
        with SyncLock("landing", None) as lock:
            try_push = current_landing.latest_try_push
            logger.info("Found try push %s" % try_push.treeherder_url)
            if try_push.taskgroup_id is None:
                update.update_taskgroup_ids(git_gecko, git_wpt, try_push)
                assert try_push.taskgroup_id is not None
            with try_push.as_mut(lock), current_landing.as_mut(lock):
                if kwargs["retry"]:
                    update_landing()
                elif try_push.status == "open":
                    tasks = try_push.tasks()
                    try_result = current_landing.try_result(tasks=tasks)
                    if try_result == landing.TryPushResult.pending:
                        logger.info(
                            "Landing in bug %s is waiting for try results" %
                            current_landing.bug)
                    else:
                        try:
                            landing.try_push_complete(
                                git_gecko,
                                git_wpt,
                                try_push,
                                current_landing,
                                allow_push=kwargs["push"],
                                accept_failures=accept_failures,
                                tasks=tasks)
                        except errors.AbortError:
                            # Don't need to raise an error here because
                            # the logging is the important part
                            return
                else:
                    update_landing()
    else:
        update_landing()
Example 5
def do_try_push_add(git_gecko,
                    git_wpt,
                    sync_type=None,
                    sync_id=None,
                    **kwargs):
    import downstream
    import landing
    import trypush

    sync = None
    if sync_type is None:
        sync = sync_from_path(git_gecko, git_wpt)
    elif sync_type == "downstream":
        sync = downstream.DownstreamSync.for_pr(git_gecko, git_wpt, sync_id)
    elif sync_type == "landing":
        syncs = landing.LandingSync.for_bug(git_gecko,
                                            git_wpt,
                                            sync_id,
                                            flat=True)
        if syncs:
            sync = syncs[0]
    else:
        raise ValueError

    if not sync:
        raise ValueError

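    # Stand-in try interface: no actual push to try happens, the supplied try_rev is returned instead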
    class FakeTry(object):
        def __init__(self, *_args, **_kwargs):
            pass

        def __enter__(self):
            return self

        def __exit__(self, *args):
            pass

        def push(self):
            return kwargs["try_rev"]

    with SyncLock.for_process(sync.process_name) as lock:
        with sync.as_mut(lock):
            try_push = trypush.TryPush.create(
                lock,
                sync,
                None,
                stability=kwargs["stability"],
                try_cls=FakeTry,
                rebuild_count=kwargs["rebuild_count"],
                check_open=False)

    print "Now run an update for the sync"
Example 6
def delete_worktree(process_name, worktree):
    assert worktree.path.startswith(
        os.path.join(env.config["root"], env.config["paths"]["worktrees"]))
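    # Hold the lock for this process while removing the worktree and pruning it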
    with SyncLock.for_process(process_name):
        try:
            logger.info("Deleting path %s" % worktree.path)
            shutil.rmtree(worktree.path)
        except Exception:
            logger.warning("Failed to remove worktree %s:%s" %
                           (worktree.path, traceback.format_exc()))
        else:
            logger.info("Removed worktree %s" % (worktree.path, ))
        worktree.prune(True)
Example 7
    def __call__(self, git_gecko, git_wpt, body):
        newrelic.agent.set_transaction_name("TaskGroupHandler")
        taskgroup_id = tc.normalize_task_id(body["taskGroupId"])

        newrelic.agent.add_custom_parameter("tc_task", taskgroup_id)

        try_push = trypush.TryPush.for_taskgroup(git_gecko, taskgroup_id)
        if not try_push:
            logger.debug("No try push for taskgroup %s" % taskgroup_id)
            # this is not one of our try_pushes
            return
        logger.info("Found try push for taskgroup %s" % taskgroup_id)
        sync = try_push.sync(git_gecko, git_wpt)

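        # Mutate the sync and the try push together under a single lock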
        with SyncLock.for_process(sync.process_name) as lock:
            with sync.as_mut(lock), try_push.as_mut(lock):
                # We sometimes see the taskgroup ID being None. If it isn't set but the try push
                # was found via this taskgroup ID, it is safe to set it here.
                if try_push.taskgroup_id is None:
                    logger.info(
                        "Try push for taskgroup %s does not have its ID set, setting now"
                        % taskgroup_id)
                    try_push.taskgroup_id = taskgroup_id
                    newrelic.agent.record_custom_event("taskgroup_id_missing",
                                                       params={
                                                           "taskgroup-id":
                                                           taskgroup_id,
                                                           "try_push":
                                                           try_push,
                                                           "sync": sync,
                                                       })
                elif try_push.taskgroup_id != taskgroup_id:
                    msg = (
                        "TryPush %s, expected taskgroup ID %s, found %s instead"
                        % (try_push, taskgroup_id, try_push.taskgroup_id))
                    logger.error(msg)
                    exc = ValueError(msg)
                    newrelic.agent.record_exception(exc=exc)
                    raise exc

                if sync:
                    logger.info("Updating try push for sync %r" % sync)
                if isinstance(sync, downstream.DownstreamSync):
                    downstream.try_push_complete(git_gecko, git_wpt, try_push,
                                                 sync)
                elif isinstance(sync, landing.LandingSync):
                    landing.try_push_complete(git_gecko, git_wpt, try_push,
                                              sync)
Example 8
def update_bug(git_gecko, git_wpt, bug):
    syncs = get_bug_sync(git_gecko, git_wpt, bug)
    if not syncs:
        raise ValueError("No sync for bug %s" % bug)

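    # Update any upstream syncs for the bug, grouped by status, under the shared upstream lock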
    for status in upstream.UpstreamSync.statuses:
        syncs_for_status = syncs.get(status)
        if not syncs_for_status:
            continue
        with SyncLock("upstream", None) as lock:
            for sync in syncs_for_status:
                if isinstance(sync, upstream.UpstreamSync):
                    with sync.as_mut(lock):
                        upstream.update_sync(git_gecko, git_wpt, sync)
                else:
                    logger.warning("Can't update sync %s" % sync)
Example 9
def do_notify(git_gecko, git_wpt, pr_ids, *args, **kwargs):
    import downstream
    if not pr_ids:
        syncs = [(None, sync_from_path(git_gecko, git_wpt))]
    else:
        syncs = [
            (pr_id, downstream.DownstreamSync.for_pr(git_gecko, git_wpt, pr_id))
            for pr_id in pr_ids
        ]
    for pr_id, sync in syncs:
        if sync is None:
            logger.error("No active sync for PR %s" % pr_id)
        else:
            with SyncLock.for_process(sync.process_name) as lock:
                with sync.as_mut(lock):
                    sync.try_notify(force=kwargs["force"])
Example 10
def gecko_push(git_gecko,
               git_wpt,
               repository_name,
               hg_rev,
               raise_on_error=False,
               base_rev=None):
    rev = git_gecko.cinnabar.hg2git(hg_rev)
    last_sync_point, prev_commit = UpstreamSync.prev_gecko_commit(
        git_gecko, repository_name)

    if base_rev is None and git_gecko.is_ancestor(rev,
                                                  last_sync_point.commit.sha1):
        logger.info("Last sync point moved past commit")
        return

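    # Take the shared upstream lock before creating or updating syncs for this push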
    with SyncLock("upstream", None) as lock:
        updated = updated_syncs_for_push(
            git_gecko, git_wpt, prev_commit,
            sync_commit.GeckoCommit(git_gecko, rev))

        if updated is None:
            return set(), set(), set()

        create_endpoints, update_syncs = updated

        pushed_syncs, failed_syncs = update_sync_prs(
            lock,
            git_gecko,
            git_wpt,
            create_endpoints,
            update_syncs,
            raise_on_error=raise_on_error)

        landable_syncs = {
            item
            for item in UpstreamSync.load_by_status(git_gecko, git_wpt, "open")
            if item.error is None
        }
        landed_syncs = try_land_syncs(lock, landable_syncs)

        # TODO
        if not git_gecko.is_ancestor(rev, last_sync_point.commit.sha1):
            with last_sync_point.as_mut(lock):
                last_sync_point.commit = rev

    return pushed_syncs, landed_syncs, failed_syncs
Example 11
    def try_notify(self, force=False):
        if self.results_notified and not force:
            return

        if not self.bug:
            logger.error("Sync for PR %s has no associated bug" % self.pr)
            return

        if not self.affected_tests():
            logger.debug("PR %s doesn't have affected tests so skipping results notification" %
                         self.pr)
            return

        logger.info("Trying to generate results notification for PR %s" % self.pr)

        results = notify.results.for_sync(self)

        if not results:
            # TODO handle errors here better, perhaps
            logger.error("Failed to get results notification for PR %s" % self.pr)
            return

        message, truncated = notify.msg.for_results(results)

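        # Report the results on Bugzilla: as an attachment if the message was truncated, otherwise as a comment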
        with env.bz.bug_ctx(self.bug) as bug:
            if truncated:
                bug.add_attachment(data=message.encode("utf8"),
                                   file_name="wpt-results.md",
                                   summary="Notable wpt changes",
                                   is_markdown=True,
                                   comment=truncated)
            else:
                env.bz.comment(self.bug, message, is_markdown=True)

        bugs = notify.bugs.for_sync(self, results)
        notify.bugs.update_metadata(self, bugs)

        self.results_notified = True

        with SyncLock.for_process(self.process_name) as lock:
            for try_push in self.try_pushes():
                with try_push.as_mut(lock):
                    try_push.cleanup_logs()
Example 12
def do_delete(git_gecko, git_wpt, sync_type, obj_ids, *args, **kwargs):
    import trypush
    for obj_id in obj_ids:
        logger.info("%s %s" % (sync_type, obj_id))
        if kwargs["try"]:
            objs = trypush.TryPush.load_by_obj(git_gecko,
                                               sync_type,
                                               obj_id,
                                               seq_id=kwargs["seq_id"])
        else:
            objs = get_syncs(git_gecko,
                             git_wpt,
                             sync_type,
                             obj_id,
                             seq_id=kwargs["seq_id"])
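        # Unless deleting all matching objects, only delete the one with the highest seq id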
        if not kwargs["all"] and objs:
            objs = sorted(objs, key=lambda x: -int(x.process_name.seq_id))[:1]
        for obj in objs:
            with SyncLock.for_process(obj.process_name) as lock:
                with obj.as_mut(lock):
                    obj.delete()
Example 13
def do_retrigger(git_gecko, git_wpt, **kwargs):
    import errors
    import update
    import upstream
    from landing import current, load_sync_point, unlanded_with_type

    update_repositories(git_gecko, git_wpt)

    if kwargs["upstream"]:
        print("Retriggering upstream syncs with errors")
        for sync in upstream.UpstreamSync.load_by_status(
                git_gecko, git_wpt, "open"):
            if sync.error:
                with SyncLock.for_process(sync.process_name) as lock:
                    with sync.as_mut(lock):
                        try:
                            upstream.update_sync(git_gecko,
                                                 git_wpt,
                                                 sync,
                                                 repo_update=False)
                        except errors.AbortError as e:
                            print("Update failed:\n%s" % e)

    if kwargs["downstream"]:
        print("Retriggering downstream syncs on master")
        current_landing = current(git_gecko, git_wpt)
        if current_landing is None:
            sync_point = load_sync_point(git_gecko, git_wpt)
            prev_wpt_head = sync_point["upstream"]
        else:
            prev_wpt_head = current_landing.wpt_commits.head.sha1
        unlandable = unlanded_with_type(git_gecko, git_wpt, None,
                                        prev_wpt_head)

        retrigger_errors = update.retrigger(git_gecko, git_wpt, unlandable)
        if retrigger_errors:
            print("The following PRs have errors:\n%s" % "\n".join(retrigger_errors))
Example 14
def handle_pr(git_gecko, git_wpt, event):
    newrelic.agent.set_transaction_name("handle_pr")
    pr_id = event["number"]
    newrelic.agent.add_custom_parameter("pr", pr_id)
    newrelic.agent.add_custom_parameter("action", event["action"])
    env.gh_wpt.load_pull(event["pull_request"])

    sync = get_pr_sync(git_gecko, git_wpt, pr_id)
    repo_update = event.get("_wptsync", {}).get("repo_update", True)

    if not sync:
        # If we don't know about this sync then it's a new thing that we should
        # set up state for
        # TODO: maybe want to create a new sync here irrespective of the event
        # type because we missed some events.
        if event["action"] == "opened":
            downstream.new_wpt_pr(git_gecko,
                                  git_wpt,
                                  event["pull_request"],
                                  repo_update=repo_update)
    else:
        if isinstance(sync, downstream.DownstreamSync):
            update_func = downstream.update_pr
        elif isinstance(sync, upstream.UpstreamSync):
            update_func = upstream.update_pr
        else:
            return

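        # Record merge details so the handler can distinguish a merged PR from one that was just closed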
        merge_sha = (event["pull_request"]["merge_commit_sha"]
                     if event["pull_request"]["merged"] else None)
        merged_by = (event["pull_request"]["merged_by"]["login"]
                     if merge_sha else None)
        with SyncLock.for_process(sync.process_name) as lock:
            with sync.as_mut(lock):
                update_func(git_gecko, git_wpt, sync, event["action"],
                            merge_sha, event["pull_request"]["base"]["sha"],
                            merged_by)
Example 15
    def reverts_syncs(self):
        """Return a set containing the previous syncs reverted by this one, if any"""
        revert_re = re.compile("This reverts commit ([0-9A-Fa-f]+)")
        unreverted_commits = defaultdict(set)
        for commit in self.wpt_commits:
            if not commit.msg.startswith("Revert "):
                # If not everything is a revert then return
                return set()
            revert_shas = revert_re.findall(commit.msg)
            if len(revert_shas) == 0:
                return set()
            # Just use the first match for now
            sha = revert_shas[0]
            try:
                self.git_wpt.rev_parse(sha)
            except (ValueError, git.BadName):
                # Commit isn't in this repo (could be upstream)
                return set()
            pr = env.gh_wpt.pr_for_commit(sha)
            if pr is None:
                return set()
            sync = DownstreamSync.for_pr(self.git_gecko, self.git_wpt, pr)
            if sync is None:
                return set()
            if sync not in unreverted_commits:
                # Ensure we have the latest commits for the reverted sync
                with SyncLock.for_process(sync.process_name) as revert_lock:
                    with sync.as_mut(revert_lock):
                        sync.update_wpt_commits()
                unreverted_commits[sync] = {item.sha1 for item in sync.wpt_commits}
            if sha in unreverted_commits[sync]:
                unreverted_commits[sync].remove(sha)

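        # A sync is considered reverted only if every one of its commits was matched by a revert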
        rv = {sync for sync, unreverted in unreverted_commits.items()
              if not unreverted}
        return rv
Example 16
def update_pr(git_gecko, git_wpt, pr, force_rebase=False, repo_update=True):
    sync = get_pr_sync(git_gecko, git_wpt, pr.number)

    if sync and sync.status == "complete":
        logger.info("Sync already landed")
        return
    if sync:
        logger.info("sync status %s" % sync.landable_status)
    sync_point = landing.load_sync_point(git_gecko, git_wpt)

    if not sync:
        # If this looks like something that came from gecko, create
        # a corresponding sync
        with SyncLock("upstream", None) as lock:
            upstream_sync = upstream.UpstreamSync.from_pr(lock,
                                                          git_gecko,
                                                          git_wpt,
                                                          pr.number,
                                                          pr.body)
            if upstream_sync is not None:
                with upstream_sync.as_mut(lock):
                    upstream.update_pr(git_gecko,
                                       git_wpt,
                                       upstream_sync,
                                       pr.state,
                                       pr.merged)
            else:
                if pr.state != "open" and not pr.merged:
                    return
            schedule_pr_task("opened", pr, repo_update=repo_update)
            update_for_status(pr, repo_update=repo_update)
    elif isinstance(sync, downstream.DownstreamSync):
        with SyncLock.for_process(sync.process_name) as lock:
            with sync.as_mut(lock):
                if force_rebase:
                    sync.gecko_rebase(sync.gecko_integration_branch())

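                # Ensure the sync has the PR's commits and an associated bug before handling state changes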
                if len(sync.wpt_commits) == 0:
                    sync.update_wpt_commits()

                if not sync.bug and not (pr.state == "closed" and not pr.merged):
                    sync.create_bug(git_wpt, pr.number, pr.title, pr.body)

                if pr.state == "open" or pr.merged:
                    if pr.head.sha != sync.wpt_commits.head:
                        # Upstream has different commits, so run a push handler
                        schedule_pr_task("push", pr, repo_update=repo_update)

                    elif sync.latest_valid_try_push:
                        logger.info("Treeherder url %s" % sync.latest_valid_try_push.treeherder_url)
                        if not sync.latest_valid_try_push.taskgroup_id:
                            update_taskgroup_ids(git_gecko, git_wpt,
                                                 sync.latest_valid_try_push)

                        if (sync.latest_valid_try_push.taskgroup_id and
                            not sync.latest_valid_try_push.status == "complete"):
                            update_tasks(git_gecko, git_wpt, sync=sync)

                        if not sync.latest_valid_try_push.taskgroup_id:
                            logger.info("Try push doesn't have a complete decision task")
                            return
                if not pr.merged:
                    update_for_status(pr, repo_update=repo_update)
                else:
                    update_for_action(pr, "closed", repo_update=repo_update)

    elif isinstance(sync, upstream.UpstreamSync):
        with SyncLock.for_process(sync.process_name) as lock:
            with sync.as_mut(lock):
                merge_sha = pr.merge_commit_sha if pr.merged else None
                upstream.update_pr(git_gecko, git_wpt, sync, pr.state, merge_sha)
                sync.try_land_pr()
                if merge_sha:
                    if git_wpt.is_ancestor(merge_sha, sync_point["upstream"]):
                        # This sync already landed, so it should be finished
                        sync.finish()
                    else:
                        if sync.status == "complete":
                            # We bypass the setter here because we have some cases where the
                            # status must go from complete to wpt-merged which is otherwise
                            # forbidden
                            sync.process_name.status = "wpt-merged"
                        else:
                            sync.status = "wpt-merged"
Example 17
    def update_commits(self):
        exception = None
        try:
            self.update_wpt_commits()

            # Check if this sync reverts some unlanded earlier PR and if so mark both
            # as skip and don't try to apply the commits here
            reverts = self.reverts_syncs()
            if reverts:
                all_open = all(item.status == "open" for item in reverts)
                for revert_sync in reverts:
                    if revert_sync.status == "open":
                        logger.info("Skipping sync for PR %s because it is later reverted" %
                                    revert_sync.pr)
                        with SyncLock.for_process(revert_sync.process_name) as revert_lock:
                            with revert_sync.as_mut(revert_lock):
                                revert_sync.skip = True
                # TODO: If this commit reverts some closed syncs, then set the metadata
                # commit of this commit to the revert of the metadata commit from that
                # sync
                if all_open:
                    logger.info("Sync was a revert of other open syncs, skipping")
                    self.skip = True
                    return False

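            # Record the current gecko head so we can tell whether applying the commits changed anything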
            old_gecko_head = self.gecko_commits.head.sha1
            logger.debug("PR %s gecko HEAD was %s" % (self.pr, old_gecko_head))

            def plain_apply():
                logger.info("Applying on top of the current commits")
                self.wpt_to_gecko_commits()
                return True

            def rebase_apply():
                logger.info("Applying with a rebase onto latest inbound")
                new_base = self.gecko_integration_branch()
                gecko_work = self.gecko_worktree.get()
                reset_head = "HEAD"
                if (len(self.gecko_commits) > 0 and
                    self.gecko_commits[0].metadata.get("wpt-type") == "dependent"):
                    # If we have any dependent commits first reset to the new
                    # head. This prevents conflicts if the dependents already
                    # landed
                    # TODO: Actually check if they landed?
                    reset_head = new_base
                gecko_work.git.reset(reset_head, hard=True)
                try:
                    self.gecko_rebase(new_base)
                except git.GitCommandError:
                    try:
                        gecko_work.git.rebase(abort=True)
                    except git.GitCommandError:
                        pass
                    raise AbortError("Rebasing onto latest gecko failed")
                self.wpt_to_gecko_commits(base=new_base)
                return True

            def dependents_apply():
                logger.info("Applying with upstream dependents")
                dependencies = self.unlanded_commits_same_files()
                if dependencies:
                    logger.info("Found dependencies:\n%s" %
                                "\n".join(item.msg.splitlines()[0] for item in dependencies))
                    self.wpt_to_gecko_commits(dependencies)
                    env.bz.comment(self.bug,
                                   "PR %s applied with additional changes from upstream: %s"
                                   % (self.pr, ", ".join(item.sha1 for item in dependencies)))
                    return True

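            # Try each application strategy in turn, keeping the last error to re-raise if all of them fail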
            error = None
            for fn in [plain_apply, rebase_apply, dependents_apply]:
                try:
                    if fn():
                        error = None
                        break
                    else:
                        logger.error("Applying with %s was a no-op" % fn.__name__)
                except Exception as e:
                    import traceback
                    error = e
                    logger.error("Applying with %s errored" % fn.__name__)
                    logger.error(traceback.format_exc())

            if error is not None:
                raise error

            logger.debug("PR %s gecko HEAD now %s" % (self.pr, self.gecko_commits.head.sha1))
            if old_gecko_head == self.gecko_commits.head.sha1:
                logger.info("Gecko commits did not change for PR %s" % self.pr)
                return False

            # If we have a metadata commit already, ensure it's applied now
            if "metadata-commit" in self.data:
                self.ensure_metadata_commit()

            renames = self.wpt_renames()
            self.move_metadata(renames)
            self.update_bug_components(renames)

            files_changed = self.files_changed()
            self.set_bug_component(files_changed)
        except Exception as e:
            exception = e
            raise
        finally:
            # If we managed to apply all the commits without error, reset the error flag
            # otherwise update it with the current exception
            self.error = exception
        return True
Example 18
    def __call__(self, git_gecko, git_wpt, body):
        newrelic.agent.set_transaction_name("TaskHandler")
        task_id = body["status"]["taskId"]
        state = body["status"]["state"]

        newrelic.agent.add_custom_parameter("tc_task", task_id)
        newrelic.agent.add_custom_parameter("state", state)

        # Enforce the invariant that the taskgroup id is not set until
        # the decision task is complete. This allows us to determine if a
        # try push should have the expected wpt tasks just by checking if
        # this is set
        if state not in ("completed", "failed", "exception"):
            logger.info("Decision task is not yet complete, status %s" % state)
            return

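        # Fetch the task to recover the gecko revision the decision task ran against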
        task = tc.get_task(task_id)
        sha1 = task.get("payload", {}).get("env", {}).get("GECKO_HEAD_REV")

        if sha1 is None:
            raise ValueError("Failed to get commit sha1 from task message")

        if state == "exception":
            run_id = body.get("runId")
            runs = body.get("status", {}).get("runs", [])
            if 0 <= run_id < len(runs):
                reason = runs[run_id].get("reasonResolved")
                if reason in [
                        "superseded", "claim-expired", "worker-shutdown",
                        "intermittent-task"
                ]:
                    logger.info("Task %s had an exception for reason %s, "
                                "assuming taskcluster will retry" %
                                (task_id, reason))
                    return

        try_push = trypush.TryPush.for_commit(git_gecko, sha1)
        if not try_push:
            logger.debug("No try push for SHA1 %s taskId %s" % (sha1, task_id))
            owner = task.get("metadata", {}).get("owner")
            if owner == "*****@*****.**":
                # This could be a race condition if the decision task completes before this
                # task is in the index
                raise RetryableError(
                    "Got a wptsync task with no corresponding try push")
            return

        with SyncLock.for_process(try_push.process_name) as lock:
            with try_push.as_mut(lock):
                # If we retrigger, we create a new taskgroup, with id equal to the new task_id.
                # But the retriggered decision task itself is still in the original taskgroup
                if state == "completed":
                    logger.info("Setting taskgroup id for try push %r to %s" %
                                (try_push, task_id))
                    try_push.taskgroup_id = task_id
                elif state in ("failed", "exception"):
                    sync = try_push.sync(git_gecko, git_wpt)
                    message = ("Decision task got status %s for task %s%s" %
                               (state, sha1, " PR %s" %
                                sync.pr if sync and sync.pr else ""))
                    logger.error(message)
                    task = tc.get_task(task_id)
                    taskgroup = tc.TaskGroup(task["taskGroupId"])
                    if len(
                            taskgroup.view(lambda x: x["task"]["metadata"][
                                "name"] == "Gecko Decision Task")) > 5:
                        try_push.status = "complete"
                        try_push.infra_fail = True
                        if sync and sync.bug:
                            # TODO this is commenting too frequently on bugs
                            env.bz.comment(
                                sync.bug,
                                "Try push failed: decision task %i returned error"
                                % task_id)
                    else:
                        client = tc.TaskclusterClient()
                        client.retrigger(task_id)