Example #1
 def deserialize(cls, train: "Train",
                 data: "TrainCar.Serialized") -> "TrainCar":
     # NOTE(sileht): Backward compat, can be removed soon
     if "state" not in data:
         data["state"] = "created"
     if "queued_at" not in data:
         data["queued_at"] = utils.utcnow()
     return cls(train, **data)
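
Every example in this section relies on utils.utcnow(). A minimal sketch of what such a helper presumably looks like, assuming it simply returns a timezone-aware UTC datetime (the real Mergify implementation may differ):

import datetime


def utcnow() -> datetime.datetime:
    # Hypothetical sketch: a timezone-aware "now" in UTC, so that
    # .timestamp() and .isoformat() behave consistently across examples.
    return datetime.datetime.now(tz=datetime.timezone.utc)
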
Example #2
    async def _translate_exception_to_retries(
        self,
        e,
        installation_id,
        attempts_key=None,
    ):
        stream_name = f"stream~{installation_id}"

        if isinstance(e, github.TooManyPages):
            # TODO(sileht): Ideally this should be caught earlier so we could post an
            # appropriate check-run to inform the user that the PR is too big to be
            # handled by Mergify, but that needs a bit of refactoring, so in the
            # meantime...
            if attempts_key:
                await self.redis.hdel("attempts", attempts_key)
            await self.redis.hdel("attempts", stream_name)
            raise IgnoredException()

        if exceptions.should_be_ignored(e):
            if attempts_key:
                await self.redis.hdel("attempts", attempts_key)
            await self.redis.hdel("attempts", stream_name)
            raise IgnoredException()

        if isinstance(e, exceptions.RateLimited):
            retry_at = utils.utcnow() + datetime.timedelta(seconds=e.countdown)
            score = retry_at.timestamp()
            if attempts_key:
                await self.redis.hdel("attempts", attempts_key)
            await self.redis.hdel("attempts", stream_name)
            await self.redis.zaddoption("streams", "XX",
                                        **{stream_name: score})
            raise StreamRetry(0, retry_at) from e

        backoff = exceptions.need_retry(e)
        if backoff is None:
            # NOTE(sileht): This is our fault, so retry until we fix the bug but
            # without increasing the attempts
            raise

        attempts = await self.redis.hincrby("attempts", stream_name)
        retry_in = 3**min(attempts, 3) * backoff
        retry_at = utils.utcnow() + datetime.timedelta(seconds=retry_in)
        score = retry_at.timestamp()
        await self.redis.zaddoption("streams", "XX", **{stream_name: score})
        raise StreamRetry(attempts, retry_at) from e
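
The retry delay above grows exponentially with the attempt counter and is capped at 27 times the base backoff. A small illustrative sketch of the values produced by retry_in = 3**min(attempts, 3) * backoff (the backoff value here is hypothetical; the real one comes from exceptions.need_retry):

# Illustrative only: how the stream retry delay scales with the attempt counter.
backoff = 5  # hypothetical base backoff, in seconds
for attempts in range(1, 6):
    retry_in = 3 ** min(attempts, 3) * backoff
    print(attempts, retry_in)  # 1 -> 15, 2 -> 45, 3 and above -> 135
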
Example #3
def _move_pull_at_end(pull):  # pragma: no cover
    redis = utils.get_redis_for_cache()
    queue = _get_queue_cache_key(pull)
    score = utils.utcnow().timestamp()
    redis.zadd(queue, {pull.g_pull.number: score}, xx=True)
    pull.log.debug(
        "pull request moved at the end of the merge queue", queue=queue,
    )
Example #4
    def _add_pull(self, pull_number, priority, update=False):
        """Add a pull without setting its method.

        :param update: If True, don't add the pull request if it is not already in the queue.
        """
        score = utils.utcnow().timestamp() / priority
        if update:
            flags = dict(xx=True)
        else:
            flags = dict(nx=True)
        self.redis.zadd(self._cache_key, {pull_number: score}, **flags)
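
Because the score is the enqueue timestamp divided by the priority, a higher priority produces a lower score and therefore an earlier position in the sorted set. A quick illustration with hypothetical numbers:

# Illustrative only: two pulls enqueued at the same instant with different priorities.
now = 1_600_000_000.0
print(now / 1000)  # higher priority -> lower score -> dequeued first
print(now / 1)     # lower priority -> higher score -> dequeued last
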
Example #5
async def create_initial_summary(owner: str, event_type: str, data: dict) -> None:
    if event_type != "pull_request":
        return

    if data["action"] != "opened":
        return

    redis = await utils.get_aredis_for_cache()

    if not await redis.exists(
        rules.get_config_location_cache_key(
            owner, data["pull_request"]["base"]["repo"]["name"]
        )
    ):
        # Mergify is probably not activated on this repo
        return

    # NOTE(sileht): It's possible that a "push" event creates a summary before we
    # receive the pull_request/opened event.
    # So we first check whether a summary already exists, to avoid posting it twice.
    # Since this method can run in parallel with the worker, this is not a 100%
    # reliable solution, but if we do post a duplicate summary,
    # check_api.set_check_run() handles this case and updates both to not confuse users.
    sha = context.Context.get_cached_last_summary_head_sha_from_pull(
        data["pull_request"]
    )
    if sha:
        return

    async with await github.aget_client(owner) as client:
        post_parameters = {
            "name": context.Context.SUMMARY_NAME,
            "head_sha": data["pull_request"]["head"]["sha"],
            "status": check_api.Status.IN_PROGRESS.value,
            "started_at": utils.utcnow().isoformat(),
            "details_url": f"{data['pull_request']['html_url']}/checks",
            "output": {
                "title": "Your rules are under evaluation",
                "summary": "Be patient, the page will be updated soon.",
            },
        }
        await client.post(
            f"/repos/{data['pull_request']['base']['user']['login']}/{data['pull_request']['base']['repo']['name']}/check-runs",
            api_version="antiope",  # type: ignore[call-arg]
            json=post_parameters,
        )

    await redis.set(
        context.Context.redis_last_summary_head_sha_key(data["pull_request"]),
        data["pull_request"]["head"]["sha"],
        ex=context.SUMMARY_SHA_EXPIRATION,
    )
Example #6
async def push(
    redis: utils.RedisStream,
    owner_id: github_types.GitHubAccountIdType,
    owner: github_types.GitHubLogin,
    repo_id: typing.Optional[github_types.GitHubRepositoryIdType],
    repo: github_types.GitHubRepositoryName,
    pull_number: typing.Optional[github_types.GitHubPullRequestNumber],
    event_type: github_types.GitHubEventType,
    data: github_types.GitHubEvent,
) -> typing.Tuple[T_MessageID, T_MessagePayload]:
    stream_name = f"stream~{owner}~{owner_id}"
    scheduled_at = utils.utcnow() + datetime.timedelta(
        seconds=WORKER_PROCESSING_DELAY)
    score = scheduled_at.timestamp()
    transaction = await redis.pipeline()
    # NOTE(sileht): Add this event to the pull request stream
    payload = T_MessagePayload({
        b"event":
        msgpack.packb(
            {
                "owner_id": owner_id,
                "repo_id": repo_id,
                "owner": owner,
                "repo": repo,
                "pull_number": pull_number,
                "source": {
                    "event_type": event_type,
                    "data": data,
                    "timestamp": datetime.datetime.utcnow().isoformat(),
                },
            },
            use_bin_type=True,
        ),
    })

    await transaction.xadd(stream_name, payload)
    # NOTE(sileht): Add the pull request stream to the list of streams to process,
    # only if it does not already exist, so we don't update the score (date)
    await transaction.zaddoption("streams", "NX", **{stream_name: score})
    message_id: T_MessageID = (await transaction.execute())[0]
    LOG.debug(
        "pushed to worker",
        gh_owner=owner,
        gh_repo=repo,
        gh_pull=pull_number,
        event_type=event_type,
    )
    return (message_id, payload)
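
On the consuming side, a worker presumably picks up the streams whose score (scheduled time) is already due and reads the packed events back. A hypothetical sketch using the same aredis-style client; the function name and selection logic are assumptions, only the "streams" key and the payload layout come from push() above:

import msgpack


async def iter_due_events(redis: utils.RedisStream) -> None:
    # Hypothetical sketch: streams scheduled in the past are ready to process.
    now = utils.utcnow().timestamp()
    for stream_name in await redis.zrangebyscore("streams", min=0, max=now):
        for message_id, fields in await redis.xrange(stream_name):
            event = msgpack.unpackb(fields[b"event"], raw=False)
            # ... dispatch the event to the engine ...
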
Example #7
async def create_initial_summary(
        redis: utils.RedisCache,
        event: github_types.GitHubEventPullRequest) -> None:
    owner = event["repository"]["owner"]["login"]

    if not await redis.exists(
            context.Repository.get_config_location_cache_key(
                event["pull_request"]["base"]["repo"]["owner"]["login"],
                event["pull_request"]["base"]["repo"]["name"],
            )):
        # Mergify is probably not activated on this repo
        return

    # NOTE(sileht): It's possible that a "push" event creates a summary before we
    # receive the pull_request/opened event.
    # So we first check whether a summary already exists, to avoid posting it twice.
    # Since this method can run in parallel with the worker, this is not a 100%
    # reliable solution, but if we do post a duplicate summary,
    # check_api.set_check_run() handles this case and updates both to not confuse users.
    sha = await context.Context.get_cached_last_summary_head_sha_from_pull(
        redis, event["pull_request"])

    if sha is not None or sha == event["pull_request"]["head"]["sha"]:
        return

    async with github.aget_client(owner) as client:
        post_parameters = {
            "name": context.Context.SUMMARY_NAME,
            "head_sha": event["pull_request"]["head"]["sha"],
            "status": check_api.Status.IN_PROGRESS.value,
            "started_at": utils.utcnow().isoformat(),
            "details_url": f"{event['pull_request']['html_url']}/checks",
            "output": {
                "title": "Your rules are under evaluation",
                "summary": "Be patient, the page will be updated soon.",
            },
        }
        try:
            await client.post(
                f"/repos/{event['pull_request']['base']['user']['login']}/{event['pull_request']['base']['repo']['name']}/check-runs",
                json=post_parameters,
            )
        except http.HTTPClientSideError as e:
            if e.status_code == 422 and "No commit found for SHA" in e.message:
                return
            raise
Example #8
async def push(
    redis: aredis.StrictRedis,
    owner: str,
    repo: str,
    pull_number: typing.Optional[int],
    event_type: github_types.GitHubEventType,
    data: github_types.GitHubEvent,
) -> typing.Tuple[bool, T_Payload]:
    stream_name = f"stream~{owner}"
    scheduled_at = utils.utcnow() + datetime.timedelta(
        seconds=WORKER_PROCESSING_DELAY)
    score = scheduled_at.timestamp()
    transaction = await redis.pipeline()
    # NOTE(sileht): Add this event to the pull request stream
    payload = {
        b"event":
        msgpack.packb(
            {
                "owner": owner,
                "repo": repo,
                "pull_number": pull_number,
                "source": {
                    "event_type": event_type,
                    "data": data,
                    "timestamp": datetime.datetime.utcnow().isoformat(),
                },
            },
            use_bin_type=True,
        ),
    }

    await transaction.xadd(stream_name, payload)
    # NOTE(sileht): Add the pull request stream to the list of streams to process,
    # only if it does not already exist, so we don't update the score (date)
    await transaction.zaddoption("streams", "NX", **{stream_name: score})
    ret = await transaction.execute()
    LOG.debug(
        "pushed to worker",
        gh_owner=owner,
        gh_repo=repo,
        gh_pull=pull_number,
        event_type=event_type,
    )
    return (ret[0], payload)
Example #9
async def async_reschedule_now() -> int:
    parser = argparse.ArgumentParser(description="Rescheduler for Mergify")
    parser.add_argument("org", help="Organization")
    args = parser.parse_args()

    redis = await utils.create_aredis_for_stream()
    streams = await redis.zrangebyscore("streams", min=0, max="+inf")
    expected_stream = f"stream~{args.org.lower()}~"
    for stream in streams:
        if stream.decode().lower().startswith(expected_stream):
            scheduled_at = utils.utcnow()
            score = scheduled_at.timestamp()
            transaction = await redis.pipeline()
            await transaction.hdel("attempts", stream)
            await transaction.zadd("streams", **{stream.decode(): score})
            # NOTE(sileht): Do we need to clean up the per-PR attempt?
            # await transaction.hdel("attempts", attempts_key)
            await transaction.execute()
            return 0
    else:
        print(f"Stream for {args.org} not found")
        return 1
Example #10
def set_check_run(pull, name, status, conclusion=None, output=None):
    post_parameters = {
        "name": name,
        "head_sha": pull.head.sha,
        "status": status,
    }
    if conclusion:
        post_parameters["conclusion"] = conclusion
    if output:
        post_parameters["output"] = output

    if status == "completed":
        post_parameters["completed_at"] = utils.utcnow().isoformat()

    checks = list(c for c in get_checks(pull, {"check_name": name})
                  if c._rawData['app']['id'] == config.INTEGRATION_ID)

    if not checks:
        headers, data = pull._requester.requestJsonAndCheck(
            "POST",
            "%s/check-runs" % (pull.base.repo.url),
            input=post_parameters,
            headers={'Accept':
                     'application/vnd.github.antiope-preview+json'}
        )
    elif len(checks) == 1:
        headers, data = pull._requester.requestJsonAndCheck(
            "PATCH",
            "%s/check-runs/%s" % (pull.base.repo.url, checks[0].id),
            input=post_parameters,
            headers={'Accept':
                     'application/vnd.github.antiope-preview+json'}
        )
    else:  # pragma: no cover
        raise RuntimeError("Multiple mergify checks have been created, "
                           "we have a bug. %s" % pull.url)

    return Check(pull._requester, headers, data, completed=True)
Example #11
    async def add_pull(self, ctxt: context.Context,
                       config: queue.PullQueueConfig) -> None:
        # TODO(sileht): handle base branch change

        best_position = -1
        for position, pseudo_car in enumerate(self._iter_pseudo_cars()):
            if pseudo_car.user_pull_request_number == ctxt.pull["number"]:
                # already in queue, we are good
                self.log.info(
                    "pull request already in train",
                    gh_pull=ctxt.pull["number"],
                    config=config,
                )
                return

            if (best_position == -1 and config["effective_priority"] >
                    pseudo_car.config["effective_priority"]):
                # We found a car with lower priority
                best_position = position

        if best_position == -1:
            best_position = len(self._cars) + len(self._waiting_pulls)

        await self._slice_cars_at(best_position)
        self._waiting_pulls.insert(
            best_position - len(self._cars),
            WaitingPull(ctxt.pull["number"], config, utils.utcnow()),
        )
        await self._save()
        ctxt.log.info(
            "pull request added to train",
            position=best_position,
            queue_name=config["name"],
        )

        # Refresh summary of others
        await self._refresh_pulls(ctxt.pull["base"]["repo"],
                                  except_pull_request=ctxt.pull["number"])
Example #12
async def push(redis, installation_id, owner, repo, pull_number, event_type,
               data):
    stream_name = f"stream~{installation_id}"
    scheduled_at = utils.utcnow() + datetime.timedelta(
        seconds=WORKER_PROCESSING_DELAY)
    score = scheduled_at.timestamp()
    transaction = await redis.pipeline()
    # NOTE(sileht): Add this event to the pull request stream
    payload = {
        b"event":
        msgpack.packb(
            {
                "owner": owner,
                "repo": repo,
                "pull_number": pull_number,
                "source": {
                    "event_type": event_type,
                    "data": data
                },
            },
            use_bin_type=True,
        ),
    }

    ret = await redis.xadd(stream_name, payload)
    # NOTE(sileht): Add the pull request stream to the list of streams to process,
    # only if it does not already exist, so we don't update the score (date)
    await transaction.zaddoption("streams", "NX", **{stream_name: score})
    await transaction.execute()
    LOG.debug(
        "pushed to worker",
        gh_owner=owner,
        gh_repo=repo,
        gh_pull=pull_number,
        event_type=event_type,
    )
    return (ret, payload)
Example #13
def set_check_run(pull, name, status, conclusion=None, output=None):
    post_parameters = {
        "name": name,
        "head_sha": pull.head.sha,
        "status": status
    }
    if conclusion:
        post_parameters["conclusion"] = conclusion
    if output:
        # Maximum output/summary length for Check API is 65535
        summary = output.get("summary")
        if summary and len(summary) > 65535:
            output["summary"] = utils.unicode_truncate(summary, 65532)
            output["summary"] += "…"  # this is 3 bytes long
        post_parameters["output"] = output

    post_parameters["started_at"] = utils.utcnow().isoformat()
    post_parameters["details_url"] = "%s/checks" % pull.html_url

    if status == "completed":
        post_parameters["completed_at"] = utils.utcnow().isoformat()

    checks = list(c for c in get_checks(pull, {"check_name": name})
                  if c._rawData["app"]["id"] == config.INTEGRATION_ID)

    if not checks:
        headers, data = pull._requester.requestJsonAndCheck(
            "POST",
            "%s/check-runs" % (pull.base.repo.url),
            input=post_parameters,
            headers={"Accept": "application/vnd.github.antiope-preview+json"},
        )
        checks = [Check(pull._requester, headers, data, completed=True)]

    if len(checks) > 1:
        LOG.warning(
            "Multiple mergify checks have been created, "
            "we got the known race.",
            pull_request=pull,
        )

    post_parameters["details_url"] += "?check_run_id=%s" % checks[0].id

    # FIXME(sileht): We have no (simple) way to ensure we don't have multiple
    # workers doing POST at the same time. It's unlikely to happen, but it has
    # happened once, so to ensure Mergify continues to work, we update all
    # checks. Users will see the check twice for a while, but that's better than
    # having Mergify stuck
    for check in checks:
        # Don't do a useless update
        if compare_dict(
                post_parameters,
                check.raw_data,
            ("name", "head_sha", "status", "conclusion", "details_url"),
        ):
            if check.output == output:
                continue
            elif (check.output is not None and output is not None
                  and compare_dict(output, check.output,
                                   ("title", "summary"))):
                continue

        headers, data = pull._requester.requestJsonAndCheck(
            "PATCH",
            "%s/check-runs/%s" % (pull.base.repo.url, check.id),
            input=post_parameters,
            headers={"Accept": "application/vnd.github.antiope-preview+json"},
        )
        check = Check(pull._requester, headers, data, completed=True)

    return check
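
The summary is truncated to 65532 bytes and a 3-byte ellipsis is appended, keeping the total within the 65535 limit mentioned in the comment. A minimal sketch of what utils.unicode_truncate presumably does, assuming it cuts on a UTF-8 character boundary:

def unicode_truncate(text: str, length: int) -> str:
    # Hypothetical sketch: keep at most `length` bytes of UTF-8 without
    # splitting a multi-byte character in the middle.
    return text.encode("utf-8")[:length].decode("utf-8", errors="ignore")
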
Example #14
def set_check_run(ctxt, name, status, conclusion=None, output=None):
    post_parameters = {
        "name": name,
        "head_sha": ctxt.pull["head"]["sha"],
        "status": status,
    }
    if conclusion:
        post_parameters["conclusion"] = conclusion
    if output:
        # Maximum output/summary length for Check API is 65535
        summary = output.get("summary")
        if summary and len(summary) > 65535:
            output["summary"] = utils.unicode_truncate(summary, 65532)
            output["summary"] += "…"  # this is 3 bytes long
        post_parameters["output"] = output

    post_parameters["started_at"] = utils.utcnow().isoformat()
    post_parameters["details_url"] = "%s/checks" % ctxt.pull["html_url"]

    if status == "completed":
        post_parameters["completed_at"] = utils.utcnow().isoformat()

    checks = get_checks(ctxt, check_name=name, mergify_only=True)

    if not checks:
        checks = [
            ctxt.client.post(
                "check-runs",
                api_version="antiope",
                json=post_parameters,
            ).json()
        ]

    if len(checks) > 1:
        ctxt.log.warning(
            "Multiple mergify checks have been created, we got the known race.",
        )

    post_parameters["details_url"] += "?check_run_id=%s" % checks[0]["id"]

    # FIXME(sileht): We have no (simple) way to ensure we don't have multiple
    # workers doing POST at the same time. It's unlikely to happen, but it has
    # happened once, so to ensure Mergify continues to work, we update all
    # checks. Users will see the check twice for a while, but that's better than
    # having Mergify stuck
    for check in checks:
        # Don't do a useless update
        if compare_dict(
                post_parameters,
                check,
            ("name", "head_sha", "status", "conclusion", "details_url"),
        ):
            if check["output"] == output:
                continue
            elif (check["output"] is not None and output is not None
                  and compare_dict(output, check["output"],
                                   ("title", "summary"))):
                continue

        check = ctxt.client.patch(
            f"check-runs/{check['id']}",
            api_version="antiope",
            json=post_parameters,
        ).json()

    return check
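
compare_dict is what lets this code skip PATCH calls that would not change anything visible. A minimal sketch of what it presumably does, checking that the listed keys carry equal values in both dictionaries:

import typing


def compare_dict(d1: dict, d2: dict, keys: typing.Iterable[str]) -> bool:
    # Hypothetical sketch: True when every listed key has the same value
    # in both dictionaries (missing keys compare as None).
    return all(d1.get(key) == d2.get(key) for key in keys)
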
Example #15
    async def generate_merge_queue_summary(
        self,
        queue_rule: typing.Union[rules.EvaluatedQueueRule, rules.QueueRule],
        *,
        for_queue_pull_request: bool = False,
        show_queue: bool = True,
        headline: typing.Optional[str] = None,
    ) -> str:
        description = ""
        if headline:
            description += f"**{headline}**\n\n"

        description += (
            f"{self._get_embarked_refs(markdown=True)} are embarked together for merge."
        )

        if for_queue_pull_request:
            description += f"""

This pull request has been created by Mergify to speculatively check the mergeability of #{self.user_pull_request_number}.
You don't need to do anything. Mergify will close this pull request automatically when it is complete.
"""

        description += (
            f"\n\n**Required conditions of queue** `{queue_rule.name}` **for merge:**\n"
        )
        for cond in queue_rule.conditions:
            if isinstance(queue_rule, rules.EvaluatedQueueRule):
                checked = " " if cond in queue_rule.missing_conditions else "X"
            else:
                checked = " "
            description += f"\n- [{checked}] `{cond}`"
            if cond.description:
                description += f" [{cond.description}]"

        if show_queue:
            table = [
                "| | Pull request | Queue/Priority | Speculative checks | Queued",
                "| ---: | :--- | :--- | :--- | :--- |",
            ]
            for i, pseudo_car in enumerate(self.train._iter_pseudo_cars()):
                ctxt = await self.train.repository.get_pull_request_context(
                    pseudo_car.user_pull_request_number)
                pull_html_url = f"{ctxt.pull['base']['repo']['html_url']}/pull/{pseudo_car.user_pull_request_number}"
                try:
                    fancy_priority = merge_base.PriorityAliases(
                        pseudo_car.config["priority"]).name
                except ValueError:
                    fancy_priority = str(pseudo_car.config["priority"])

                speculative_checks = ""
                if isinstance(pseudo_car, TrainCar):
                    if pseudo_car.state == "updated":
                        speculative_checks = f"[in place]({pull_html_url})"
                    elif pseudo_car.state == "created":
                        speculative_checks = f"#{pseudo_car.queue_pull_request_number}"

                elapsed = utils.pretty_timedelta(utils.utcnow() -
                                                 pseudo_car.queued_at)
                table.append(
                    f"| {i + 1} "
                    f"| {ctxt.pull['title']} ([#{pseudo_car.user_pull_request_number}]({pull_html_url})) "
                    f"| {pseudo_car.config['name']}/{fancy_priority} "
                    f"| {speculative_checks} "
                    f"| {elapsed} ago "
                    "|")

            description += (
                "\n\n**The following pull requests are queued:**\n" +
                "\n".join(table))

        description += "\n\n---\n\n"
        description += constants.MERGIFY_MERGE_QUEUE_PULL_REQUEST_DOC
        return description.strip()
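
utils.pretty_timedelta turns the time a pull has spent in the queue into a short human-readable string. A rough sketch of such a helper, assuming it keeps only the most significant unit (the real formatting may differ):

import datetime


def pretty_timedelta(delta: datetime.timedelta) -> str:
    # Hypothetical sketch: "3 days", "2 hours", "5 minutes", "42 seconds", ...
    seconds = int(delta.total_seconds())
    for unit, size in (("day", 86400), ("hour", 3600), ("minute", 60)):
        if seconds >= size:
            value = seconds // size
            return f"{value} {unit}" + ("s" if value > 1 else "")
    return f"{seconds} second" + ("s" if seconds != 1 else "")
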
Example #16
def set_check_run(
    ctxt: "context.Context",
    name: str,
    result: Result,
    external_id: typing.Optional[str] = None,
) -> github_types.GitHubCheckRun:
    if result.conclusion is Conclusion.PENDING:
        status = Status.IN_PROGRESS
    else:
        status = Status.COMPLETED

    post_parameters = {
        "name": name,
        "head_sha": ctxt.pull["head"]["sha"],
        "status": status.value,
        "started_at": utils.utcnow().isoformat(),
        "details_url": f"{ctxt.pull['html_url']}/checks",
        "output": {
            "title": result.title,
            "summary": result.summary,
        },
    }

    if result.annotations is not None:
        post_parameters["output"]["annotations"] = result.annotations

    # Maximum output/summary length for Check API is 65535
    summary = post_parameters["output"]["summary"]
    if summary and len(summary) > 65535:
        post_parameters["output"]["summary"] = utils.unicode_truncate(
            summary, 65532)
        post_parameters["output"]["summary"] += "…"  # this is 3 bytes long

    if external_id:
        post_parameters["external_id"] = external_id

    if status is Status.COMPLETED:
        post_parameters["conclusion"] = result.conclusion.value
        post_parameters["completed_at"] = utils.utcnow().isoformat()

    checks = [c for c in ctxt.pull_engine_check_runs if c["name"] == name]

    if not checks:
        check = typing.cast(
            github_types.GitHubCheckRun,
            ctxt.client.post(
                f"{ctxt.base_url}/check-runs",
                api_version="antiope",  # type: ignore[call-arg]
                json=post_parameters,
            ).json(),
        )
        ctxt.update_pull_check_runs(check)
        return check

    elif len(checks) > 1:
        ctxt.log.warning(
            "Multiple mergify checks have been created, we got the known race.",
        )

    post_parameters["details_url"] += "?check_run_id=%s" % checks[0]["id"]

    # FIXME(sileht): We have no (simple) way to ensure we don't have multiple
    # workers doing POST at the same time. It's unlikely to happen, but it has
    # happened once, so to ensure Mergify continues to work, we update all
    # checks. Users will see the check twice for a while, but that's better than
    # having Mergify stuck
    for check in checks:
        # Don't do a useless update
        if compare_dict(
                post_parameters,
                check,
            ("name", "head_sha", "status", "conclusion", "details_url"),
        ):
            if check["output"] == post_parameters["output"]:
                continue
            elif check["output"] is not None and compare_dict(
                    post_parameters["output"], check["output"],
                ("title", "summary")):
                continue

        check = typing.cast(
            github_types.GitHubCheckRun,
            ctxt.client.patch(
                f"{ctxt.base_url}/check-runs/{check['id']}",
                api_version="antiope",  # type: ignore[call-arg]
                json=post_parameters,
            ).json(),
        )

    ctxt.update_pull_check_runs(check)
    return check
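
Status and Conclusion wrap the states a GitHub check-run can be in. A minimal sketch of what these enums presumably contain, based on the values the GitHub Checks API accepts (PENDING looks like an internal marker that set_check_run maps to the in_progress status rather than a value sent to GitHub):

import enum


class Status(enum.Enum):
    # Check-run statuses accepted by the GitHub Checks API.
    QUEUED = "queued"
    IN_PROGRESS = "in_progress"
    COMPLETED = "completed"


class Conclusion(enum.Enum):
    # Check-run conclusions, plus a hypothetical internal PENDING marker.
    PENDING = None
    CANCELLED = "cancelled"
    SUCCESS = "success"
    FAILURE = "failure"
    NEUTRAL = "neutral"
    ACTION_REQUIRED = "action_required"
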
Example #17
    async def _translate_exception_to_retries(
        self,
        stream_name: StreamNameType,
        attempts_key: typing.Optional[str] = None,
    ) -> typing.AsyncIterator[None]:
        try:
            yield
        except Exception as e:
            if isinstance(e, exceptions.MergeableStateUnknown) and attempts_key:
                attempts = await self.redis_stream.hincrby("attempts", attempts_key)
                if attempts < MAX_RETRIES:
                    raise PullRetry(attempts) from e
                else:
                    await self.redis_stream.hdel("attempts", attempts_key)
                    raise MaxPullRetry(attempts) from e

            if isinstance(e, exceptions.MergifyNotInstalled):
                if attempts_key:
                    await self.redis_stream.hdel("attempts", attempts_key)
                await self.redis_stream.hdel("attempts", stream_name)
                raise StreamUnused(stream_name)

            if isinstance(e, github.TooManyPages):
                # TODO(sileht): Ideally this should be caught earlier so we could post an
                # appropriate check-run to inform the user that the PR is too big to be
                # handled by Mergify, but that needs a bit of refactoring, so in the
                # meantime...
                if attempts_key:
                    await self.redis_stream.hdel("attempts", attempts_key)
                await self.redis_stream.hdel("attempts", stream_name)
                raise IgnoredException()

            if exceptions.should_be_ignored(e):
                if attempts_key:
                    await self.redis_stream.hdel("attempts", attempts_key)
                await self.redis_stream.hdel("attempts", stream_name)
                raise IgnoredException()

            if isinstance(e, exceptions.RateLimited):
                retry_at = utils.utcnow() + e.countdown
                score = retry_at.timestamp()
                if attempts_key:
                    await self.redis_stream.hdel("attempts", attempts_key)
                await self.redis_stream.hdel("attempts", stream_name)
                await self.redis_stream.zaddoption(
                    "streams", "XX", **{stream_name: score}
                )
                raise StreamRetry(stream_name, 0, retry_at)

            backoff = exceptions.need_retry(e)
            if backoff is None:
                # NOTE(sileht): This is our fault, so retry until we fix the bug but
                # without increasing the attempts
                raise

            attempts = await self.redis_stream.hincrby("attempts", stream_name)
            retry_in = 3 ** min(attempts, 3) * backoff
            retry_at = utils.utcnow() + retry_in
            score = retry_at.timestamp()
            await self.redis_stream.zaddoption("streams", "XX", **{stream_name: score})
            raise StreamRetry(stream_name, attempts, retry_at)
Example #18
async def set_check_run(
    ctxt: "context.Context",
    name: str,
    result: Result,
    external_id: typing.Optional[str] = None,
) -> github_types.GitHubCheckRun:
    if result.conclusion is Conclusion.PENDING:
        status = Status.IN_PROGRESS
    else:
        status = Status.COMPLETED

    post_parameters = GitHubCheckRunParameters({
        "name":
        name,
        "head_sha":
        ctxt.pull["head"]["sha"],
        "status":
        typing.cast(github_types.GitHubCheckRunStatus, status.value),
        "started_at":
        utils.utcnow().isoformat(),
        "details_url":
        f"{ctxt.pull['html_url']}/checks",
        "output": {
            "title": result.title,
            "summary": result.summary,
        },
    })

    if result.annotations is not None:
        post_parameters["output"]["annotations"] = result.annotations

    # Maximum output/summary length for Check API is 65535
    summary = post_parameters["output"]["summary"]
    if summary and len(summary) > 65535:
        post_parameters["output"]["summary"] = utils.unicode_truncate(
            summary, 65532)
        post_parameters["output"]["summary"] += "…"  # this is 3 bytes long

    if external_id:
        post_parameters["external_id"] = external_id

    if status is Status.COMPLETED:
        post_parameters["conclusion"] = result.conclusion.value
        post_parameters["completed_at"] = utils.utcnow().isoformat()

    checks = sorted(
        (c for c in await ctxt.pull_engine_check_runs if c["name"] == name),
        key=lambda c: c["id"],
        reverse=True,
    )

    # Only keep the newest check, cancel the others
    for check_to_cancelled in checks[1:]:
        if Status(check_to_cancelled["status"]) != Status.COMPLETED:
            await ctxt.client.patch(
                f"{ctxt.base_url}/check-runs/{check_to_cancelled['id']}",
                json={
                    "conclusion": Conclusion.CANCELLED.value,
                    "status": Status.COMPLETED.value,
                },
            )

    if not checks or (Status(checks[0]["status"]) == Status.COMPLETED
                      and status == Status.IN_PROGRESS):
        # NOTE(sileht): First time we see it, or the previous one has been completed
        # and we now go back to in_progress. Since GitHub doesn't allow changing the
        # status of completed check-runs, we have to create a new one.
        new_check = typing.cast(
            github_types.GitHubCheckRun,
            (await ctxt.client.post(
                f"{ctxt.base_url}/check-runs",
                json=post_parameters,
            )).json(),
        )
    else:
        post_parameters["details_url"] += f"?check_run_id={checks[0]['id']}"

        # Don't do useless update
        if check_need_update(checks[0], post_parameters):
            new_check = typing.cast(
                github_types.GitHubCheckRun,
                (await ctxt.client.patch(
                    f"{ctxt.base_url}/check-runs/{checks[0]['id']}",
                    json=post_parameters,
                )).json(),
            )
        else:
            new_check = checks[0]

    await ctxt.update_pull_check_runs(new_check)
    return new_check
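
check_need_update replaces the inline compare_dict logic of the earlier versions. A hedged sketch of what it presumably checks, namely whether any field GitHub displays would actually change with the PATCH (the types come from the example above):

def check_need_update(
    previous_check: github_types.GitHubCheckRun,
    new_parameters: GitHubCheckRunParameters,
) -> bool:
    # Hypothetical sketch: PATCH only when a displayed field would change.
    for key in ("name", "head_sha", "status", "conclusion", "details_url"):
        if previous_check.get(key) != new_parameters.get(key):
            return True
    previous_output = previous_check.get("output") or {}
    new_output = new_parameters["output"]
    return any(
        previous_output.get(key) != new_output.get(key)
        for key in ("title", "summary")
    )
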