Example #1
    def run(self):
        # Get wires from user
        wire_strings = self.get_list_or_quit(
            lambda x: x.lower() in Color.__members__.keys(), 
            range(3, 7), 
            'Input wire colors separated by a space.')

        # If our input test returned None, the user is quitting.  Else, continue
        if wire_strings is None:
            return

        self.wires = list(map(lambda x: Color[x.lower()], wire_strings))
        self.color_counts = Counter(self.wires)

        # Fire the correct method depending on number of wires
        wire_num = {
            3: self.three,
            4: self.four,
            5: self.five,
            6: self.six,
        }[len(self.wires)]()

        if wire_num is None:
            return

        self.output_and_wait(
            'Cut the {} wire', 
            [ordinalize(wire_num), 'last'][wire_num == len(self.wires)])
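The last statement picks between an ordinal and the word 'last' by indexing a two-element list with a boolean. A minimal sketch of that idiom (wire values invented, assuming ordinalize is inflection's):

from inflection import ordinalize

wires = ['red', 'blue', 'black', 'white']
wire_num = 4
# indexing a two-element list with a bool picks item 1 when True (True == 1)
label = [ordinalize(wire_num), 'last'][wire_num == len(wires)]
assert label == 'last'  # -> 'Cut the last wire'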
Example #2
 def run(self, edit):
     set_replacements(
         self.view, edit,
         map(
             lambda selection: Replacement(
                 selection,
                 inflection.ordinalize(self.view.substr(selection))),
             self.view.sel()))
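`Replacement` and `set_replacements` are project helpers not shown here. Assuming this is a Sublime Text `TextCommand`, a self-contained variant might apply the edits directly (`inflection.ordinalize` accepts numeric strings, since it coerces with `int` internally):

import sublime_plugin
import inflection

class OrdinalizeSelectionsCommand(sublime_plugin.TextCommand):
    # Hypothetical self-contained variant of the command above, applying
    # replacements directly instead of via set_replacements.
    def run(self, edit):
        # walk selections in reverse so earlier edits don't shift later regions
        for region in reversed(list(self.view.sel())):
            number = self.view.substr(region)
            self.view.replace(edit, region, inflection.ordinalize(number))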
Example #3
 def eliminate_ordinals_in_place(cls, keyword_processor):
     for i in range(1000):
         numstr = inflection.ordinalize(i)
         numset = copy(keyword_processor[numstr])
         if numset is not None:
             result_set = set([it for it in numset if it[0] != numstr])
             if len(result_set) == 0:
                 keyword_processor.remove_keyword(numstr)
             else:
                 keyword_processor[numstr] = result_set
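For reference, the keys being scrubbed are the ordinal strings `inflection.ordinalize` produces for 0 through 999; a quick look at the first few, plus the teen special cases:

import inflection

print([inflection.ordinalize(i) for i in range(4)])
# -> ['0th', '1st', '2nd', '3rd']
print([inflection.ordinalize(i) for i in (11, 12, 13, 21)])
# -> ['11th', '12th', '13th', '21st']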
Example #4
 def check(self, evaluated_expression, message=None):
     self.expression_counter += 1
     if not evaluated_expression:
         if message is None:
             message = "{nth} expression failed".format(
                 nth=inflection.ordinalize(self.expression_counter),
             )
         self.problems.append("{i}: {msg}".format(
             i=self.expression_counter,
             msg=message,
         ))
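Only the method is shown; a minimal self-contained harness (assuming the surrounding class needs just `expression_counter` and `problems`):

import inflection

class Checker:
    # Minimal sketch of the surrounding class; attribute names assumed
    # from the method body above.
    def __init__(self):
        self.expression_counter = 0
        self.problems = []

    def check(self, evaluated_expression, message=None):
        self.expression_counter += 1
        if not evaluated_expression:
            if message is None:
                message = "{nth} expression failed".format(
                    nth=inflection.ordinalize(self.expression_counter))
            self.problems.append("{i}: {msg}".format(
                i=self.expression_counter, msg=message))

checker = Checker()
checker.check(1 + 1 == 2)   # passes, nothing recorded
checker.check(2 + 2 == 5)   # records "2: 2nd expression failed"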
Example #5
def generate_tests(prob: problem.Problem,
                   executable_path: pathlib.Path,
                   prefix: str,
                   num_cases: int,
) -> bool:
    if not executable_path.is_file():
        with color_utils.ColorizeStderrError():
            print('ERROR:', executable_path, 'is not a file',
                  file=sys.stderr)
        return False

    if not os.access(executable_path, os.X_OK):
        with color_utils.ColorizeStderrError():
            print('ERROR:', executable_path, 'must be executable!',
                  file=sys.stderr)
        return False

    print('Generating', num_cases, 'test cases for problem', prob.name,
          'with executable', executable_path,
          file=sys.stderr)

    existing_test_ids = set(prob.get_test_ids())
    id_gen = (prefix + '{:06d}'.format(i) for i in itertools.count())
    new_test_ids = list(itertools.islice(
        (test_id for test_id in id_gen if test_id not in existing_test_ids),
        num_cases))

    rng = random.SystemRandom()
    for i, test_id in enumerate(new_test_ids):
        seed = rng.getrandbits(31)
        print('Generating', inflection.ordinalize(i), 'case',
              'with id', test_id, 'and seed', seed,
              file=sys.stderr)
        input_path = prob.get_test_input_path(test_id)
        answer_path = prob.get_test_answer_path(test_id)

        if not _generate(i, seed, executable_path, input_path, answer_path):
            with color_utils.ColorizeStderrError():
                print('Terminating early due to error.',
                      file=sys.stderr)
            return False

    with color_utils.ColorizeStderrGood():
        print('Successfully generated', num_cases, 'test cases for',
              'problem', prob.name,
              file=sys.stderr)
    return True
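The id-allocation idiom here is worth isolating: lazily generate zero-padded candidates and keep the first num_cases that don't collide with existing ids. (Note the progress log ordinalizes the zero-based i, so the first case prints as '0th', matching the zero-based ids.) A standalone sketch with invented ids:

import itertools

existing_test_ids = {'t000000', 't000002'}
id_gen = ('t' + '{:06d}'.format(i) for i in itertools.count())
# take the first 3 generated ids that aren't already used
new_test_ids = list(itertools.islice(
    (test_id for test_id in id_gen if test_id not in existing_test_ids), 3))
assert new_test_ids == ['t000001', 't000003', 't000004']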
Example #6
 def visitor_cookie_handler(self, request):
     # Use get_server_side_cookie() to obtain the visits cookie, casting
     # the value returned to an integer and defaulting to 1 if the cookie
     # doesn't exist.
     visits = int(self.get_server_side_cookie(request, 'visits', '1'))
     last_visit_cookie = self.get_server_side_cookie(request, 'last_visit',
                                                     str(datetime.now()))
     last_visit_time = datetime.strptime(last_visit_cookie[:-7],
                                         '%Y-%m-%d %H:%M:%S')
     # If it's been more than a day since the last visit...
     # (timedelta.seconds is capped below 86400, so compare .days instead)
     if (datetime.now() - last_visit_time).days > 0:
         # Update the visits count and the last_visit time
         visits = visits + 1
         request.session['last_visit'] = str(datetime.now())
     else:
         # Keep the existing last visit cookie
         request.session['last_visit'] = last_visit_cookie
     # Update/set the visits cookie
     request.session['visits'] = visits
     visits = inflection.ordinalize(visits)
     return visits
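The `[:-7]` slice strips the `.ffffff` microseconds suffix that `str(datetime.now())` normally produces, so the string matches the `strptime` format. A quick check (note the slice would misbehave in the rare case microseconds are exactly zero, when `str()` omits the suffix):

from datetime import datetime

stamp = str(datetime(2024, 1, 2, 3, 4, 5, 678901))
# -> '2024-01-02 03:04:05.678901'
parsed = datetime.strptime(stamp[:-7], '%Y-%m-%d %H:%M:%S')
assert parsed == datetime(2024, 1, 2, 3, 4, 5)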
Example #7
def test_ordinalize(number, ordinalized):
    assert ordinalized == inflection.ordinalize(number)
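The parametrization is not shown; assuming pytest, the fixture data presumably looks something like:

import pytest
import inflection

@pytest.mark.parametrize("number, ordinalized", [
    (1, "1st"), (2, "2nd"), (3, "3rd"), (4, "4th"),
    (11, "11th"), (12, "12th"), (13, "13th"),
    (21, "21st"), (102, "102nd"), (113, "113th"),
])
def test_ordinalize(number, ordinalized):
    assert ordinalized == inflection.ordinalize(number)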
Example #8
async def mergeable(
    api: PRAPI,
    config: Union[config.V1, pydantic.ValidationError, toml.TomlDecodeError],
    config_str: str,
    config_path: str,
    pull_request: PullRequest,
    branch_protection: Optional[BranchProtectionRule],
    review_requests: List[PRReviewRequest],
    reviews: List[PRReview],
    contexts: List[StatusContext],
    check_runs: List[CheckRun],
    valid_signature: bool,
    valid_merge_methods: List[MergeMethod],
    merging: bool,
    is_active_merge: bool,
    skippable_check_timeout: int,
    api_call_retry_timeout: int,
    api_call_retry_method_name: Optional[str],
    app_id: Optional[str] = None,
) -> None:
    log = logger.bind(
        config=config,
        pull_request=pull_request,
        branch_protection=branch_protection,
        review_requests=review_requests,
        reviews=reviews,
        contexts=contexts,
        valid_signature=valid_signature,
        valid_merge_methods=valid_merge_methods,
    )

    # we set is_active_merge when the PR is being merged from the merge queue.
    # We don't want to clobber any statuses set by that system, so we take no
    # action. If the PR becomes ineligible for merging that logic will handle
    # it.
    async def set_status(msg: str,
                         markdown_content: Optional[str] = None) -> None:
        # don't clobber statuses set via merge loop.
        if is_active_merge:
            return
        await api.set_status(
            msg,
            latest_commit_sha=pull_request.latest_sha,
            markdown_content=markdown_content,
        )

    if not isinstance(config, V1):
        log.warning("problem fetching config")
        await set_status(
            '⚠️ Invalid configuration (Click "Details" for more info.)',
            markdown_content=get_markdown_for_config(config,
                                                     config_str=config_str,
                                                     git_path=config_path),
        )
        await api.dequeue()
        return

    if api_call_retry_timeout == 0:
        log.warning("timeout reached for api calls to GitHub")
        if api_call_retry_method_name is not None:
            await set_status(
                f"⚠️ problem contacting GitHub API with method {api_call_retry_method_name!r}"
            )
        else:
            await set_status("⚠️ problem contacting GitHub API")
        return

    # if we have an app_id in the config then we only want to work on this repo
    # if our app_id from the environment matches the configuration.
    if config.app_id is not None and config.app_id != app_id:
        log.info("missing required app_id")
        await api.dequeue()
        return

    if branch_protection is None:
        await cfg_err(
            api,
            pull_request,
            f"missing branch protection for baseRef: {pull_request.baseRefName!r}",
        )
        return

    if branch_protection.requiresCommitSignatures and config.merge.method in (
            MergeMethod.rebase,
            MergeMethod.squash,
    ):
        await cfg_err(
            api,
            pull_request,
            '"Require signed commits" branch protection is only supported with merge commits. Squash and rebase are not supported by GitHub.',
        )
        return

    if config.merge.method not in valid_merge_methods:
        valid_merge_methods_str = [
            method.value for method in valid_merge_methods
        ]
        await cfg_err(
            api,
            pull_request,
            f"configured merge.method {config.merge.method.value!r} is invalid. Valid methods for repo are {valid_merge_methods_str!r}",
        )
        return

    # we keep the configuration errors before the rest of the application logic
    # so configuration issues are surfaced as early as possible.

    if (pull_request.author.login in config.approve.auto_approve_usernames
            and pull_request.state == PullRequestState.OPEN
            and pull_request.mergeStateStatus != MergeStateStatus.DRAFT):
        # if the PR was created by an approve author and we have not previously
        # given an approval, approve the PR.
        sorted_reviews = sorted(reviews, key=lambda x: x.createdAt)
        kodiak_reviews = [
            review for review in sorted_reviews
            if review.author.login == KODIAK_LOGIN
        ]
        status = review_status(kodiak_reviews)
        if status != PRReviewState.APPROVED:
            await api.approve_pull_request()
        else:
            log.info("approval already exists, not adding another")

    need_branch_update = (branch_protection.requiresStrictStatusChecks
                          and pull_request.mergeStateStatus
                          == MergeStateStatus.BEHIND)
    meets_label_requirement = (config.merge.automerge_label
                               in pull_request.labels
                               or not config.update.require_automerge_label)
    if (need_branch_update and not merging and config.update.always
            and meets_label_requirement):
        await set_status(
            "🔄 updating branch",
            markdown_content=
            "branch updated because `update.always = true` is configured.",
        )
        await api.update_branch()
        return

    if (config.merge.require_automerge_label
            and config.merge.automerge_label not in pull_request.labels):
        await block_merge(
            api,
            pull_request,
            f"missing automerge_label: {config.merge.automerge_label!r}",
        )
        return
    blacklist_labels = set(config.merge.blacklist_labels) & set(
        pull_request.labels)
    if blacklist_labels:
        await block_merge(api, pull_request,
                          f"has blacklist_labels: {blacklist_labels!r}")
        return

    if (config.merge.blacklist_title_regex
            and re.search(config.merge.blacklist_title_regex,
                          pull_request.title) is not None):
        await block_merge(
            api,
            pull_request,
            f"title matches blacklist_title_regex: {config.merge.blacklist_title_regex!r}",
        )
        return

    if pull_request.mergeStateStatus == MergeStateStatus.DRAFT:
        await block_merge(api, pull_request, "pull request is in draft state")
        return

    if config.merge.block_on_reviews_requested and review_requests:
        names = [r.name for r in review_requests]
        await block_merge(api, pull_request, f"reviews requested: {names!r}")
        return

    if pull_request.state == PullRequestState.MERGED:
        log.info(
            "pull request merged. config.merge.delete_branch_on_merge=%r",
            config.merge.delete_branch_on_merge,
        )
        await api.dequeue()
        if not config.merge.delete_branch_on_merge or pull_request.isCrossRepository:
            return
        pr_count = await api.pull_requests_for_ref(ref=pull_request.headRefName
                                                   )
        # if we couldn't access the dependent PR count or we have dependent PRs
        # we will abort deleting this branch.
        if pr_count is None or pr_count > 0:
            log.info("skipping branch deletion because of dependent PRs",
                     pr_count=pr_count)
            return
        await api.delete_branch(branch_name=pull_request.headRefName)
        return

    if pull_request.state == PullRequestState.CLOSED:
        await api.dequeue()
        return
    if (pull_request.mergeStateStatus == MergeStateStatus.DIRTY
            or pull_request.mergeable == MergeableState.CONFLICTING):
        await block_merge(api, pull_request, "merge conflict")
        # remove label if configured and send message
        if config.merge.notify_on_conflict and config.merge.require_automerge_label:
            automerge_label = config.merge.automerge_label
            await api.remove_label(automerge_label)
            body = textwrap.dedent(f"""
            This PR currently has a merge conflict. Please resolve this and then re-add the `{automerge_label}` label.
            """)
            await api.create_comment(body)
        return

    if pull_request.mergeStateStatus == MergeStateStatus.UNSTABLE:
        # TODO(chdsbd): This status means that the pr is mergeable but has failing
        # status checks. we may want to handle this via config
        pass

    if pull_request.mergeable == MergeableState.UNKNOWN:
        # we need to trigger a test commit to fix this. We do that by calling
        # GET on the pull request endpoint.
        await api.trigger_test_commit()
        return

    wait_for_checks = False
    if pull_request.mergeStateStatus in (
            MergeStateStatus.BLOCKED,
            MergeStateStatus.BEHIND,
    ):
        # figure out why we can't merge. There isn't a way to get this simply from the GitHub API. We need to find out ourselves.
        #
        # I think it's possible to find out blockers from branch protection issues
        # https://developer.github.com/v4/object/branchprotectionrule/?#fields
        #
        # - missing reviews
        # - blocking reviews
        # - missing required status checks
        # - failing required status checks
        # - branch not up to date (should be handled before this)
        # - missing required signature
        if (branch_protection.requiresApprovingReviews
                and branch_protection.requiredApprovingReviewCount):
            reviews_by_author: MutableMapping[
                str, List[PRReview]] = defaultdict(list)
            for review in sorted(reviews, key=lambda x: x.createdAt):
                if review.author.permission not in {
                        Permission.ADMIN, Permission.WRITE
                }:
                    continue
                reviews_by_author[review.author.login].append(review)

            successful_reviews = 0
            for author_name, review_list in reviews_by_author.items():
                review_state = review_status(review_list)
                # blocking review
                if review_state == PRReviewState.CHANGES_REQUESTED:
                    await block_merge(api, pull_request,
                                      f"changes requested by {author_name!r}")
                    return
                # successful review
                if review_state == PRReviewState.APPROVED:
                    successful_reviews += 1
            # missing required review count
            if successful_reviews < branch_protection.requiredApprovingReviewCount:
                await block_merge(
                    api,
                    pull_request,
                    f"missing required reviews, have {successful_reviews!r}/{branch_protection.requiredApprovingReviewCount!r}",
                )
                return

        required: Set[str] = set()
        passing: Set[str] = set()

        if branch_protection.requiresStatusChecks:
            skippable_contexts: List[str] = []
            failing_contexts: List[str] = []
            pending_contexts: List[str] = []
            passing_contexts: List[str] = []
            required = set(branch_protection.requiredStatusCheckContexts)
            for status_context in contexts:
                # handle dont_wait_on_status_checks. We want to consider a
                # status_check failed if it is incomplete and in the
                # configuration.
                if (status_context.context
                        in config.merge.dont_wait_on_status_checks
                        and status_context.state
                        in (StatusState.EXPECTED, StatusState.PENDING)):
                    skippable_contexts.append(status_context.context)
                    continue
                if status_context.state in (StatusState.ERROR,
                                            StatusState.FAILURE):
                    failing_contexts.append(status_context.context)
                elif status_context.state in (
                        StatusState.EXPECTED,
                        StatusState.PENDING,
                ):
                    pending_contexts.append(status_context.context)
                else:
                    assert status_context.state == StatusState.SUCCESS
                    passing_contexts.append(status_context.context)
            for check_run in check_runs:
                if (check_run.name in config.merge.dont_wait_on_status_checks
                        and check_run.conclusion
                        in (None, CheckConclusionState.NEUTRAL)):
                    skippable_contexts.append(check_run.name)
                    continue
                if check_run.conclusion is None:
                    continue
                if check_run.conclusion == CheckConclusionState.SUCCESS:
                    passing_contexts.append(check_run.name)
                if check_run.conclusion in (
                        CheckConclusionState.ACTION_REQUIRED,
                        CheckConclusionState.FAILURE,
                        CheckConclusionState.TIMED_OUT,
                ):
                    failing_contexts.append(check_run.name)
            passing = set(passing_contexts)
            failing = set(failing_contexts)
            # we have failing statuses that are required
            failing_required_status_checks = failing & required
            # GitHub has undocumented logic for travis-ci checks in GitHub
            # branch protection rules. GitHub compresses
            # "continuous-integration/travis-ci/{pr,push}" to
            # "continuous-integration/travis-ci". There is only special handling
            # for these specific checks.
            if "continuous-integration/travis-ci" in required:
                if "continuous-integration/travis-ci/pr" in failing:
                    failing_required_status_checks.add(
                        "continuous-integration/travis-ci/pr")
                if "continuous-integration/travis-ci/push" in failing:
                    failing_required_status_checks.add(
                        "continuous-integration/travis-ci/push")
                # either check can satisfy continuous-integration/travis-ci, but
                # if either fails they'll also block the merge.
                if ("continuous-integration/travis-ci/pr" in passing
                        or "continuous-integration/travis-ci/push" in passing):
                    required.remove("continuous-integration/travis-ci")
            if failing_required_status_checks:
                # NOTE(chdsbd): We need to skip this PR because it would block
                # the merge queue. We may be able to bump it to the back of the
                # queue, but it's easier just to remove it all together. There
                # is a similar question for the review counting.
                await block_merge(
                    api,
                    pull_request,
                    f"failing required status checks: {failing_required_status_checks!r}",
                )
                return
            if skippable_contexts:
                if merging:
                    if skippable_check_timeout > 0:
                        await set_status(
                            f"⛴ merging PR (waiting a bit for dont_wait_on_status_checks: {skippable_contexts!r})"
                        )
                        raise RetryForSkippableChecks
                    log.warning(
                        "timeout reached waiting for dont_wait_on_status_checks",
                        skippable_contexts=skippable_contexts,
                    )
                    await set_status(
                        f"⚠️ timeout reached for dont_wait_on_status_checks: {skippable_contexts!r}"
                    )
                await set_status(
                    f"🛑 not waiting for dont_wait_on_status_checks: {skippable_contexts!r}"
                )
                return

        missing_required_status_checks = required - passing
        wait_for_checks = bool(branch_protection.requiresStatusChecks
                               and missing_required_status_checks)

        if config.merge.update_branch_immediately and need_branch_update:
            await set_status(
                "🔄 updating branch",
                markdown_content=
                "branch updated because `merge.update_branch_immediately = true` is configured.",
            )
            await api.update_branch()
            return

        if merging:
            # prioritize branch updates over waiting for status checks to complete
            if config.merge.optimistic_updates:
                if need_branch_update:
                    await set_status("⛴ merging PR (updating branch)")
                    await api.update_branch()
                    raise PollForever
                if wait_for_checks:
                    await set_status(
                        f"⛴ merging PR (waiting for status checks: {missing_required_status_checks!r})"
                    )
                    raise PollForever
            # almost the same as the previous case, but we prioritize status checks
            # over branch updates.
            else:
                if wait_for_checks:
                    await set_status(
                        f"⛴ merging PR (waiting for status checks: {missing_required_status_checks!r})"
                    )
                    raise PollForever
                if need_branch_update:
                    await set_status("⛴ merging PR (updating branch)")
                    await api.update_branch()
                    raise PollForever

        # if we reach this point and we don't need to wait for checks or
        # update a branch, we've failed to calculate why the PR is blocked.
        # This should _not_ happen normally.
        if not (wait_for_checks or need_branch_update):
            await block_merge(api, pull_request,
                              "Merging blocked by GitHub requirements")
            log.warning("merge blocked for unknown reason")
            return
    ready_to_merge = not (wait_for_checks or need_branch_update)

    if config.merge.do_not_merge:
        if wait_for_checks:
            await set_status(
                f"⌛️ waiting for required status checks: {missing_required_status_checks!r}"
            )
        elif need_branch_update:
            await set_status(
                "⚠️ need branch update (suggestion: use merge.update_branch_immediately with merge.do_not_merge)",
                markdown_content="""\
When `merge.do_not_merge = true` is configured `merge.update_branch_immediately = true` \
is recommended so Kodiak can automatically update branches.

By default, Kodiak is efficient and only update branches when merging a PR, but \
when `merge.do_not_merge` is enabled, Kodiak never has that opportunity to \
update a branch during merge. `merge.update_branch_immediately = true` will \
trigger Kodiak to update branches whenever a PR is outdated and not failing any \
branch protection requirements.
""",
            )
        else:
            await set_status("✅ okay to merge")
        log.info(
            "eligible to merge, stopping because config.merge.do_not_merge is enabled."
        )
        return

    # okay to merge if we reach this point.

    if (config.merge.prioritize_ready_to_merge and ready_to_merge) or merging:
        merge_args = get_merge_body(config, pull_request)
        await set_status("⛴ attempting to merge PR (merging)")
        await api.merge(
            merge_method=merge_args.merge_method,
            commit_title=merge_args.commit_title,
            commit_message=merge_args.commit_message,
        )
    else:
        position_in_queue = await api.queue_for_merge()
        if position_in_queue is None:
            # this case should be rare/impossible.
            log.warning("couldn't find position for enqueued PR")
            return
        ordinal_position = inflection.ordinalize(position_in_queue + 1)
        if not is_active_merge:
            await set_status(
                f"📦 enqueued for merge (position={ordinal_position})")
        else:
            log.info(
                "not setting status message for enqueued job because is_active_merge=True"
            )
    return
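The travis-ci compression handling is the subtlest part of the status-check logic above. A standalone sketch of the set arithmetic (context names invented): a passing /pr variant satisfies the compressed requirement, yet a failing /push variant still blocks the merge.

required = {"ci/lint", "continuous-integration/travis-ci"}
passing = {"ci/lint", "continuous-integration/travis-ci/pr"}
failing = {"continuous-integration/travis-ci/push"}

failing_required_status_checks = failing & required  # empty: names differ
if "continuous-integration/travis-ci" in required:
    # the /pr and /push variants stand in for the compressed name
    if "continuous-integration/travis-ci/pr" in failing:
        failing_required_status_checks.add("continuous-integration/travis-ci/pr")
    if "continuous-integration/travis-ci/push" in failing:
        failing_required_status_checks.add("continuous-integration/travis-ci/push")
    # either variant passing satisfies the compressed requirement
    if ("continuous-integration/travis-ci/pr" in passing
            or "continuous-integration/travis-ci/push" in passing):
        required.remove("continuous-integration/travis-ci")

assert failing_required_status_checks == {"continuous-integration/travis-ci/push"}
assert required - passing == set()  # no required check is missing, yet one fails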
Example #9
 def get_mail_text(self, trans_list):
     critical = any([entry.repeat > 0 for entry in trans_list])
     disabled = any([entry.post_disable for entry in trans_list])
     subject = "{}{}ICSW Transaction info for {}: {}".format(
         "[REP] " if critical else "",
         "[DIS] " if disabled else "",
         logging_tools.get_plural("transaction", len(trans_list)),
         ", ".join(sorted([_trans.name for _trans in trans_list])),
     )
     cur_time = time.time()
     REPORT_TIME = 3600
     # return a mail text body for the given transaction list
     mail_text = [
         "Local time: {}".format(time.ctime(cur_time)),
         "{} initiated:".format(logging_tools.get_plural("transaction", len(trans_list))),
         "",
     ] + [
         "   - service {}, action is {}{}{}".format(
             _trans.name,
             _trans.action,
             " for the {} time".format(
                 inflection.ordinalize(_trans.repeat + 1)
             ) if _trans.repeat else "",
             " (will be disabled)" if _trans.post_disable else "",
         ) for _trans in trans_list
     ] + [
         ""
     ]
     with self.get_cursor() as crsr:
         for _trans in trans_list:
             _srv_id = crsr.execute(
                 "SELECT idx FROM service WHERE name=?",
                 (_trans.name,),
             ).fetchone()[0]
             _states = crsr.execute(
                 "SELECT pstate, cstate, license_state, created, proc_info_str FROM state WHERE service=? AND created > ? ORDER BY -created",
                 (_srv_id, cur_time - REPORT_TIME)
             ).fetchall()
             _actions = crsr.execute(
                 "SELECT action, success, runtime, finished, created FROM action WHERE service=? AND created > ? ORDER BY -created",
                 (_srv_id, cur_time - REPORT_TIME)
             ).fetchall()
             mail_text.extend(
                 [
                     "{} and {} for service {} in the last {}:".format(
                         logging_tools.get_plural("state", len(_states)),
                         logging_tools.get_plural("action", len(_states)),
                         _trans.name,
                         logging_tools.get_diff_time_str(REPORT_TIME),
                     ),
                     "",
                 ] + [
                     "{} pstate={}, cstate={}, license_state={} [{}]".format(
                         time.ctime(int(_state[3])),
                         _state[0],
                         _state[1],
                         _state[2],
                         _state[4],
                     ) for _state in _states
                 ] + [
                     ""
                 ] + [
                     "{} action={}, runtime={:.2f} [{} / {}]".format(
                         time.ctime(int(_action[4])),
                         _action[0],
                         _action[2],
                         _action[1],
                         _action[3],
                     ) for _action in _actions
                 ]
             )
     return subject, mail_text
Example #10
async def webhook_event_consumer(*, connection: RedisConnection,
                                 webhook_queue: RedisWebhookQueue,
                                 queue_name: str) -> typing.NoReturn:
    """
    Worker to process incoming webhook events from redis

    1. process mergeability information and update github check status for pr
    2. enqueue pr into repo queue for merging, if mergeability passed
    """
    log = logger.bind(queue=queue_name)
    log.info("start webhook event consumer")

    while True:
        log.info("block for new webhook event")
        webhook_event_json: BlockingZPopReply = await connection.bzpopmin(
            [queue_name])
        webhook_event = WebhookEvent.parse_raw(webhook_event_json.value)
        async with Client(
                owner=webhook_event.repo_owner,
                repo=webhook_event.repo_name,
                installation_id=webhook_event.installation_id,
        ) as api_client:
            pull_request = PR(
                owner=webhook_event.repo_owner,
                repo=webhook_event.repo_name,
                number=webhook_event.pull_request_number,
                installation_id=webhook_event.installation_id,
                client=api_client,
            )
            is_merging = (await connection.get(
                webhook_event.get_merge_target_queue_name()
            ) == webhook_event.json())
            # trigger status updates
            m_res, event = await pull_request.mergeability()
            if event is None or m_res == MergeabilityResponse.NOT_MERGEABLE:
                # remove ineligible events from the merge queue
                await connection.zrem(webhook_event.get_merge_queue_name(),
                                      [webhook_event.json()])
                continue
            if m_res not in (
                    MergeabilityResponse.NEEDS_UPDATE,
                    MergeabilityResponse.NEED_REFRESH,
                    MergeabilityResponse.WAIT,
                    MergeabilityResponse.OK,
            ):
                raise Exception("Unknown MergeabilityResponse")

            # don't clobber statuses set in the merge loop
            # The following responses are okay to add to merge queue:
            #   + NEEDS_UPDATE - okay for merging
            #   + NEED_REFRESH - assume okay
            #   + WAIT - assume checks pass
            #   + OK - we've got the green
            webhook_event_jsons = await webhook_queue.enqueue_for_repo(
                event=webhook_event)
            if is_merging:
                continue

            position = find_position(webhook_event_jsons,
                                     webhook_event_json.value)
            if position is None:
                continue
            # use 1-based indexing
            humanized_position = inflection.ordinalize(position + 1)
            await pull_request.set_status(
                f"📦 enqueued for merge (position={humanized_position})")
Example #11
def ordinalize(number):
    return inflection.ordinalize(int(number))

# Copy the library docstring onto the wrapper. Assigning __doc__ inside the
# function body only creates a local variable, so it must happen after the
# definition.
ordinalize.__doc__ = inflection.ordinalize.__doc__
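The int() coercion lets the wrapper accept values that arrive as strings; a hypothetical registration as a Jinja2 template filter, purely for illustration:

from jinja2 import Environment

env = Environment()
env.filters["ordinalize"] = ordinalize
print(env.from_string("Finished in {{ place | ordinalize }} place").render(place="3"))
# -> Finished in 3rd place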
Example #12
async def process_webhook_event(
    connection: RedisConnection,
    webhook_queue: RedisWebhookQueue,
    queue_name: str,
    log: structlog.BoundLogger,
) -> None:
    log.info("block for new webhook event")
    webhook_event_json: BlockingZPopReply = await connection.bzpopmin(
        [queue_name])
    webhook_event = WebhookEvent.parse_raw(webhook_event_json.value)
    async with Client(
            owner=webhook_event.repo_owner,
            repo=webhook_event.repo_name,
            installation_id=webhook_event.installation_id,
    ) as api_client:
        pull_request = PR(
            owner=webhook_event.repo_owner,
            repo=webhook_event.repo_name,
            number=webhook_event.pull_request_number,
            installation_id=webhook_event.installation_id,
            client=api_client,
        )
        is_merging = (await connection.get(
            webhook_event.get_merge_target_queue_name()
        ) == webhook_event.json())
        # trigger status updates
        m_res, event = await pull_request.mergeability()
        if event is None or m_res == MergeabilityResponse.NOT_MERGEABLE:
            # remove ineligible events from the merge queue
            await connection.zrem(webhook_event.get_merge_queue_name(),
                                  [webhook_event.json()])
            return
        if m_res == MergeabilityResponse.SKIPPABLE_CHECKS:
            log.info("skippable checks")
            return
        await update_pr_immediately_if_configured(m_res, event, pull_request,
                                                  log)

        if m_res not in (
                MergeabilityResponse.NEEDS_UPDATE,
                MergeabilityResponse.NEED_REFRESH,
                MergeabilityResponse.WAIT,
                MergeabilityResponse.OK,
                MergeabilityResponse.SKIPPABLE_CHECKS,
        ):
            raise Exception("Unknown MergeabilityResponse")

        # don't clobber statuses set in the merge loop
        # The following responses are okay to add to merge queue:
        #   + NEEDS_UPDATE - okay for merging
        #   + NEED_REFRESH - assume okay
        #   + WAIT - assume checks pass
        #   + OK - we've got the green
        webhook_event_jsons = await webhook_queue.enqueue_for_repo(
            event=webhook_event)
        if is_merging:
            return

        position = find_position(webhook_event_jsons, webhook_event_json.value)
        if position is None:
            return
        # use 1-based indexing
        humanized_position = inflection.ordinalize(position + 1)
        await pull_request.set_status(
            f"📦 enqueued for merge (position={humanized_position})")
Example #13
def generate_tests(
    prob: problem.Problem,
    executable_path: pathlib.Path,
    prefix: str,
    num_cases: int,
) -> bool:
    if not executable_path.is_file():
        with color_utils.ColorizeStderrError():
            print('ERROR:', executable_path, 'is not a file', file=sys.stderr)
        return False

    if not os.access(executable_path, os.X_OK):
        with color_utils.ColorizeStderrError():
            print('ERROR:',
                  executable_path,
                  'must be executable!',
                  file=sys.stderr)
        return False

    print('Generating',
          num_cases,
          'test cases for problem',
          prob.name,
          'with executable',
          executable_path,
          file=sys.stderr)

    existing_test_ids = set(prob.get_test_ids())
    id_gen = (prefix + '{:06d}'.format(i) for i in itertools.count())
    new_test_ids = list(
        itertools.islice(
            (test_id
             for test_id in id_gen if test_id not in existing_test_ids),
            num_cases))

    rng = random.SystemRandom()
    for i, test_id in enumerate(new_test_ids):
        seed = rng.getrandbits(31)
        print('Generating',
              inflection.ordinalize(i),
              'case',
              'with id',
              test_id,
              'and seed',
              seed,
              file=sys.stderr)
        input_path = prob.get_test_input_path(test_id)
        answer_path = prob.get_test_answer_path(test_id)

        if not _generate(i, seed, executable_path, input_path, answer_path):
            with color_utils.ColorizeStderrError():
                print('Terminating early due to error.', file=sys.stderr)
            return False

    with color_utils.ColorizeStderrGood():
        print('Successfully generated',
              num_cases,
              'test cases for',
              'problem',
              prob.name,
              file=sys.stderr)
    return True
Example #14
 def humanize(self, answer):
   if answer.instruction == Instruction.label:
     return 'with the label {}.'.format(answer.value)
   else:
     return 'in the {} position.'.format(ordinalize(answer.value))
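A minimal sketch of the data this method expects (the `Answer` and `Instruction` shapes are assumptions):

from collections import namedtuple
from enum import Enum

from inflection import ordinalize

class Instruction(Enum):
    label = 1
    position = 2

Answer = namedtuple('Answer', ['instruction', 'value'])

answer = Answer(Instruction.position, 2)
print('in the {} position.'.format(ordinalize(answer.value)))
# -> in the 2nd position.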
Example #15
 def eliminate_ordinals_in_place(cls, keyword_processor):
     for i in range(1000):
         cls.__essential_remove(keyword_processor, inflection.ordinalize(i))
Example #16
async def mergeable(
    api: PRAPI,
    config: Union[config.V1, pydantic.ValidationError, toml.TomlDecodeError],
    config_str: str,
    config_path: str,
    pull_request: PullRequest,
    branch_protection: Optional[BranchProtectionRule],
    review_requests: List[PRReviewRequest],
    reviews: List[PRReview],
    contexts: List[StatusContext],
    check_runs: List[CheckRun],
    commits: List[Commit],
    valid_signature: bool,
    valid_merge_methods: List[MergeMethod],
    repository: RepoInfo,
    merging: bool,
    is_active_merge: bool,
    skippable_check_timeout: int,
    api_call_retries_remaining: int,
    api_call_errors: Sequence[APICallRetry],
    subscription: Optional[Subscription],
    app_id: Optional[str] = None,
) -> None:
    # TODO(chdsbd): Use structlog bind_contextvars to automatically set useful context (install id, repo, pr number).
    log = logger.bind(number=pull_request.number, url=pull_request.url)
    # we set is_active_merge when the PR is being merged from the merge queue.
    # We don't want to clobber any statuses set by that system, so we take no
    # action. If the PR becomes ineligible for merging that logic will handle
    # it.

    # rebase_fast_forward isn't determined via the GitHub UI and is always
    # available.
    valid_merge_methods = [
        *valid_merge_methods, MergeMethod.rebase_fast_forward
    ]

    async def set_status(msg: str,
                         markdown_content: Optional[str] = None) -> None:
        # don't clobber statuses set via merge loop.
        if is_active_merge:
            return
        await api.set_status(
            msg,
            latest_commit_sha=pull_request.latest_sha,
            markdown_content=markdown_content,
        )

    if not isinstance(config, V1):
        log.warning("problem fetching config")
        await set_status(
            '⚠️ Invalid configuration (Click "Details" for more info.)',
            markdown_content=get_markdown_for_config(config,
                                                     config_str=config_str,
                                                     git_path=config_path),
        )
        await api.dequeue()
        return

    if api_call_retries_remaining == 0:
        log.warning("timeout reached for api calls to GitHub")
        if api_call_errors:
            first_error = api_call_errors[0]
            await set_status(
                f"⚠️ problem contacting GitHub API with method {first_error.api_name!r}",
                markdown_content=get_markdown_for_api_call_errors(
                    errors=api_call_errors),
            )
        else:
            await set_status("⚠️ problem contacting GitHub API")
        return

    # if we have an app_id in the config then we only want to work on this repo
    # if our app_id from the environment matches the configuration.
    if config.app_id is not None and config.app_id != app_id:
        log.info("missing required app_id")
        await api.dequeue()
        return

    if branch_protection is None:
        await cfg_err(
            api,
            pull_request,
            f"missing branch protection for baseRef: {pull_request.baseRefName!r}",
        )
        return

    merge_method = get_merge_method(
        cfg_merge_method=config.merge.method,
        valid_merge_methods=valid_merge_methods,
        log=log,
        labels=pull_request.labels,
    )

    if (branch_protection.requiresCommitSignatures
            and merge_method == MergeMethod.rebase):
        await cfg_err(
            api,
            pull_request,
            '"Require signed commits" branch protection is only supported with "squash" or "merge" commits. Rebase is not supported by GitHub.',
        )
        return

    if merge_method not in valid_merge_methods:
        valid_merge_methods_str = [
            method.value for method in valid_merge_methods
        ]
        await cfg_err(
            api,
            pull_request,
            f"configured merge.method {merge_method.value!r} is invalid. Valid methods for repo are {valid_merge_methods_str!r}",
        )
        return

    if (not config.merge.do_not_merge and branch_protection.restrictsPushes and
            missing_push_allowance(branch_protection.pushAllowances.nodes)):
        await cfg_err(
            api,
            pull_request,
            "push restriction branch protection setting is missing push allowance for Kodiak",
            markdown_content=get_markdown_for_push_allowance_error(
                branch_name=pull_request.baseRefName),
        )
        return

    # we keep the configuration errors before the rest of the application logic
    # so configuration issues are surfaced as early as possible.

    if config.disable_bot_label in pull_request.labels:
        await api.dequeue()
        await api.set_status(
            f"🚨 kodiak disabled by disable_bot_label ({config.disable_bot_label}). Remove label to re-enable Kodiak.",
            latest_commit_sha=pull_request.latest_sha,
        )
        return

    if (app_config.SUBSCRIPTIONS_ENABLED and repository.is_private
            and subscription is not None
            and subscription.subscription_blocker is not None):
        # We only count private repositories in our usage calculations. A user
        # has an active subscription if a subscription exists in Redis and has
        # an empty subscription_blocker.
        #
        # We also ignore missing subscriptions. The web api will set
        # subscription blockers if usage exceeds limits.
        status_message = get_paywall_status_for_blocker(
            pull_request, subscription.subscription_blocker, log)
        if status_message is not None:
            await set_status(
                f"💳 subscription: {status_message}",
                markdown_content=get_markdown_for_paywall(),
            )
            return

    pull_request_labels = set(pull_request.labels)
    config_automerge_labels = ({config.merge.automerge_label} if isinstance(
        config.merge.automerge_label, str) else set(
            config.merge.automerge_label))
    pull_request_automerge_labels = config_automerge_labels.intersection(
        pull_request_labels)
    has_automerge_label = len(pull_request_automerge_labels) > 0

    should_dependency_automerge = (
        pull_request.author.login
        in config.merge.automerge_dependencies.usernames
        and dep_version_from_title(pull_request.title)
        in config.merge.automerge_dependencies.versions)

    # we should trigger mergeability checks whenever we encounter UNKNOWN.
    #
    # I don't foresee conflicts with checking configuration errors,
    # `config.disable_bot_label`, and the paywall before this code.
    #
    # Previously we had an issue where this code wasn't being entered because
    # `merge.blocking_title_regex` was checked first. Which caused
    # `update.always` to not operate.
    if (pull_request.mergeable == MergeableState.UNKNOWN
            and pull_request.state == PullRequestState.OPEN):
        # we need to trigger a test commit to fix this. We do that by calling
        # GET on the pull request endpoint.
        await api.trigger_test_commit()

        # queue the PR for evaluation again in case GitHub doesn't send another
        # webhook for the commit test.
        await api.requeue()

        # we don't want to abort the merge if we encounter this status check.
        # Just keep polling!
        if merging:
            raise PollForever

        return

    is_draft_pull_request = (pull_request.isDraft
                             or pull_request.mergeStateStatus
                             == MergeStateStatus.DRAFT)
    if (pull_request.author.login in config.approve.auto_approve_usernames
            and pull_request.state == PullRequestState.OPEN
            and not is_draft_pull_request):
        # if the PR was created by an approve author and we have not previously
        # given an approval, approve the PR.
        sorted_reviews = sorted(reviews, key=lambda x: x.createdAt)
        kodiak_reviews = [
            review for review in sorted_reviews
            if review.author.login == KODIAK_LOGIN
        ]
        status = review_status(kodiak_reviews)
        if status != PRReviewState.APPROVED:
            await api.approve_pull_request()
        else:
            log.info("approval already exists, not adding another")

    need_branch_update = (branch_protection.requiresStrictStatusChecks
                          and pull_request.mergeStateStatus
                          == MergeStateStatus.BEHIND)
    update_always = config.update.always and (
        has_automerge_label or not config.update.require_automerge_label)
    has_autoupdate_label = config.update.autoupdate_label in pull_request_labels
    auto_update_enabled = update_always or has_autoupdate_label

    # Dequeue pull request if out-of-date and author in
    # `update.ignored_usernames`. We cannot update or merge it.
    #
    # If `update.autoupdate_label` is applied to the pull request, bypass
    # `update.ignored_usernames` and let the pull request update.
    if need_branch_update and not has_autoupdate_label:
        if pull_request.author.login in config.update.blacklist_usernames:
            await set_status(
                f"🛑 updates blocked by update.blacklist_usernames: {config.update.blacklist_usernames!r}",
                markdown_content=
                "Apply the `update.autoupdate_label` label to enable updates for this pull request.",
            )
            await api.dequeue()
            return
        if pull_request.author.login in config.update.ignored_usernames:
            await set_status(
                f"🛑 updates blocked by update.ignored_usernames: {config.update.ignored_usernames!r}",
                markdown_content=
                "Apply the `update.autoupdate_label` label to enable updates for this pull request.",
            )
            await api.dequeue()
            return

    if need_branch_update and not merging and auto_update_enabled:
        await set_status(
            "🔄 updating branch",
            markdown_content=
            "branch updated because `update.always = true` is configured.",
        )
        await api.update_branch()
        return

    if (config.merge.require_automerge_label and not has_automerge_label
            and not should_dependency_automerge):
        await block_merge(
            api,
            pull_request,
            f"missing automerge_label: {config.merge.automerge_label!r}",
        )
        return

    # We want users to get notified a merge conflict even if the PR matches a
    # WIP title via merge.blacklist_title_regex.
    if (pull_request.mergeStateStatus == MergeStateStatus.DIRTY
            or pull_request.mergeable == MergeableState.CONFLICTING
        ) and pull_request.state == PullRequestState.OPEN:
        await block_merge(api, pull_request, "merge conflict")
        # remove label if configured and send message
        if (config.merge.notify_on_conflict
                and config.merge.require_automerge_label
                and has_automerge_label):
            automerge_label = config.merge.automerge_label
            await asyncio.gather(*[
                api.remove_label(label)
                for label in pull_request_automerge_labels
            ])
            body = textwrap.dedent(f"""
            This PR currently has a merge conflict. Please resolve this and then re-add the `{automerge_label}` label.
            """)
            await api.create_comment(body)
        return

    blacklist_labels = set(config.merge.blacklist_labels) & set(
        pull_request.labels)
    blocking_labels = set(config.merge.blocking_labels) & set(
        pull_request.labels)
    if blacklist_labels:
        await block_merge(api, pull_request,
                          f"has blacklist_labels: {blacklist_labels!r}")
        return
    if blocking_labels:
        await block_merge(api, pull_request,
                          f"has merge.blocking_labels: {blocking_labels!r}")
        return

    title_blocker = get_blocking_title_regex(config)

    if (title_blocker.pattern and re.search(title_blocker.pattern,
                                            pull_request.title) is not None):
        await block_merge(
            api,
            pull_request,
            f"title matches {title_blocker.config_key}: {title_blocker.pattern!r}",
        )
        return

    if is_draft_pull_request:
        await block_merge(api, pull_request, "pull request is in draft state")
        return

    if config.merge.block_on_reviews_requested and review_requests:
        names = [r.name for r in review_requests]
        await block_merge(api, pull_request, f"reviews requested: {names!r}")
        return

    if pull_request.state == PullRequestState.MERGED:
        log.info(
            "pull request merged. config.merge.delete_branch_on_merge=%r",
            config.merge.delete_branch_on_merge,
        )
        await api.dequeue()
        if (not config.merge.delete_branch_on_merge
                or pull_request.isCrossRepository
                or repository.delete_branch_on_merge):
            return
        pr_count = await api.pull_requests_for_ref(ref=pull_request.headRefName
                                                   )
        # if we couldn't access the dependent PR count or we have dependent PRs
        # we will abort deleting this branch.
        if pr_count is None or pr_count > 0:
            log.info("skipping branch deletion because of dependent PRs",
                     pr_count=pr_count)
            return
        await api.delete_branch(branch_name=pull_request.headRefName)
        return

    if pull_request.state == PullRequestState.CLOSED:
        await api.dequeue()
        return

    if pull_request.mergeStateStatus == MergeStateStatus.UNSTABLE:
        # TODO(chdsbd): This status means that the pr is mergeable but has failing
        # status checks. we may want to handle this via config
        pass

    wait_for_checks = False
    if pull_request.mergeStateStatus in (
            MergeStateStatus.BLOCKED,
            MergeStateStatus.BEHIND,
    ):
        # figure out why we can't merge. There isn't a way to get this simply from the GitHub API. We need to find out ourselves.
        #
        # I think it's possible to find out blockers from branch protection issues
        # https://developer.github.com/v4/object/branchprotectionrule/?#fields
        #
        # - missing reviews
        # - blocking reviews
        # - missing required status checks
        # - failing required status checks
        # - branch not up to date (should be handled before this)
        # - missing required signature
        if (branch_protection.requiresApprovingReviews
                and branch_protection.requiredApprovingReviewCount):
            reviews_by_author: MutableMapping[
                str, List[PRReview]] = defaultdict(list)
            for review in sorted(reviews, key=lambda x: x.createdAt):
                if review.author.permission not in {
                        Permission.ADMIN, Permission.WRITE
                }:
                    continue
                reviews_by_author[review.author.login].append(review)

            successful_reviews = 0
            for author_name, review_list in reviews_by_author.items():
                review_state = review_status(review_list)
                # blocking review
                if review_state == PRReviewState.CHANGES_REQUESTED:
                    await block_merge(api, pull_request,
                                      f"changes requested by {author_name!r}")
                    return
                # successful review
                if review_state == PRReviewState.APPROVED:
                    successful_reviews += 1
            # missing required review count
            if successful_reviews < branch_protection.requiredApprovingReviewCount:
                await block_merge(
                    api,
                    pull_request,
                    f"missing required reviews, have {successful_reviews!r}/{branch_protection.requiredApprovingReviewCount!r}",
                )
                return

        if pull_request.reviewDecision == PullRequestReviewDecision.REVIEW_REQUIRED:
            await block_merge(api, pull_request, "missing required reviews")
            return

        required: Set[str] = set()
        passing: Set[str] = set()

        if branch_protection.requiresStatusChecks:
            skippable_contexts: List[str] = []
            failing_contexts: List[str] = []
            pending_contexts: List[str] = []
            passing_contexts: List[str] = []
            required = set(branch_protection.requiredStatusCheckContexts)
            for status_context in contexts:
                # handle dont_wait_on_status_checks. We want to consider a
                # status_check failed if it is incomplete and in the
                # configuration.
                if (status_context.context
                        in config.merge.dont_wait_on_status_checks
                        and status_context.state
                        in (StatusState.EXPECTED, StatusState.PENDING)):
                    skippable_contexts.append(status_context.context)
                    continue
                if status_context.state in (StatusState.ERROR,
                                            StatusState.FAILURE):
                    failing_contexts.append(status_context.context)
                elif status_context.state in (
                        StatusState.EXPECTED,
                        StatusState.PENDING,
                ):
                    pending_contexts.append(status_context.context)
                else:
                    assert status_context.state == StatusState.SUCCESS
                    passing_contexts.append(status_context.context)
            for check_run in check_runs:
                if (check_run.name in config.merge.dont_wait_on_status_checks
                        and check_run.conclusion
                        in (None, CheckConclusionState.NEUTRAL)):
                    skippable_contexts.append(check_run.name)
                    continue
                if check_run.conclusion is None:
                    continue
                if check_run.conclusion == CheckConclusionState.SUCCESS:
                    passing_contexts.append(check_run.name)
                if check_run.conclusion in (
                        CheckConclusionState.ACTION_REQUIRED,
                        CheckConclusionState.FAILURE,
                        CheckConclusionState.TIMED_OUT,
                        CheckConclusionState.CANCELLED,
                        CheckConclusionState.SKIPPED,
                        CheckConclusionState.STALE,
                ):
                    failing_contexts.append(check_run.name)
            passing = set(passing_contexts)
            failing = set(failing_contexts)
            # we have failing statuses that are required
            failing_required_status_checks = failing & required
            # GitHub has undocumented logic for travis-ci checks in GitHub
            # branch protection rules. GitHub compresses
            # "continuous-integration/travis-ci/{pr,push}" to
            # "continuous-integration/travis-ci". There is only special handling
            # for these specific checks.
            if "continuous-integration/travis-ci" in required:
                if "continuous-integration/travis-ci/pr" in failing:
                    failing_required_status_checks.add(
                        "continuous-integration/travis-ci/pr")
                if "continuous-integration/travis-ci/push" in failing:
                    failing_required_status_checks.add(
                        "continuous-integration/travis-ci/push")
                # either check can satisfy continuous-integration/travis-ci, but
                # if either fails they'll also block the merge.
                if ("continuous-integration/travis-ci/pr" in passing
                        or "continuous-integration/travis-ci/push" in passing):
                    required.remove("continuous-integration/travis-ci")
            if failing_required_status_checks:
                # NOTE(chdsbd): We need to skip this PR because it would block
                # the merge queue. We may be able to bump it to the back of the
                # queue, but it's easier just to remove it altogether. There
                # is a similar question for the review counting.
                await block_merge(
                    api,
                    pull_request,
                    f"failing required status checks: {failing_required_status_checks!r}",
                )
                return
            if skippable_contexts:
                if merging:
                    if skippable_check_timeout > 0:
                        await set_status(
                            f"⛴ merging PR (waiting a bit for dont_wait_on_status_checks: {skippable_contexts!r})"
                        )
                        raise RetryForSkippableChecks
                    log.warning(
                        "timeout reached waiting for dont_wait_on_status_checks",
                        skippable_contexts=skippable_contexts,
                    )
                    await set_status(
                        f"⚠️ timeout reached for dont_wait_on_status_checks: {skippable_contexts!r}"
                    )
                await set_status(
                    f"🛑 not waiting for dont_wait_on_status_checks: {skippable_contexts!r}"
                )
                return

        missing_required_status_checks = required - passing
        wait_for_checks = bool(branch_protection.requiresStatusChecks
                               and missing_required_status_checks)

        if config.merge.update_branch_immediately and need_branch_update:
            await set_status(
                "🔄 updating branch",
                markdown_content=
                "branch updated because `merge.update_branch_immediately = true` is configured.",
            )
            await api.update_branch()
            if merging:
                raise PollForever
            return

        if merging:
            # prioritize branch updates over waiting for status checks to complete
            if config.merge.optimistic_updates:
                if need_branch_update:
                    await set_status("⛴ merging PR (updating branch)")
                    await api.update_branch()
                    raise PollForever
                if wait_for_checks:
                    await set_status(
                        f"⛴ merging PR (waiting for status checks: {missing_required_status_checks!r})"
                    )
                    raise PollForever
            # almost the same as the previous case, but we prioritize status checks
            # over branch updates.
            else:
                if wait_for_checks:
                    await set_status(
                        f"⛴ merging PR (waiting for status checks: {missing_required_status_checks!r})"
                    )
                    raise PollForever
                if need_branch_update:
                    await set_status("⛴ merging PR (updating branch)")
                    await api.update_branch()
                    raise PollForever

        # if we reach this point and we don't need to wait for checks or
        # update a branch, we've failed to calculate why the PR is blocked.
        # This should _not_ happen normally.
        if not (wait_for_checks or need_branch_update):
            await block_merge(api, pull_request,
                              "Merging blocked by GitHub requirements")
            log.warning("merge blocked for unknown reason")
            return
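    # a PR that needs neither status checks nor a branch update is ready to
    # merge right away; merge.prioritize_ready_to_merge consumes this below.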
    ready_to_merge = not (wait_for_checks or need_branch_update)

    if config.merge.do_not_merge:
        if wait_for_checks:
            await set_status(
                f"⌛️ waiting for required status checks: {missing_required_status_checks!r}"
            )
        elif need_branch_update:
            await set_status(
                "⚠️ need branch update (suggestion: use merge.update_branch_immediately with merge.do_not_merge)",
                markdown_content="""\
When `merge.do_not_merge = true` is configured `merge.update_branch_immediately = true` \
is recommended so Kodiak can automatically update branches.

By default, Kodiak is efficient and only updates branches when merging a PR, but \
when `merge.do_not_merge` is enabled, Kodiak never has that opportunity to \
update a branch during merge. `merge.update_branch_immediately = true` will \
trigger Kodiak to update branches whenever a PR is outdated and not failing any \
branch protection requirements.
""",
            )
        else:
            await set_status("✅ okay to merge")
        log.info(
            "eligible to merge, stopping because config.merge.do_not_merge is enabled."
        )
        return

    # okay to merge if we reach this point.

    if (config.merge.prioritize_ready_to_merge and ready_to_merge) or merging:
        merge_args = get_merge_body(config,
                                    merge_method,
                                    pull_request,
                                    commits=commits)
        await set_status("⛴ attempting to merge PR (merging)")
        try:
            # Use the Git Refs API to rebase merge.
            #
            # This preserves the rebased commits and their hashes. Using the
            # GitHub Pull Request API to rebase merge rewrites the rebased
            # commits, so the commit hashes change.
            #
            # For build systems that depend on commit hashes instead of tree
            # hashes, it's desirable to not rewrite commits.
            if merge_args.merge_method is MergeMethod.rebase_fast_forward:
                await api.update_ref(ref=pull_request.baseRefName,
                                     sha=pull_request.latest_sha)
            else:
                await api.merge(
                    merge_method=merge_args.merge_method,
                    commit_title=merge_args.commit_title,
                    commit_message=merge_args.commit_message,
                )
        # if we encounter an internal server error (status code 500), it is
        # _not_ safe to retry. Instead we mark the pull request as unmergable
        # and require a user to re-enable Kodiak on the pull request.
        except GitHubApiInternalServerError:
            log.warning(
                "kodiak encountered GitHub API error merging pull request",
                exc_info=True,
            )
            # We add the disable_bot_label to disable Kodiak from taking any
            # action to update, approve, comment, label, or merge.
            disable_bot_label = config.disable_bot_label
            await api.add_label(disable_bot_label)

            await block_merge(api, pull_request,
                              "Cannot merge due to GitHub API failure.")
            body = messages.format(
                textwrap.dedent(f"""
            This PR could not be merged because the GitHub API returned an internal server error. To enable Kodiak on this pull request please remove the `{disable_bot_label}` label.

            When the GitHub API returns an internal server error (HTTP status code 500), it is not safe for Kodiak to retry merging.

            For more information please see https://kodiakhq.com/docs/troubleshooting#merge-errors
            """))
            await api.create_comment(body)
        else:
            await set_status("merge complete 🎉")

    else:
        priority_merge = config.merge.priority_merge_label in pull_request.labels
        position_in_queue = await api.queue_for_merge(first=priority_merge)
        if position_in_queue is None:
            # this case should be rare/impossible.
            log.warning("couldn't find position for enqueued PR")
            return
        ordinal_position = inflection.ordinalize(position_in_queue + 1)
        if not is_active_merge:
            await set_status(
                f"📦 enqueued for merge (position={ordinal_position})")
        else:
            log.info(
                "not setting status message for enqueued job because is_active_merge=True"
            )
    return
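The queue-position status message above humanizes a 0-based index with
inflection.ordinalize. A minimal sketch of that behavior (the outputs
follow inflection's standard suffix rules, including the special "th"
handling for 11-13):

import inflection

for position in (0, 1, 2, 10, 11, 12, 21):
    # add 1 for display, matching the 1-based indexing in the example
    print(inflection.ordinalize(position + 1))
# prints: 1st, 2nd, 3rd, 11th, 12th, 13th, 22nd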
Example #17
0
async def process_webhook_event(
    connection: RedisConnection,
    webhook_queue: RedisWebhookQueue,
    queue_name: str,
    log: structlog.BoundLogger,
) -> None:
    log.info("block for new webhook event")
    webhook_event_json: BlockingZPopReply = await connection.bzpopmin(
        [queue_name])
    webhook_event = WebhookEvent.parse_raw(webhook_event_json.value)
    async with Client(
            owner=webhook_event.repo_owner,
            repo=webhook_event.repo_name,
            installation_id=webhook_event.installation_id,
    ) as api_client:
        pull_request = PR(
            owner=webhook_event.repo_owner,
            repo=webhook_event.repo_name,
            number=webhook_event.pull_request_number,
            installation_id=webhook_event.installation_id,
            client=api_client,
        )
        is_merging = (await connection.get(
            webhook_event.get_merge_target_queue_name()
        ) == webhook_event.json())
        # trigger status updates
        m_res, event = await pull_request.mergeability()
        if event is None or m_res == MergeabilityResponse.NOT_MERGEABLE:
            # remove ineligible events from the merge queue
            await connection.zrem(webhook_event.get_merge_queue_name(),
                                  [webhook_event.json()])
            return
        if m_res == MergeabilityResponse.SKIPPABLE_CHECKS:
            log.info("skippable checks")
            return
        await update_pr_immediately_if_configured(m_res, event, pull_request,
                                                  log)

        if m_res not in (
                MergeabilityResponse.NEEDS_UPDATE,
                MergeabilityResponse.NEED_REFRESH,
                MergeabilityResponse.WAIT,
                MergeabilityResponse.OK,
                MergeabilityResponse.SKIPPABLE_CHECKS,
        ):
            raise Exception("Unknown MergeabilityResponse")

        if isinstance(event.config, V1) and event.config.merge.do_not_merge:
            # we duplicate the status messages found in the mergeability
            # function here because status messages for WAIT and NEEDS_UPDATE
            # are only set when Kodiak hits the merging logic.
            if m_res == MergeabilityResponse.WAIT:
                await pull_request.set_status(summary="⌛️ waiting for checks")
            if m_res in {
                    MergeabilityResponse.OK,
                    MergeabilityResponse.SKIPPABLE_CHECKS,
            }:
                await pull_request.set_status(summary="✅ okay to merge")
            log.debug(
                "skipping merging for PR because `merge.do_not_merge` is configured."
            )
            return

        if (isinstance(event.config, V1)
                and event.config.merge.prioritize_ready_to_merge
                and m_res == MergeabilityResponse.OK):
            merge_success = await pull_request.merge(event)
            if merge_success:
                return
            log.error("problem merging PR")

        # don't clobber statuses set in the merge loop
        # The following responses are okay to add to merge queue:
        #   + NEEDS_UPDATE - okay for merging
        #   + NEED_REFRESH - assume okay
        #   + WAIT - assume checks pass
        #   + OK - we've got the green
        webhook_event_jsons = await webhook_queue.enqueue_for_repo(
            event=webhook_event)
        if is_merging:
            return

        position = find_position(webhook_event_jsons, webhook_event_json.value)
        if position is None:
            return
        # use 1-based indexing
        humanized_position = inflection.ordinalize(position + 1)
        await pull_request.set_status(
            f"📦 enqueued for merge (position={humanized_position})")
Example #18
0
def test_ordinalize(number, ordinalized):
    assert ordinalized == inflection.ordinalize(number)
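The parametrized cases for this test are not shown; a hypothetical
parametrization (expected values follow inflection's suffix rules, where
11-13 always take "th") might look like:

import pytest
import inflection

@pytest.mark.parametrize("number, ordinalized", [
    (1, "1st"),
    (2, "2nd"),
    (3, "3rd"),
    (4, "4th"),
    (11, "11th"),
    (12, "12th"),
    (13, "13th"),
    (101, "101st"),
])
def test_ordinalize(number, ordinalized):
    assert ordinalized == inflection.ordinalize(number)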
Example #19
0
def get_team_data(tid):

    # Request sb home page
    search_url = "http://www.smallball.com/ball/home/index.shtml"
    response = request.urlopen(search_url)

    # Parse html from response
    soup = BeautifulSoup(response, 'html.parser')
    season = soup.find('td', attrs={'class': 'text_22'})
    season = season.text.strip()
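    # drop the first 7 characters (a leading label such as "Season ",
    # presumably) to keep just the season number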
    season = season[7:]

    # Post to SB search page
    search_data = {"oid": tid, "Search": "search"}
    search_url = "http://smallball.com/go/on.ball.team"
    data = parse.urlencode(search_data).encode()
    response = request.urlopen(search_url, data)

    # Parse html from post response
    soup = BeautifulSoup(response, 'html.parser')

    # Get team name
    team_name = soup.find('td', attrs={'class': 'table_top_name'})
    team_name = team_name.text.strip()

    # Get team id
    team_id = soup.find('td', attrs={'class': 'table_top_id'})
    team_id = team_id.text.strip()
    team_id = team_id[5:]

    # Get team star level and last trained

    team_star = soup.find('td', attrs={'class': 'table_top_2'})
    team_star_level = team_star.find('img')['src']
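    # the star level is embedded in the image path: strip the 18-character
    # directory prefix and the 4-character file extension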
    team_star_level = team_star_level[18:]
    team_star_level = team_star_level[:-4]
    team_last_trained = (team_star.text.strip()).title()

    # Get team PL and PL position

    leagues = soup.find_all('td', attrs={'class': 'table_team_bottom1'})
    league = leagues[3]
    league = league.find('a')
    league = league.get('href')
    league = str(league).replace("/ball/leagues/pro","")
    pl = str(league).replace(".shtml","")

    position = soup.find_all('td', attrs={'class': 'table_team_bottom2'})
    position = position[3].text.strip()
    if position != "n/a":
        pl_position = inflection.ordinalize(position)
    else:
        pl_position = "n/a"

    # Get home stadium

    stadium_id = soup.find('img', attrs={'border': '2'})

    if "None" in str(stadium_id):
        stadium = "park2"
    if "park4" in str(stadium_id):
        stadium = "park4"
    if "park5" in str(stadium_id):
        stadium = "park5"
    if "park6" in str(stadium_id):
        stadium = "park6"
    if "park7" in str(stadium_id):
        stadium = "park7"
    if "park8" in str(stadium_id):
        stadium = "park8"
    if "park9" in str(stadium_id):
        stadium = "park9"
    if "park10" in str(stadium_id):
        stadium = "park10"
    if "park11" in str(stadium_id):
        stadium = "park11"
    if "park12" in str(stadium_id):
        stadium = "park12"
    if "park13" in str(stadium_id):
        stadium = "park13"

    # Get wins/losses and runs scored/against
    game_data = soup.find_all('td', attrs={'class': 'recent_text_right'})

    game_score = []
    game_result = []
    game_wins = 0
    game_losses = 0
    game_runs_for = 0
    game_runs_against = 0

    for entry in game_data:
        if 'width="10%"' in str(entry) and '?' not in str(entry):
            game_result.append(entry.text.strip())
        if 'width="13%"' in str(entry) and 'n/a' not in str(entry):
            game_score.append(entry.text.strip())

    for result, score in zip(game_result, game_score):
        first, second = (int(n) for n in score.split(":"))
        # the winner's runs are the larger number, the loser's the smaller
        if result == "W":
            game_wins += 1
            game_runs_for += max(first, second)
            game_runs_against += min(first, second)
        if result == "L":
            game_losses += 1
            game_runs_for += min(first, second)
            game_runs_against += max(first, second)

    # Calculate pct

    if game_wins == 0:
        game_pct = "0"
    else:
        game_pct = game_wins / (game_wins + game_losses)
        game_pct = format(game_pct, '.3f')

        # drop the leading zero, e.g. "0.667" -> ".667"
        if game_pct[0] == "0":
            game_pct = game_pct[1:]

    # Get a list of player names
    team = soup.find('table', attrs={'class': 'table_team_text'})
    data = team.find_all('td', 'table_team_name')
    players = []
    for player in data:
        players.append(player.text.strip())

    # Store data in objects
    myteam = Team(team_name)

    for player_id, name in enumerate(players, start=1):
        rawstats = (team.find("td", string=name)).parent
        out = (rawstats.text.strip()).splitlines()

        player = Player(str(player_id),
                        name,
                        out[1],
                        out[2].lstrip(),
                        out[3].lstrip(),
                        out[4].lstrip(),
                        out[5].lstrip(),
                        out[6].lstrip(),
                        out[7].lstrip(),
                        out[8].lstrip(),
                        out[9].lstrip(),
                        out[10].lstrip(),
                        out[11].lstrip(),
                        out[12])

        player.slg = player.compute_slg()

        myteam.add_player(player)

    myteam.get_team_stats()
    myteam.get_team_pitchers()
    myteam.tid = team_id
    myteam.season = season
    myteam.stars = int(team_star_level)
    myteam.trained = team_last_trained
    myteam.pl = pl
    myteam.pl_position = pl_position
    myteam.stadium = stadium
    myteam.wins = game_wins
    myteam.losses = game_losses
    myteam.pct = game_pct
    myteam.rs = game_runs_for
    myteam.ra = game_runs_against

    return myteam
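Note that pl_position above is built from a scraped string such as "3"
rather than an int. That still works because inflection.ordinalize uses
int() on its argument only to pick the suffix and keeps the original
value in the output, so digit strings behave like the equivalent
integers (a small sketch of that behavior):

import inflection

assert inflection.ordinalize(3) == "3rd"
assert inflection.ordinalize("3") == "3rd"  # digit strings work too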