Code example #1
File: systems.py Project: interuss/dss
def get_resources(target_resources: List[Any], namespace: V1Namespace,
                  clients: Clients, log: BoundLogger,
                  cluster_name: str) -> List[Any]:
    existing_resources = []
    for target_resource in target_resources:
        if target_resource.__class__ == V1Deployment:
            existing_resource = deploylib.deployments.get(
                clients.apps, log, namespace, target_resource)
        elif target_resource.__class__ == V1Ingress:
            existing_resource = deploylib.ingresses.get(
                clients.networking, log, namespace, target_resource)
        elif target_resource.__class__ == V1Namespace:
            existing_resource = deploylib.namespaces.get(
                clients.core, log, target_resource)
        elif target_resource.__class__ == V1Service:
            existing_resource = deploylib.services.get(clients.core, log,
                                                       namespace,
                                                       target_resource)
        else:
            raise NotImplementedError('Getting {} is not yet supported'.format(
                target_resource.__class__))

        if existing_resource is None:
            log.warning(
                'No existing {} {} found in `{}` namespace of `{}` cluster'.
                format(target_resource.metadata.name,
                       target_resource.__class__.__name__,
                       namespace.metadata.name, cluster_name))
        existing_resources.append(existing_resource)
    return existing_resources
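
A hypothetical wiring of this helper (the Clients wrapper and the deploylib helpers come from the surrounding project; a real call needs a configured cluster):

from kubernetes import client, config
import structlog

config.load_kube_config()  # assumes a local kubeconfig
log = structlog.get_logger()
namespace = client.V1Namespace(
    metadata=client.V1ObjectMeta(name='default'))
# Clients is the project's wrapper around the API groups, e.g.:
# clients = Clients(apps=client.AppsV1Api(), core=client.CoreV1Api(),
#                   networking=client.NetworkingV1Api())
# existing = get_resources([namespace], namespace, clients, log, 'my-cluster')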
Code example #2
async def process_repo_queue(log: structlog.BoundLogger,
                             connection: RedisConnection,
                             queue_name: str) -> None:
    log.info("block for new repo event")
    webhook_event_json: BlockingZPopReply = await connection.bzpopmin(
        [queue_name])
    webhook_event = WebhookEvent.parse_raw(webhook_event_json.value)
    # mark this PR as being merged currently. we check this elsewhere to set proper status codes
    await connection.set(webhook_event.get_merge_target_queue_name(),
                         webhook_event.json())

    async def dequeue() -> None:
        await connection.zrem(webhook_event.get_merge_queue_name(),
                              [webhook_event.json()])

    async def queue_for_merge() -> Optional[int]:
        raise NotImplementedError

    log.info("evaluate PR for merging")
    await evaluate_pr(
        install=webhook_event.installation_id,
        owner=webhook_event.repo_owner,
        repo=webhook_event.repo_name,
        number=webhook_event.pull_request_number,
        dequeue_callback=dequeue,
        merging=True,
        is_active_merging=False,
        queue_for_merge_callback=queue_for_merge,
    )
Code example #3
def check_variable_data(data_var: xr.DataArray,
                        log: structlog.BoundLogger = LOGGER) -> None:
    for dim in data_var.dims:
        if dim not in CDM_COORDS:
            log.warning(f"unknown dimension '{dim}'")
        elif dim not in data_var.coords:
            log.error(f"dimension with no associated coordinate '{dim}'")
Code example #4
def guess_definition(
    attrs: T.Dict[str, str],
    definitions: T.Dict[str, T.Dict[str, str]],
    log: structlog.BoundLogger = LOGGER,
) -> T.Dict[str, str]:
    standard_name = attrs.get("standard_name")
    if standard_name is not None:
        log = log.bind(standard_name=standard_name)
        matching_variables = []
        for var_name, var_def in definitions.items():
            if var_def.get("standard_name") == standard_name:
                matching_variables.append(var_name)
        if len(matching_variables) == 0:
            log.warning("'standard_name' attribute not valid")
        elif len(matching_variables) == 1:
            expected_name = matching_variables[0]
            log.warning("wrong name for variable", expected_name=expected_name)
            return definitions[expected_name]
        else:
            log.warning(
                "variables with matching 'standard_name':",
                matching_variables=matching_variables,
            )
    else:
        log.warning("missing recommended attribute 'standard_name'")
    return {}
Code example #5
def sanitise_mapping(
        mapping: T.Mapping[T.Hashable, T.Any],
        log: structlog.BoundLogger = LOGGER) -> T.Dict[str, T.Any]:
    clean = {}
    for key, value in mapping.items():
        if isinstance(key, str):
            clean[key] = value
        else:
            key_repr = repr(key)
            log.warning("non-string key", key=key_repr)
            clean[key_repr] = value
    return clean
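
A quick sketch of the behaviour (passing an explicit logger; keys that are not strings are re-keyed by their repr):

import structlog

clean = sanitise_mapping({"units": "K", 42: "answer"},
                         log=structlog.get_logger())
# clean == {"units": "K", "42": "answer"}; a "non-string key" warning is logged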
Code example #6
async def update_pr_immediately_if_configured(
    m_res: MergeabilityResponse,
    event: EventInfoResponse,
    pull_request: PR,
    log: structlog.BoundLogger,
) -> None:
    if (m_res == MergeabilityResponse.NEEDS_UPDATE
            and isinstance(event.config, V1)
            and event.config.merge.update_branch_immediately):
        log.info("updating pull request")
        if not await update_pr_with_retry(pull_request):
            log.error("failed to update branch")
            await pull_request.set_status(summary="🛑 could not update branch")
Code example #7
def get_resource(list_resources: Callable[[], Any], log: BoundLogger,
                 resource_type: str, resource_name: str) -> Optional[Any]:
    log.msg('Checking for existing {}'.format(resource_type),
            name=resource_name)
    resource_list = list_resources()
    matching_resources = [
        d for d in resource_list.items if d.metadata.name == resource_name
    ]
    if len(matching_resources) > 1:
        raise ValueError('Found {} {}s matching `{}`'.format(
            len(matching_resources), resource_type, resource_name))
    if not matching_resources:
        return None
    return matching_resources[0]
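
A hypothetical call against the kubernetes client (assumes a configured AppsV1Api; the names are illustrative):

from kubernetes import client
import structlog

apps = client.AppsV1Api()
log = structlog.get_logger()
deployment = get_resource(
    lambda: apps.list_namespaced_deployment('default'),
    log, 'deployment', 'my-app')  # None if no deployment named 'my-app'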
Code example #8
File: evaluation.py Project: etiennetremel/kodiak
def get_paywall_status_for_blocker(
    pull_request: PullRequest,
    subscription_blocker: Union[SubscriptionExpired, TrialExpired, SeatsExceeded],
    log: structlog.BoundLogger,
) -> Optional[str]:
    if isinstance(subscription_blocker, SeatsExceeded):
        if pull_request.author.databaseId in subscription_blocker.allowed_user_ids:
            return None
        return "usage has exceeded licensed seats"
    if isinstance(subscription_blocker, TrialExpired):
        return "trial ended"
    if isinstance(subscription_blocker, SubscriptionExpired):
        return "subscription expired"
    log.warning("unexpected subscription_blocker %s ", subscription_blocker)
    return None
Code example #9
def check_coordinate_data(
    coord_name: T.Hashable,
    coord: xr.DataArray,
    increasing: bool = True,
    log: structlog.BoundLogger = LOGGER,
) -> None:
    diffs = coord.diff(coord_name).values
    zero = 0
    if coord.dtype.name in TIME_DTYPE_NAMES:
        zero = np.timedelta64(0, "ns")
    if increasing:
        if (diffs <= zero).any():
            log.error("coordinate stored direction is not 'increasing'")
    else:
        if (diffs >= zero).any():
            log.error("coordinate stored direction is not 'decreasing'")
Code example #10
File: evaluation.py Project: etiennetremel/kodiak
def get_merge_method(
    cfg_merge_method: Optional[MergeMethod],
    valid_merge_methods: List[MergeMethod],
    log: structlog.BoundLogger,
) -> MergeMethod:
    if cfg_merge_method is not None:
        return cfg_merge_method

    # take the first valid merge method.
    for merge_method in MERGE_METHODS:
        if merge_method in valid_merge_methods:
            return merge_method

    # NOTE(chdsbd): I don't think the following code should be reachable in
    # production, but I don't want to blow things up with an assert.
    log.warning(
        "no merge methods selected.",
        cfg_merge_method=cfg_merge_method,
        valid_merge_methods=valid_merge_methods,
    )
    return MergeMethod.merge
Code example #11
def check_dataset_data_vars(
    dataset_data_vars: T.Mapping[T.Hashable, xr.DataArray],
    log: structlog.BoundLogger = LOGGER,
) -> T.Tuple[T.Dict[str, xr.DataArray], T.Dict[str, xr.DataArray]]:
    data_vars = sanitise_mapping(dataset_data_vars, log=log)
    payload_vars = {}
    ancillary_vars = {}
    for name, var in data_vars.items():
        if name in CDM_ANCILLARY_VARS:
            ancillary_vars[name] = var
        else:
            payload_vars[name] = var
    if len(payload_vars) > 1:
        log.error(
            "file must have at most one non-auxiliary variable",
            data_vars=list(payload_vars),
        )
    for data_var_name, data_var in payload_vars.items():
        log = log.bind(data_var_name=data_var_name)
        check_variable(data_var_name, data_var, CDM_DATA_VARS, log=log)
    return payload_vars, ancillary_vars
Code example #12
def check_variable(
    data_var_name: str,
    data_var: xr.DataArray,
    definitions: T.Dict[str, T.Dict[str, str]],
    log: structlog.BoundLogger = LOGGER,
) -> None:
    attrs = sanitise_mapping(data_var.attrs, log)
    if data_var_name in definitions:
        definition = definitions[data_var_name]
    else:
        log.warning("unexpected name for variable")
        definition = guess_definition(attrs, definitions, log)
    check_variable_attrs(data_var.attrs,
                         definition,
                         dtype=data_var.dtype.name,
                         log=log)
    check_variable_data(data_var, log=log)
    if data_var.dims == (data_var_name, ):
        increasing = definition.get("stored_direction",
                                    "increasing") == "increasing"
        check_coordinate_data(data_var_name, data_var, increasing, log)
Code example #13
def upsert_resource(existing_resource: Optional[Any], target_resource: Any,
                    log: BoundLogger, resource_type: str,
                    create: Callable[[], Any], patch: Callable[[],
                                                               Any]) -> Any:
    if existing_resource is not None:
        if comparisons.specs_are_the_same(existing_resource, target_resource):
            log.msg('Existing {} does not need to be updated'.format(
                resource_type),
                    name=existing_resource.metadata.name)
            new_resource = existing_resource
        else:
            log.msg('Updating existing {}'.format(resource_type))
            new_resource = patch()
            log.msg('Updated {}'.format(resource_type),
                    name=new_resource.metadata.name)
    else:
        log.msg('Creating new {}'.format(resource_type))
        new_resource = create()
        log.msg('Created {}'.format(resource_type),
                name=new_resource.metadata.name)
    return new_resource
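
A hypothetical invocation for a deployment (kubernetes client call signatures; existing_dep, target_dep, clients and namespace are illustrative names from the sketches above):

new_dep = upsert_resource(
    existing_resource=existing_dep,  # e.g. from get_resource above
    target_resource=target_dep,
    log=log,
    resource_type='deployment',
    create=lambda: clients.apps.create_namespaced_deployment(
        namespace.metadata.name, target_dep),
    patch=lambda: clients.apps.patch_namespaced_deployment(
        target_dep.metadata.name, namespace.metadata.name, target_dep))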
Code example #14
async def process_webhook_event(
    connection: RedisConnection,
    webhook_queue: RedisWebhookQueue,
    queue_name: str,
    log: structlog.BoundLogger,
) -> None:
    log.info("block for new webhook event")
    webhook_event_json: BlockingZPopReply = await connection.bzpopmin(
        [queue_name])
    log.info("parsing webhook event")
    webhook_event = WebhookEvent.parse_raw(webhook_event_json.value)
    is_active_merging = (await connection.get(
        webhook_event.get_merge_target_queue_name()) == webhook_event.json())

    async def dequeue() -> None:
        await connection.zrem(webhook_event.get_merge_queue_name(),
                              [webhook_event.json()])

    async def queue_for_merge() -> Optional[int]:
        return await webhook_queue.enqueue_for_repo(event=webhook_event)

    log.info("evaluate pr for webhook event")
    await evaluate_pr(
        install=webhook_event.installation_id,
        owner=webhook_event.repo_owner,
        repo=webhook_event.repo_name,
        number=webhook_event.pull_request_number,
        merging=False,
        dequeue_callback=dequeue,
        queue_for_merge_callback=queue_for_merge,
        is_active_merging=is_active_merging,
    )
Code example #15
File: evaluation.py Project: jukben/kodiak
def get_merge_method(
    cfg_merge_method: Optional[MergeMethod],
    valid_merge_methods: List[MergeMethod],
    labels: List[str],
    log: structlog.BoundLogger,
) -> MergeMethod:

    # parse merge.method override label
    # example: `kodiak: merge.method = "rebase"`
    for label in labels:
        if not label.startswith("kodiak:"):
            continue
        # we have an existing label "kodiak:disabled". This label will not parse
        # here and will be ignored.
        _start, _sep, maybe_config = label.partition("kodiak:")
        try:
            merge_method_override = MergeMethodOverride.parse_obj(
                toml.loads(maybe_config)
            )
        except (toml.TomlDecodeError, pydantic.ValidationError):
            continue
        return merge_method_override.merge.method

    if cfg_merge_method is not None:
        return cfg_merge_method

    # take the first valid merge method.
    for merge_method in MERGE_METHODS:
        if merge_method in valid_merge_methods:
            return merge_method

    # NOTE(chdsbd): I don't think the following code should be reachable in
    # production, but I don't want to blow things up with an assert.
    log.warning(
        "no merge methods selected.",
        cfg_merge_method=cfg_merge_method,
        valid_merge_methods=valid_merge_methods,
    )
    return MergeMethod.merge
Code example #16
def check_dataset_attrs(dataset_attrs: T.Mapping[T.Hashable, T.Any],
                        log: structlog.BoundLogger = LOGGER) -> None:
    attrs = sanitise_mapping(dataset_attrs, log)
    conventions = attrs.get("Conventions")
    if conventions is None:
        log.warning("missing required 'Conventions' global attribute")
    elif conventions not in {"CF-1.8", "CF-1.7", "CF-1.6"}:
        log.warning("invalid 'Conventions' value", conventions=conventions)

    for attr_name in CDM_ATTRS:
        if attr_name not in attrs:
            log.warning(f"missing recommended global attribute '{attr_name}'")
Code example #17
async def process_repo_queue(log: structlog.BoundLogger,
                             connection: RedisConnection,
                             queue_name: str) -> None:
    log.info("block for new repo event")
    webhook_event_json: BlockingZPopReply = await connection.bzpopmin(
        [queue_name])
    webhook_event = WebhookEvent.parse_raw(webhook_event_json.value)
    target_name = webhook_event.get_merge_target_queue_name()
    # mark this PR as being merged currently. we check this elsewhere to set proper status codes
    await connection.set(target_name, webhook_event.json())
    await connection.set(target_name + ":time", str(webhook_event_json.score))

    async def dequeue() -> None:
        await connection.zrem(webhook_event.get_merge_queue_name(),
                              [webhook_event.json()])

    async def requeue() -> None:
        await connection.zadd(
            webhook_event.get_webhook_queue_name(),
            {webhook_event.json(): time.time()},
            only_if_not_exists=True,
        )

    async def queue_for_merge(*, first: bool) -> Optional[int]:
        raise NotImplementedError

    log.info("evaluate PR for merging")
    await evaluate_pr(
        install=webhook_event.installation_id,
        owner=webhook_event.repo_owner,
        repo=webhook_event.repo_name,
        number=webhook_event.pull_request_number,
        dequeue_callback=dequeue,
        requeue_callback=requeue,
        merging=True,
        is_active_merging=False,
        queue_for_merge_callback=queue_for_merge,
    )
    log.info("merge completed, remove target marker", target_name=target_name)
    await connection.delete([target_name])
    await connection.delete([target_name + ":time"])
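
A hypothetical driver for this worker (illustrative; one repo event is handled per iteration):

import structlog

async def repo_worker(connection: RedisConnection, queue_name: str) -> None:
    # each pass blocks on bzpopmin inside process_repo_queue
    log = structlog.get_logger().bind(queue=queue_name)
    while True:
        await process_repo_queue(log, connection, queue_name)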
Code example #18
async def process_webhook_event(
    connection: RedisConnection,
    webhook_queue: RedisWebhookQueue,
    queue_name: str,
    log: structlog.BoundLogger,
) -> None:
    log.info("block for new webhook event")
    webhook_event_json: BlockingZPopReply = await connection.bzpopmin(
        [queue_name])
    webhook_event = WebhookEvent.parse_raw(webhook_event_json.value)
    async with Client(
            owner=webhook_event.repo_owner,
            repo=webhook_event.repo_name,
            installation_id=webhook_event.installation_id,
    ) as api_client:
        pull_request = PR(
            owner=webhook_event.repo_owner,
            repo=webhook_event.repo_name,
            number=webhook_event.pull_request_number,
            installation_id=webhook_event.installation_id,
            client=api_client,
        )
        is_merging = (await connection.get(
            webhook_event.get_merge_target_queue_name()
        ) == webhook_event.json())
        # trigger status updates
        m_res, event = await pull_request.mergeability()
        if event is None or m_res == MergeabilityResponse.NOT_MERGEABLE:
            # remove ineligible events from the merge queue
            await connection.zrem(webhook_event.get_merge_queue_name(),
                                  [webhook_event.json()])
            return
        if m_res == MergeabilityResponse.SKIPPABLE_CHECKS:
            log.info("skippable checks")
            return
        await update_pr_immediately_if_configured(m_res, event, pull_request,
                                                  log)

        if m_res not in (
                MergeabilityResponse.NEEDS_UPDATE,
                MergeabilityResponse.NEED_REFRESH,
                MergeabilityResponse.WAIT,
                MergeabilityResponse.OK,
                MergeabilityResponse.SKIPPABLE_CHECKS,
        ):
            raise Exception("Unknown MergeabilityResponse")

        if isinstance(event.config, V1) and event.config.merge.do_not_merge:
            # we duplicate the status messages found in the mergeability
            # function here because status messages for WAIT and NEEDS_UPDATE
            # are only set when Kodiak hits the merging logic.
            if m_res == MergeabilityResponse.WAIT:
                await pull_request.set_status(summary="⌛️ waiting for checks")
            if m_res in {
                    MergeabilityResponse.OK,
                    MergeabilityResponse.SKIPPABLE_CHECKS,
            }:
                await pull_request.set_status(summary="✅ okay to merge")
            log.debug(
                "skipping merging for PR because `merge.do_not_merge` is configured."
            )
            return

        if (isinstance(event.config, V1)
                and event.config.merge.prioritize_ready_to_merge
                and m_res == MergeabilityResponse.OK):
            merge_success = await pull_request.merge(event)
            if merge_success:
                return
            log.error("problem merging PR")

        # don't clobber statuses set in the merge loop
        # The following responses are okay to add to merge queue:
        #   + NEEDS_UPDATE - okay for merging
        #   + NEED_REFRESH - assume okay
        #   + WAIT - assume checks pass
        #   + OK - we've got the green
        webhook_event_jsons = await webhook_queue.enqueue_for_repo(
            event=webhook_event)
        if is_merging:
            return

        position = find_position(webhook_event_jsons, webhook_event_json.value)
        if position is None:
            return
        # use 1-based indexing
        humanized_position = inflection.ordinalize(position + 1)
        await pull_request.set_status(
            f"📦 enqueued for merge (position={humanized_position})")
Code example #19
async def process_repo_queue(log: structlog.BoundLogger,
                             connection: RedisConnection,
                             queue_name: str) -> None:
    log.info("block for new repo event")
    webhook_event_json: BlockingZPopReply = await connection.bzpopmin(
        [queue_name])
    webhook_event = WebhookEvent.parse_raw(webhook_event_json.value)
    # mark this PR as being merged currently. we check this elsewhere to set proper status codes
    await connection.set(webhook_event.get_merge_target_queue_name(),
                         webhook_event.json())
    async with Client(
            owner=webhook_event.repo_owner,
            repo=webhook_event.repo_name,
            installation_id=webhook_event.installation_id,
    ) as api_client:
        pull_request = PR(
            owner=webhook_event.repo_owner,
            repo=webhook_event.repo_name,
            number=webhook_event.pull_request_number,
            installation_id=webhook_event.installation_id,
            client=api_client,
        )

        # mark that we're working on this PR
        await pull_request.set_status(summary="⛴ attempting to merge PR")
        skippable_check_timeout = 4
        while True:
            # there are two exits to this loop:
            # - OK MergeabilityResponse
            # - NOT_MERGEABLE MergeabilityResponse
            #
            # otherwise we continue to poll the Github API for a status change
            # from the other states: NEEDS_UPDATE, NEED_REFRESH, WAIT

            # TODO(chdsbd): Replace enum response with exceptions
            m_res, event = await pull_request.mergeability(merging=True)
            log = log.bind(res=m_res)
            if event is None or m_res == MergeabilityResponse.NOT_MERGEABLE:
                log.info("cannot merge")
                break
            if m_res == MergeabilityResponse.SKIPPABLE_CHECKS:
                if skippable_check_timeout:
                    skippable_check_timeout -= 1
                    await asyncio.sleep(RETRY_RATE_SECONDS)
                    continue
                await pull_request.set_status(
                    summary="⌛️ waiting a bit for skippable checks")
                break

            if m_res == MergeabilityResponse.NEEDS_UPDATE:
                log.info("update pull request and don't attempt to merge")

                if await update_pr_with_retry(pull_request):
                    continue
                log.error("failed to update branch")
                await pull_request.set_status(
                    summary="🛑 could not update branch")
                # break to find next PR to try and merge
                break
            elif m_res == MergeabilityResponse.NEED_REFRESH:
                # trigger a git mergeability check on Github's end and poll for result
                log.info("needs refresh")
                await pull_request.trigger_mergeability_check()
                continue
            elif m_res == MergeabilityResponse.WAIT:
                # continuously poll until we either get an OK or a failure for mergeability
                log.info("waiting for status checks")
                continue
            elif m_res == MergeabilityResponse.OK:
                # continue to try and merge
                pass
            else:
                raise Exception("Unknown MergeabilityResponse")

            retries = 5
            while retries:
                log.info("merge")
                if await pull_request.merge(event):
                    # success merging
                    break
                retries -= 1
                log.info("retry merge")
                await asyncio.sleep(RETRY_RATE_SECONDS)
            else:
                log.error("Exhausted attempts to merge pull request")
Code example #20
async def process_webhook_event(
    connection: RedisConnection,
    webhook_queue: RedisWebhookQueue,
    queue_name: str,
    log: structlog.BoundLogger,
) -> None:
    log.info("block for new webhook event")
    webhook_event_json: BlockingZPopReply = await connection.bzpopmin(
        [queue_name])
    webhook_event = WebhookEvent.parse_raw(webhook_event_json.value)
    async with Client(
            owner=webhook_event.repo_owner,
            repo=webhook_event.repo_name,
            installation_id=webhook_event.installation_id,
    ) as api_client:
        pull_request = PR(
            owner=webhook_event.repo_owner,
            repo=webhook_event.repo_name,
            number=webhook_event.pull_request_number,
            installation_id=webhook_event.installation_id,
            client=api_client,
        )
        is_merging = (await connection.get(
            webhook_event.get_merge_target_queue_name()
        ) == webhook_event.json())
        # trigger status updates
        m_res, event = await pull_request.mergeability()
        if event is None or m_res == MergeabilityResponse.NOT_MERGEABLE:
            # remove ineligible events from the merge queue
            await connection.zrem(webhook_event.get_merge_queue_name(),
                                  [webhook_event.json()])
            return
        if m_res == MergeabilityResponse.SKIPPABLE_CHECKS:
            log.info("skippable checks")
            return
        await update_pr_immediately_if_configured(m_res, event, pull_request,
                                                  log)

        if m_res not in (
                MergeabilityResponse.NEEDS_UPDATE,
                MergeabilityResponse.NEED_REFRESH,
                MergeabilityResponse.WAIT,
                MergeabilityResponse.OK,
                MergeabilityResponse.SKIPPABLE_CHECKS,
        ):
            raise Exception("Unknown MergeabilityResponse")

        # don't clobber statuses set in the merge loop
        # The following responses are okay to add to merge queue:
        #   + NEEDS_UPDATE - okay for merging
        #   + NEED_REFRESH - assume okay
        #   + WAIT - assume checks pass
        #   + OK - we've got the green
        webhook_event_jsons = await webhook_queue.enqueue_for_repo(
            event=webhook_event)
        if is_merging:
            return

        position = find_position(webhook_event_jsons, webhook_event_json.value)
        if position is None:
            return
        # use 1-based indexing
        humanized_position = inflection.ordinalize(position + 1)
        await pull_request.set_status(
            f"📦 enqueued for merge (position={humanized_position})")
Code example #21
def check_dataset_coords(dataset_coords: T.Mapping[T.Hashable, T.Any],
                         log: structlog.BoundLogger = LOGGER) -> None:
    coords = sanitise_mapping(dataset_coords, log=log)
    for coord_name, coord in coords.items():
        log = log.bind(coord_name=coord_name)
        check_variable(coord_name, coord, CDM_COORDS, log=log)
Code example #22
File: pull_request.py Project: chdsbd/kodiak
async def evaluate_pr(
    install: str,
    owner: str,
    repo: str,
    number: int,
    merging: bool,
    dequeue_callback: Callable[[], Awaitable[None]],
    requeue_callback: Callable[[], Awaitable[None]],
    queue_for_merge_callback: QueueForMergeCallback,
    is_active_merging: bool,
    log: structlog.BoundLogger,
) -> None:
    skippable_check_timeout = 4
    api_call_retries_remaining = 5
    api_call_errors = []  # type: List[APICallError]
    log = log.bind(owner=owner, repo=repo, number=number, merging=merging)
    while True:
        log.info("get_pr")
        try:
            pr = await asyncio.wait_for(
                get_pr(
                    install=install,
                    owner=owner,
                    repo=repo,
                    number=number,
                    dequeue_callback=dequeue_callback,
                    requeue_callback=requeue_callback,
                    queue_for_merge_callback=queue_for_merge_callback,
                ),
                timeout=60,
            )
            if pr is None:
                log.info("failed to get_pr")
                return
            try:
                await asyncio.wait_for(
                    mergeable(
                        api=pr,
                        subscription=pr.event.subscription,
                        config=pr.event.config,
                        config_str=pr.event.config_str,
                        config_path=pr.event.config_file_expression,
                        app_id=conf.GITHUB_APP_ID,
                        repository=pr.event.repository,
                        pull_request=pr.event.pull_request,
                        branch_protection=pr.event.branch_protection,
                        review_requests=pr.event.review_requests,
                        bot_reviews=pr.event.bot_reviews,
                        contexts=pr.event.status_contexts,
                        check_runs=pr.event.check_runs,
                        commits=pr.event.commits,
                        valid_merge_methods=pr.event.valid_merge_methods,
                        merging=merging,
                        is_active_merge=is_active_merging,
                        skippable_check_timeout=skippable_check_timeout,
                        api_call_errors=api_call_errors,
                        api_call_retries_remaining=api_call_retries_remaining,
                    ),
                    timeout=60,
                )
                log.info("evaluate_pr successful")
            except RetryForSkippableChecks:
                if skippable_check_timeout > 0:
                    skippable_check_timeout -= 1
                    log.info("waiting for skippable checks to pass")
                    await asyncio.sleep(RETRY_RATE_SECONDS)
                    continue
            except PollForever:
                log.info("polling")
                await asyncio.sleep(POLL_RATE_SECONDS)
                continue
            except ApiCallException as e:
                # if we have some api exception, it's likely a temporary error that
                # can be resolved by calling GitHub again.
                if api_call_retries_remaining:
                    api_call_errors.append(
                        APICallError(
                            api_name=e.method,
                            http_status=str(e.status_code),
                            response_body=str(e.response),
                        )
                    )
                    api_call_retries_remaining -= 1
                    log.info("problem contacting remote api. retrying")
                    continue
                log.warning("api_call_retries_remaining", exc_info=True)
            return
        except asyncio.TimeoutError:
            # On timeout we add the PR to the back of the queue to try again.
            log.warning("mergeable_timeout", exc_info=True)
            await requeue_callback()
Code example #23
def check_variable_attrs(
    variable_attrs: T.Mapping[T.Hashable, T.Any],
    definition: T.Dict[str, str],
    dtype: T.Optional[str] = None,
    log: structlog.BoundLogger = LOGGER,
) -> None:
    attrs = sanitise_mapping(variable_attrs, log)

    if "long_name" not in attrs:
        log.warning("missing recommended attribute 'long_name'")
    if "units" not in attrs:
        if dtype not in TIME_DTYPE_NAMES:
            log.warning("missing recommended attribute 'units'")
    else:
        units = attrs.get("units")
        expected_units = definition.get("units")
        if expected_units is not None:
            log = log.bind(expected_units=expected_units)
            cf_units = cfunits.Units(units)
            if not cf_units.isvalid:
                log.warning("'units' attribute not valid", units=units)
            else:
                expected_cf_units = cfunits.Units(expected_units)
                log = log.bind(units=units, expected_units=expected_units)
                if not cf_units.equivalent(expected_cf_units):
                    log.warning(
                        "'units' attribute not equivalent to the expected")
                elif not cf_units.equals(expected_cf_units):
                    log.warning("'units' attribute not equal to the expected")

    standard_name = attrs.get("standard_name")
    expected_standard_name = definition.get("standard_name")
    if expected_standard_name is not None:
        log = log.bind(expected_standard_name=expected_standard_name)
        if standard_name is None:
            log.warning("missing expected attribute 'standard_name'")
        elif standard_name != expected_standard_name:
            log.warning("'standard_name' attribute not valid",
                        standard_name=standard_name)
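
A sketch of the units path (assumes the cfunits dependency and the module LOGGER; 'degC' converts to 'K' but is not equal to it):

attrs = {"long_name": "air temperature", "units": "degC"}
definition = {"units": "K", "standard_name": "air_temperature"}
check_variable_attrs(attrs, definition)
# warns that 'units' is not equal to the expected ("degC" vs "K") and that
# the expected 'standard_name' attribute is missing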
Code example #24
File: systems.py Project: interuss/dss
def delete_resources(existing_resources: List[Any], namespace: V1Namespace,
                     clients: Clients, log: BoundLogger) -> None:
    for existing_resource in existing_resources:
        if existing_resource is None:
            pass
        elif existing_resource.__class__ == V1Deployment:
            log.msg('Deleting deployment')
            status = clients.apps.delete_namespaced_deployment(
                name=existing_resource.metadata.name,
                namespace=namespace.metadata.name)
            log.msg('Deployment deleted', message=status.message)
        elif existing_resource.__class__ == V1Ingress:
            log.msg('Deleting ingress')
            status = clients.networking.delete_namespaced_ingress(
                name=existing_resource.metadata.name,
                namespace=namespace.metadata.name)
            log.msg('Ingress deleted', message=status.message)
        elif existing_resource.__class__ == V1Namespace:
            log.msg('Deleting namespace')
            status = clients.core.delete_namespace(
                name=namespace.metadata.name)
            log.msg('Namespace deleted', message=status.message)
        elif existing_resource.__class__ == V1Service:
            log.msg('Deleting service')
            svc = clients.core.delete_namespaced_service(
                name=existing_resource.metadata.name,
                namespace=namespace.metadata.name)
            log.msg('Service deleted', name=svc.metadata.name)
        else:
            raise NotImplementedError(
                'Deleting {} is not yet supported'.format(
                    existing_resource.__class__))