Example #1
    def calculate_range(
        self,
        repository_branch: str,
    ) -> str:
        repo = self.git_helper.repo
        branch_head = self.git_helper.fetch_head(ref=repository_branch)
        if not branch_head:
            fail(
                f'could not determine branch head of {repository_branch} branch'
            )
        range_start = _.head(
            self.reachable_release_tags_from_commit(
                repo=repo,
                commit=branch_head,
            ),
        )

        try:
            # more readable range_end by describing the head commit
            range_end = repo.git.describe(branch_head, tags=True)
        except GitError as err:
            warning(
                'failed to describe branch head, maybe the repository has no tags? '
                f'GitError: {err}. Falling back to branch head commit hash.')
            range_end = branch_head.hexsha

        commit_range = f'{range_start}..{range_end}'
        return commit_range
Example #2
 def http_checker(*args, **kwargs):
     result = function(*args, **kwargs)
     if result.status_code < 200 or result.status_code >= 300:
         url = kwargs.get('url', None)
         warning('{c} - {m}: {u}'.format(c=result.status_code, m=result.content, u=url))
     result.raise_for_status()
     return result
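A minimal, runnable sketch of how such a wrapper is typically used as a decorator; the decorator name check_http_code, the fetch function, and the logging-based warning helper are illustrative assumptions, not part of the original snippet.

import functools
import logging

import requests


def warning(msg):
    # stand-in for the project's logging helper
    logging.getLogger(__name__).warning(msg)


def check_http_code(function):
    # hypothetical outer decorator that returns the wrapper shown above
    @functools.wraps(function)
    def http_checker(*args, **kwargs):
        result = function(*args, **kwargs)
        if result.status_code < 200 or result.status_code >= 300:
            url = kwargs.get('url', None)
            warning(f'{result.status_code} - {result.content}: {url}')
        result.raise_for_status()
        return result
    return http_checker


@check_http_code
def fetch(url=None):
    return requests.get(url=url)


# logs a warning and then raises requests.HTTPError for non-2xx responses
fetch(url='https://httpbin.org/status/404')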
Example #3
def calculate_range(
    repository_branch: str,
    git_helper: GitHelper,
    github_helper: GitHubRepositoryHelper,
) -> str:
    repo = git_helper.repo
    branch_head = git_helper.fetch_head(ref=repository_branch)
    if not branch_head:
        fail('could not determine branch head of {branch} branch'.format(
            branch=repository_branch))
    range_start = _.head(
        reachable_release_tags_from_commit(github_helper, repo, branch_head))

    try:
        # more readable range_end by describing the head commit
        range_end = repo.git.describe(branch_head, tags=True)
    except GitError as err:
        warning(
            'failed to describe branch head, maybe the repository has no tags? '
            'GitError: {err}. Falling back to branch head commit hash.'.format(
                err=err))
        range_end = branch_head.hexsha

    commit_range = "{start}..{end}".format(start=range_start, end=range_end)
    return commit_range
Example #4
    def _process_definition_descriptor(self, definition_descriptor):
        if definition_descriptor.exception:
            return DeployResult(
                definition_descriptor=definition_descriptor,
                deploy_status=DeployStatus.SKIPPED,
                error_details=definition_descriptor.exception,
            )

        preprocessed = self.descriptor_preprocessor.process_definition_descriptor(
                definition_descriptor
        )
        result = self.definition_renderer.render(preprocessed)

        if self._pipeline_name_conflict(
            definition_descriptor=result.definition_descriptor,
        ):
            # early exit upon pipeline name conflict
            pipeline_name = result.definition_descriptor.pipeline_name
            warning(f'duplicate pipeline name: {pipeline_name}')
            return DeployResult(
                definition_descriptor=definition_descriptor,
                deploy_status=DeployStatus.SKIPPED,
                error_details=f'duplicate pipeline name: {pipeline_name}',
            )

        if result.render_status == RenderStatus.SUCCEEDED:
            deploy_result = self.definition_deployer.deploy(result.definition_descriptor)
        else:
            deploy_result = DeployResult(
                definition_descriptor=definition_descriptor,
                deploy_status=DeployStatus.SKIPPED,
                error_details=result.error_details,
            )
        return deploy_result
Example #5
    def _cfg_element(self, cfg_type_name: str, cfg_name: str):
        cfg_type = self._cfg_type(cfg_type_name=cfg_type_name)

        # retrieve model class c'tor - search module and sub-modules
        # TODO: switch to fully-qualified type names
        own_module = sys.modules[__name__]

        submodule_names = [
            own_module.__name__ + '.' + m.name
            for m in pkgutil.iter_modules(own_module.__path__)
        ]
        for module_name in [__name__] + submodule_names:
            submodule_name = module_name.split('.')[-1]
            if module_name != __name__:
                module = getattr(__import__(module_name), submodule_name)
            else:
                module = sys.modules[submodule_name]

            # skip if module does not define our type
            if not hasattr(module, cfg_type.cfg_type()):
                continue

            # if type is defined, validate
            element_type = getattr(module, cfg_type.cfg_type())
            if type(element_type) is not type:
                raise ValueError()
            # found it (write to cache as part of crazy workaround for kaniko)
            self._cfg_type_cache[cfg_type_name] = element_type
            break
        else:
            # workaround for kaniko, which will purge our poor modules on multi-stage-builds
            if cfg_type_name in self._cfg_type_cache:
                element_type = self._cfg_type_cache[cfg_type_name]
            else:
                print(f'{self._cfg_type_cache=}')
                raise ValueError(
                    f'failed to find cfg type: {cfg_type.cfg_type()=}')

        # for now, let's assume all of our model element types are subtypes of NamedModelElement
        # (with the exception of ConfigurationSet)
        configs = self._configs(cfg_type.cfg_type_name())
        if cfg_name not in configs:
            raise ConfigElementNotFoundError(
                'no such cfg element: {cn}. Known: {es}'.format(
                    cn=cfg_name, es=', '.join(configs.keys())))
        kwargs = {'raw_dict': configs[cfg_name]}

        if element_type == ConfigurationSet:
            kwargs.update({'cfg_name': cfg_name, 'cfg_factory': self})
        else:
            kwargs['name'] = cfg_name

        element_instance = element_type(**kwargs)

        try:
            element_instance.validate()
        except ModelValidationError as mve:
            warning(f'validation error for {cfg_name} - ignored: {mve}')

        return element_instance
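A minimal, self-contained sketch of the underlying lookup pattern (enumerate a package and its submodules, then resolve a class constructor by name); the json/JSONDecoder pair is only used for illustration and stands in for the cfg-type resolution above.

import importlib
import pkgutil


def find_type(package_name: str, type_name: str) -> type:
    package = importlib.import_module(package_name)
    module_names = [package_name] + [
        f'{package_name}.{m.name}'
        for m in pkgutil.iter_modules(package.__path__)
    ]
    for module_name in module_names:
        module = importlib.import_module(module_name)
        candidate = getattr(module, type_name, None)
        # skip modules that do not define the requested type
        if isinstance(candidate, type):
            return candidate
    raise ValueError(f'failed to find type: {type_name}')


print(find_type('json', 'JSONDecoder'))  # <class 'json.decoder.JSONDecoder'>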
Example #6
def destroy_concourse_landscape(config_name: str, release_name: str):
    # Fetch concourse and kubernetes config
    config_factory = global_ctx().cfg_factory()
    config_set = config_factory.cfg_set(cfg_name=config_name)
    concourse_cfg = config_set.concourse()

    kubernetes_config_name = concourse_cfg.kubernetes_cluster_config()
    kubernetes_config = config_factory.kubernetes(kubernetes_config_name)
    context = kube_ctx
    context.set_kubecfg(kubernetes_config.kubeconfig())

    # Delete helm release
    helm_cmd_path = ensure_helm_setup()
    KUBECONFIG_FILE_NAME = 'kubecfg'
    helm_env = os.environ.copy()
    helm_env['KUBECONFIG'] = KUBECONFIG_FILE_NAME

    with tempfile.TemporaryDirectory() as temp_dir:
        with open(os.path.join(temp_dir, KUBECONFIG_FILE_NAME), 'w') as f:
            yaml.dump(kubernetes_config.kubeconfig(), f)

        try:
            subprocess.run([helm_cmd_path, "delete", release_name],
                           env=helm_env,
                           check=True,
                           cwd=temp_dir)
        except CalledProcessError:
            # ignore sporadic connection timeouts from infrastructure
            warning(
                "Connection to K8s cluster lost. Continue with deleting namespace {ns}"
                .format(ns=release_name))

    # delete namespace
    namespace_helper = context.namespace_helper()
    namespace_helper.delete_namespace(namespace=release_name)
Example #7
 def is_valid_semver(tag_name):
     try:
         version.parse_to_semver(tag_name)
         return True
     except ValueError:
         warning('{tag} is not a valid SemVer string'.format(tag=tag_name))
         return False
Example #8
def extract_release_notes(
    reference_type: ReferenceType,
    text: str,
    user_login: str,
    current_component: gci.componentmodel.Component,
    source_component=None,
    reference_id: str = None,
) -> typing.List[ReleaseNote]:
    """
    Keyword arguments:
    reference_type -- type of reference_id, either pull request or commit
    reference_id -- reference identifier, could be a pull request number or commit hash
    text -- release note text
    user_login -- github user_login, used for referencing the user
        in the release note via @<user_login>
    cn_current_repo -- component name of the current repository
    """
    release_notes = list()
    if not text:
        return release_notes

    CATEGORY_IDS = _ \
        .chain(CATEGORIES) \
        .map(lambda category: category.identifiers) \
        .flatten() \
        .join('|') \
        .value()

    TARGET_GROUP_IDS = _ \
        .chain(TARGET_GROUPS) \
        .map(lambda target_group: target_group.identifiers) \
        .flatten() \
        .join('|') \
        .value()

    r = re.compile(
        rf"``` *(?P<category>{CATEGORY_IDS}) (?P<target_group>{TARGET_GROUP_IDS})"
        r"( (?P<source_repo>\S+/\S+/\S+)(( (?P<reference_type>#|\$)(?P<reference_id>\S+))?"
        r"( @(?P<user>\S+))?)( .*?)?|( .*?)?)\r?\n(?P<text>.*?)\n```",
        re.MULTILINE | re.DOTALL)
    for m in r.finditer(text):
        code_block = m.groupdict()
        try:
            rls_note_block = create_release_note_block(
                code_block=code_block,
                reference_type=reference_type,
                reference_id=reference_id,
                user_login=user_login,
                current_component=current_component,
                source_component=source_component,
            )
            if not rls_note_block:
                continue
            release_notes.append(rls_note_block)
        except ModelValidationError as e:
            warning(
                f'an exception occurred while extracting release notes: {e}')
            continue
    return release_notes
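A small, self-contained demonstration of the code-block regex on sample release-note text; the category and target-group identifiers below are assumed values, not the project's actual CATEGORIES/TARGET_GROUPS.

import re

CATEGORY_IDS = 'improvement|noteworthy|bugfix'  # assumed identifiers
TARGET_GROUP_IDS = 'user|operator|developer'    # assumed identifiers

r = re.compile(
    rf"``` *(?P<category>{CATEGORY_IDS}) (?P<target_group>{TARGET_GROUP_IDS})"
    r"( (?P<source_repo>\S+/\S+/\S+)(( (?P<reference_type>#|\$)(?P<reference_id>\S+))?"
    r"( @(?P<user>\S+))?)( .*?)?|( .*?)?)\r?\n(?P<text>.*?)\n```",
    re.MULTILINE | re.DOTALL)

sample = (
    '```improvement user github.com/acme/demo #42 @alice\n'
    'the demo now starts faster\n'
    '```'
)

for m in r.finditer(sample):
    print(m.groupdict())
# prints: {'category': 'improvement', 'target_group': 'user',
#          'source_repo': 'github.com/acme/demo', 'reference_type': '#',
#          'reference_id': '42', 'user': 'alice', 'text': 'the demo now starts faster'}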
Example #9
def create_release_note_block(
    code_block: dict,
    reference_type: ReferenceType,
    user_login: str,
    current_component: gci.componentmodel.Component,
    source_component: gci.componentmodel.Component = None,
    reference_id: str = None,
) -> ReleaseNoteBlock:
    text = _.trim(code_block.get('text'))
    if not text or 'none' == text.lower():
        return None

    category = code_block.get('category')
    target_group = code_block.get('target_group')
    source_repo = code_block.get('source_repo')

    if source_component:
        reference_id = code_block.get('reference_id')
        reference_type = reference_type_for_type_identifier(
            code_block.get('reference_type'))
        user_login = code_block.get('user')
    elif source_repo:
        try:
            # try to fetch the component descriptor for the parsed source repo. The actual
            # version does not matter; we are only interested in the component's GithubAccess
            # (we assume it does not change).
            ctx_repo_url = current_component.current_repository_ctx().baseUrl
            source_component = cnudie.retrieve.component_descriptor(
                name=source_repo,
                version=product.v2.greatest_component_version(
                    component_name=source_repo,
                    ctx_repo_base_url=ctx_repo_url,
                ),
                ctx_repo_url=ctx_repo_url,
            ).component
        except requests.exceptions.HTTPError:
            warning(
                f'Unable to retrieve component descriptor for source repository {source_repo}'
            )
            return None

        reference_type = reference_type_for_type_identifier(
            code_block.get('reference_type'))
        reference_id = code_block.get('reference_id')
        user_login = code_block.get('user')
    else:
        source_component = current_component

    return ReleaseNoteBlock(
        category_id=category,
        target_group_id=target_group,
        text=text,
        reference_type=reference_type,
        reference_id=reference_id,
        user_login=user_login,
        source_component=source_component,
        current_component=current_component,
    )
Example #10
    def _notify_broken_definition_owners(self, failed_descriptor):
        definition_descriptor = failed_descriptor.definition_descriptor
        main_repo = definition_descriptor.main_repo
        github_cfg = ccc.github.github_cfg_for_hostname(main_repo['hostname'], self._cfg_set)
        github_api = ccc.github.github_api(github_cfg)
        repo_owner, repo_name = main_repo['path'].split('/')

        repo_helper = ccc.github.github_repo_helper(
            host=main_repo['hostname'],
            org=repo_owner,
            repo=repo_name,
            branch=main_repo['branch'],
        )

        codeowners_enumerator = CodeownersEnumerator()
        codeowners_resolver = CodeOwnerEntryResolver(github_api=github_api)
        recipients = set(codeowners_resolver.resolve_email_addresses(
            codeowners_enumerator.enumerate_remote_repo(github_repo_helper=repo_helper)
        ))

        # in case no codeowners are available, resort to using the committer
        if not recipients:
            head_commit = repo_helper.repository.commit(main_repo['branch'])
            user_ids = {
                user_info.get('login')
                for user_info
                in (head_commit.committer, head_commit.author)
                if user_info and user_info.get('login')
            }
            for user_id in user_ids:
                user = github_api.user(user_id)
                if user.email:
                    recipients.add(user.email)

        # if there are still no recipients available print a warning
        if not recipients:
            warning(textwrap.dedent(
                f"""
                Unable to determine recipient for pipeline '{definition_descriptor.pipeline_name}'
                found in branch '{main_repo['branch']}' ({main_repo['path']}). Please make sure that
                CODEOWNERS and committers have exposed a public e-mail address in their profile.
                """
            ))
        else:
            info(f'Sending notification e-mail to {recipients} ({main_repo["path"]})')
            email_cfg = self._cfg_set.email("ses_gardener_cloud_sap")
            _send_mail(
                email_cfg=email_cfg,
                recipients=recipients,
                subject='Your pipeline definition in {repo} is erroneous'.format(
                    repo=main_repo['path'],
                ),
                mail_template=(
                    f"The pipeline definition for pipeline '{definition_descriptor.pipeline_name}' "
                    f" on branch '{main_repo['branch']}' contains errors.\n\n"
                    f"Error details:\n{str(failed_descriptor.error_details)}"
                )
            )
Example #11
 def retrieve_image(image_reference: str):
     try:
         container.registry.retrieve_container_image(
             image_reference=image_reference)
         info(f'downloaded {image_reference}')
     except Exception:
         warning(f'failed to retrieve {image_reference}')
         import traceback
         traceback.print_exc()
Example #12
 def __init__(self, raw: dict, concourse_api, name: str):
     self.concourse_api = concourse_api
     self.name = name
     self.raw = raw['config']
     resources = self.raw.get('resources', None)
     if not resources:
         warning('Pipeline did not contain resource definitions: {p}'.format(p=name))
         raise ValueError()
     self.resources = [PipelineConfigResource(r, self) for r in resources]
Example #13
def _display_info(dry_run: bool, operation: str, **kwargs):
    info("Concourse will be {o} using helm with the following arguments".format(o=operation))
    max_len = max(map(len, kwargs.keys()))
    for k, v in kwargs.items():
        key_str = k.ljust(max_len)
        info("{k}: {v}".format(k=key_str, v=v))

    if dry_run:
        warning("this was a --dry-run. Set the --no-dry-run flag to actually deploy")
Example #14
def post_to_slack(
    release_notes: ReleaseNote,
    github_repository_name: str,
    slack_cfg_name: str,
    slack_channel: str,
    release_version: str,
    max_msg_size_bytes: int = 20000,
):
    # slack can't auto link pull requests, commits or users
    # hence we force the link generation when building the markdown string
    release_notes_md_links = release_notes.to_markdown(
        force_link_generation=True)

    # XXX slack imposes a maximum msg size
    # https://api.slack.com/changelog/2018-04-truncating-really-long-messages#

    slack_cfg = ctx().cfg_factory().slack(slack_cfg_name)
    slack_helper = SlackHelper(slack_cfg)

    idx = 0
    i = 0

    try:
        while True:
            title = f'[{github_repository_name}:{release_version} released'

            # abort on last
            if idx + max_msg_size_bytes > len(release_notes_md_links):
                did_split = i > 0
                if did_split:
                    title += ' - final]'
                else:
                    title += ']'

                msg = release_notes_md_links[idx:]
                yield slack_helper.post_to_slack(channel=slack_channel,
                                                 title=title,
                                                 message=msg)
                break

            # post part
            title += f' - part {i} ]'
            msg = release_notes_md_links[idx:idx + max_msg_size_bytes]
            yield slack_helper.post_to_slack(channel=slack_channel,
                                             title=title,
                                             message=msg)

            i += 1
            idx += max_msg_size_bytes

    except RuntimeError as e:
        warning(e)
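A stripped-down, runnable sketch of the same chunking loop with a stub in place of SlackHelper.post_to_slack; names, sizes, and the sample text are illustrative only (note that the slicing operates on character indices of the markdown string).

def post_chunks(text: str, repo: str, version: str, max_size: int = 10):
    idx = 0
    i = 0
    while True:
        title = f'[{repo}:{version} released'
        if idx + max_size > len(text):
            # last (or only) chunk
            title += ' - final]' if i > 0 else ']'
            yield (title, text[idx:])
            break
        title += f' - part {i} ]'
        yield (title, text[idx:idx + max_size])
        i += 1
        idx += max_size


for title, msg in post_chunks('x' * 25, repo='acme/demo', version='1.2.3'):
    print(title, len(msg))
# [acme/demo:1.2.3 released - part 0 ] 10
# [acme/demo:1.2.3 released - part 1 ] 10
# [acme/demo:1.2.3 released - final] 5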
Example #15
 def deploy(self, definition_descriptor):
     try:
         with open(os.path.join(self.base_dir, definition_descriptor.pipeline_name), 'w') as f:
             f.write(definition_descriptor.pipeline)
         return DeployResult(
             definition_descriptor=definition_descriptor,
             deploy_status=DeployStatus.SUCCEEDED,
         )
     except Exception as e:
         warning(e)
         return DeployResult(
             definition_descriptor=definition_descriptor,
             deploy_status=DeployStatus.FAILED,
         )
Example #16
 def _revert(self, steps):
     # attempt to revert each step. Raise an exception if not all reverts succeeded.
     all_reverted = True
     for step in steps:
         step_name = step.name()
         info(f"Reverting step {step_name}")
         try:
             step.revert()
         except BaseException as e:
             all_reverted = False
             warning(f"An error occured while reverting step '{step_name}': {e}")
             traceback.print_exc()
     if not all_reverted:
         raise RuntimeError("Unable to revert all steps.")
Example #17
 def _post_with_retry(self, client, retries=5, **kwargs):
     try:
         response = client.files_upload(**kwargs)
         return response
     except slack.errors.SlackApiError as sae:
         error_code = sae.response.get('error')
         if retries < 1:
             raise sae  # no retries left (or none requested)
         if error_code == 'markdown_conversion_failed_because_of_read_failed':
             warning(f'received {error_code} - retrying {retries}')
             return self._post_with_retry(client=client,
                                          retries=retries - 1,
                                          **kwargs)
         else:
             raise sae  # only retry for known sporadic err
Example #18
 def _resolve_team_members(self, github_team_name: str):
     not_none(github_team_name)
     org_name, team_name = github_team_name.split(
         '/')  # always of form 'org/name'
     organisation = self.github_api.organization(org_name)
     # unfortunately, we have to look-up the team (no api to retrieve it by name)
     team_or_none = _first(
         filter(lambda team: team.name == team_name, organisation.teams()))
     if not team_or_none:
         warning('failed to lookup team {t}'.format(t=team_name))
         return []
     for member in map(self.github_api.user, team_or_none.members()):
         if member.email:
             yield member.email
         else:
             warning(f'no email found for GitHub user {member}')
Example #19
 def execute(self):
     executed_steps = list()
     for step in self._steps:
         step_name = step.name()
         info(f"Applying step '{step_name}'")
         executed_steps.append(step)
         try:
             output = step.apply()
             self._context.set_step_output(step_name, output)
         except BaseException as e:
             warning(f"An error occured while applying step '{step_name}': {e}")
             traceback.print_exc()
             # revert the changes attempted, in reverse order
             self._revert(reversed(executed_steps))
             # do not execute apply for remaining steps
             return False
     return True
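A minimal sketch of the step protocol (name/apply/revert) that this execute method and the _revert method in Example #16 assume; the Step class and the standalone run_steps helper are illustrative stand-ins for the surrounding transaction class, not the project's code.

import traceback


class Step:
    # illustrative step: apply() does the work, revert() undoes it
    def __init__(self, step_name: str, should_fail: bool = False):
        self._name = step_name
        self._should_fail = should_fail

    def name(self):
        return self._name

    def apply(self):
        if self._should_fail:
            raise RuntimeError(f'{self._name} failed')
        return {'applied': self._name}

    def revert(self):
        print(f'reverted {self._name}')


def run_steps(steps):
    executed = []
    for step in steps:
        executed.append(step)
        try:
            step.apply()
        except BaseException as e:
            print(f"error while applying step '{step.name()}': {e}")
            traceback.print_exc()
            # revert the attempted changes, in reverse order
            for done in reversed(executed):
                done.revert()
            return False
    return True


print(run_steps([Step('create-namespace'), Step('deploy-chart', should_fail=True)]))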
Example #20
    def reachable_release_tags_from_commit(
            self, repo: git.Repo,
            commit: git.objects.Commit) -> typing.List[str]:
        '''Returns a list of release-tags whose tagged commits are ancestors of the given commit.

        The returned list is sorted in descending order, putting the greatest reachable tag first.
        '''
        tags = self.release_tags()

        visited = set()
        queue = list()
        queue.append(commit)
        visited.add(commit.hexsha)

        reachable_tags = list()

        while queue:
            commit = queue.pop(0)
            if commit.hexsha in tags:
                reachable_tags.append(tags[commit.hexsha])
            not_visited_parents = _.filter(
                commit.parents,
                lambda parent_commit: parent_commit.hexsha not in visited)
            if not_visited_parents:
                queue.extend(not_visited_parents)
                visited |= set(
                    _.map(not_visited_parents, lambda commit: commit.hexsha))

        reachable_tags.sort(key=lambda t: version.parse_to_semver(t),
                            reverse=True)

        if not reachable_tags:
            warning('no release tag found, falling back to root commit')
            root_commits = repo.iter_commits(rev=commit, max_parents=0)
            root_commit = next(root_commits, None)
            if not root_commit:
                fail(
                    f'could not determine root commit from rev {commit.hexsha}'
                )
            if next(root_commits, None):
                fail(
                    'cannot determine range for release notes. Repository has multiple root '
                    'commits. Specify range via commit_range parameter.')
            reachable_tags.append(root_commit.hexsha)

        return reachable_tags
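A compact, self-contained sketch of the same breadth-first walk over a toy commit graph; the Commit namedtuple and the hexsha-to-tag mapping are simplified stand-ins for git.objects.Commit and the release-tag lookup.

import collections

Commit = collections.namedtuple('Commit', ['hexsha', 'parents'])

root = Commit('a1', parents=())
tagged = Commit('b2', parents=(root,))
head = Commit('c3', parents=(tagged,))

tags = {'b2': '1.1.0', 'a1': '1.0.0'}  # hexsha -> release tag

visited = {head.hexsha}
queue = [head]
reachable_tags = []

while queue:
    commit = queue.pop(0)
    if commit.hexsha in tags:
        reachable_tags.append(tags[commit.hexsha])
    for parent in commit.parents:
        if parent.hexsha not in visited:
            visited.add(parent.hexsha)
            queue.append(parent)

print(reachable_tags)  # ['1.1.0', '1.0.0'] - still to be sorted by semver, greatest first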
Example #21
    def retrieve_scan_result(
            self,
            resource: gci.componentmodel.Resource,
            component: gci.componentmodel.Component,
            group_id: int = None,
    ):
        metadata = self._metadata(
            resource=resource,
            component=component,
            omit_version=True, # omit version when searching for existing app
            # (only one component version must exist per group by our chosen definition)
        )
        if not group_id:
            group_id = self._group_id

        existing_products = self._api.list_apps(
            group_id=group_id,
            custom_attribs=metadata
        )
        if len(existing_products) == 0:
            return None # no result existed yet

        if len(existing_products) > 1:
            warning(f"found more than one product for image '{resource.access.imageReference}'")
            products_to_rm = existing_products[1:]
            for p in products_to_rm:
                self._api.delete_product(p.product_id())
                info(
                    f'deleted product {p.display_name()} '
                    f'with product_id: {p.product_id()}'
                )

        # use first (or only) match (we already printed a warning if we found more than one)
        product = existing_products[0]
        product_id = product.product_id()

        # update upload name to reflect new component version (if changed)
        upload_name = self._upload_name(resource, component)
        self._update_product_name(product_id, upload_name)

        # retrieve existing product's details (list of products contained only subset of data)
        product = self._api.scan_result(product_id=product_id)
        return product
Example #22
 def resolve_email_addresses(self, codeowners_entries):
     '''
     returns a generator yielding the resolved email addresses for the given iterable of
     github codeowners entries.
     '''
     for codeowner_entry in codeowners_entries:
         if '@' not in codeowner_entry:
             warning(f'invalid codeowners-entry: {codeowner_entry}')
             continue
         if not codeowner_entry.startswith('@'):
             yield codeowner_entry  # plain email address
         elif '/' not in codeowner_entry:
             email_addr = self._determine_email_address(codeowner_entry[1:])
             if email_addr:
                 yield email_addr
             else:
                 continue
         else:
             yield from self._resolve_team_members(codeowner_entry[1:])
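A small, runnable sketch of the same dispatch logic with in-memory lookup tables in place of the GitHub-backed user and team resolution; the addresses and team names are illustrative only.

USER_EMAILS = {'alice': 'alice@example.com'}                              # assumed user lookup
TEAM_MEMBERS = {'acme/owners': ['bob@example.com', 'carol@example.com']}  # assumed team lookup


def resolve_email_addresses(codeowners_entries):
    for entry in codeowners_entries:
        if '@' not in entry:
            print(f'invalid codeowners-entry: {entry}')
            continue
        if not entry.startswith('@'):
            yield entry  # plain email address
        elif '/' not in entry:
            # '@<user>' - resolve via the user's public profile email
            email = USER_EMAILS.get(entry[1:])
            if email:
                yield email
        else:
            # '@<org>/<team>' - resolve all team members
            yield from TEAM_MEMBERS.get(entry[1:], [])


print(list(resolve_email_addresses(
    ['dave@example.com', '@alice', '@acme/owners', 'invalid'])))
# ['dave@example.com', 'alice@example.com', 'bob@example.com', 'carol@example.com']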
Example #23
 def increment(self,
               method=None,
               url=None,
               response=None,
               error=None,
               _pool=None,
               _stacktrace=None):
     # super().increment will either raise an exception indicating that no retry is to
     # be performed or return a new, modified instance of this class
     retry = super().increment(method, url, response, error, _pool,
                               _stacktrace)
     # Use the Retry history to determine the number of retries.
     num_retries = len(self.history) if self.history else 0
     # Retrieve host from the underlying connection pool to include it in the warning
     host = _pool.host
     warning(
         f'HTTP request (host: {host}, url: {url}, method: {method}) unsuccessful. '
         f'Retries so far: {num_retries}. Retrying ...')
     return retry
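A hedged sketch of how a Retry subclass like this is typically wired into a requests Session via an HTTPAdapter; the class name LoggingRetry and the plain logging call stand in for the project's actual names.

import logging

import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry


class LoggingRetry(Retry):
    def increment(self, method=None, url=None, response=None, error=None,
                  _pool=None, _stacktrace=None):
        # raises if no retry is left, otherwise returns a new Retry instance
        retry = super().increment(method, url, response, error, _pool, _stacktrace)
        num_retries = len(self.history) if self.history else 0
        logging.warning(
            f'HTTP request (url: {url}, method: {method}) unsuccessful. '
            f'Retries so far: {num_retries}. Retrying ...')
        return retry


session = requests.Session()
adapter = HTTPAdapter(max_retries=LoggingRetry(total=3, backoff_factor=0.5))
session.mount('https://', adapter)
session.mount('http://', adapter)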
Example #24
    def deploy(self, definition_descriptor):
        pipeline_definition = definition_descriptor.pipeline
        pipeline_name = definition_descriptor.pipeline_name
        try:
            api = client.from_cfg(
                concourse_cfg=definition_descriptor.concourse_target_cfg,
                team_name=definition_descriptor.concourse_target_team,
            )
            response = api.set_pipeline(
                name=pipeline_name,
                pipeline_definition=pipeline_definition
            )
            info(
                f'Deployed pipeline: {pipeline_name} '
                f'to team: {definition_descriptor.concourse_target_team}'
            )
            if self.unpause_pipelines:
                info(f'Unpausing pipeline {pipeline_name}')
                api.unpause_pipeline(pipeline_name=pipeline_name)
            if self.expose_pipelines:
                api.expose_pipeline(pipeline_name=pipeline_name)

            deploy_status = DeployStatus.SUCCEEDED
            if response is concourse.client.model.SetPipelineResult.CREATED:
                deploy_status |= DeployStatus.CREATED
            elif response is concourse.client.model.SetPipelineResult.UPDATED:
                pass
            else:
                raise NotImplementedError

            return DeployResult(
                definition_descriptor=definition_descriptor,
                deploy_status=deploy_status,
            )
        except Exception as e:
            import traceback
            traceback.print_exc()
            warning(e)
            return DeployResult(
                definition_descriptor=definition_descriptor,
                deploy_status=DeployStatus.FAILED,
                error_details=traceback.format_exc(),
            )
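A minimal sketch illustrating the Flag-enum semantics that the '|=' above implies for DeployStatus; the members listed here are assumptions based on the surrounding examples, not the project's definition.

import enum


class DeployStatus(enum.Flag):
    SUCCEEDED = enum.auto()
    FAILED = enum.auto()
    SKIPPED = enum.auto()
    CREATED = enum.auto()


deploy_status = DeployStatus.SUCCEEDED
deploy_status |= DeployStatus.CREATED

print(bool(deploy_status & DeployStatus.CREATED))    # True
print(bool(deploy_status & DeployStatus.SUCCEEDED))  # True
print(bool(deploy_status & DeployStatus.FAILED))     # False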
Example #25
 def render(self, definition_descriptor):
     try:
         definition_descriptor = self._render(definition_descriptor)
         info('rendered pipeline {pn}'.format(pn=definition_descriptor.pipeline_name))
         return RenderResult(
             definition_descriptor,
             render_status=RenderStatus.SUCCEEDED,
         )
     except Exception:
         warning(
             f"erroneous pipeline definition '{definition_descriptor.pipeline_name}' "
             f"in repository '{definition_descriptor.main_repo.get('path')}' on branch "
             f"'{definition_descriptor.main_repo.get('branch')}'"
         )
         traceback.print_exc()
         return RenderResult(
             definition_descriptor,
             render_status=RenderStatus.FAILED,
             error_details=traceback.format_exc(),
         )
Example #26
def sync_org_webhooks(whd_deployment_cfg: WebhookDispatcherDeploymentConfig,):
    '''Syncs required organization webhooks for a given webhook dispatcher instance'''

    failed_hooks = 0
    for organization_name, github_api, webhook_url in \
            _enumerate_required_org_webhooks(whd_deployment_cfg=whd_deployment_cfg):

        webhook_syncer = github.webhook.GithubWebHookSyncer(github_api)
        try:
            webhook_syncer.create_or_update_org_hook(
                organization_name=organization_name,
                webhook_url=webhook_url,
                skip_ssl_validation=False,
            )
            info(f'Created/updated organization hook for organization "{organization_name}"')
        except Exception as e:
            failed_hooks += 1
            warning(f'org: {organization_name} - error: {e}')

    if failed_hooks != 0:
        warning('Some webhooks could not be set - for more details see above.')
Example #27
def reachable_release_tags_from_commit(github_helper: GitHubRepositoryHelper,
                                       repo: git.Repo,
                                       commit: git.objects.Commit) -> [str]:
    tags = release_tags(github_helper, repo)

    visited = set()
    queue = list()
    queue.append(commit)
    visited.add(commit.hexsha)

    reachable_tags = list()

    while queue:
        commit = queue.pop(0)
        if commit.hexsha in tags:
            reachable_tags.append(tags[commit.hexsha])
        not_visited_parents = _.filter(
            commit.parents,
            lambda parent_commit: parent_commit.hexsha not in visited)
        if not_visited_parents:
            queue.extend(not_visited_parents)
            visited |= set(
                _.map(not_visited_parents, lambda commit: commit.hexsha))

    reachable_tags.sort(key=lambda t: version.parse_to_semver(t), reverse=True)

    if not reachable_tags:
        warning('no release tag found, falling back to root commit')
        root_commits = repo.iter_commits(rev=commit, max_parents=0)
        root_commit = next(root_commits, None)
        if not root_commit:
            fail('could not determine root commit from rev {rev}'.format(
                rev=commit.hexsha))
        if next(root_commits, None):
            fail(
                'cannot determine range for release notes. Repository has multiple root commits. '
                'Specify range via commit_range parameter.')
        reachable_tags.append(root_commit.hexsha)

    return reachable_tags
Example #28
def resurrect_pods(
    namespace: str,
    concourse_client,
    kubernetes_client,
):
    '''
    concourse pods tend to crash and need to be pruned to help with the self-healing
    '''

    info('Checking for concourse workers that are not running')
    worker_list = concourse_client.list_workers()
    pruned_workers = list()
    for worker in worker_list:
        worker_name = worker.name()
        info(f'Worker {worker_name}: {worker.state()}')
        if worker.state() != "running":
            warning(f'Prune worker {worker_name} and restart pod')
            pruned_workers.append(worker_name)
            concourse_client.prune_worker(worker_name)
            kubernetes_client.pod_helper().delete_pod(name=worker_name,
                                                      namespace=namespace)
    return pruned_workers
Example #29
    def _determine_repository_branches(
        self,
        repository,
    ):
        try:
            branch_cfg = self._branch_cfg_or_none(repository=repository)
        except LintingError as e:
            # some linting errors (and possibly warnings) present. Print warning and continue
            warning(e)
            return
        if not branch_cfg:
            # fallback for components w/o branch_cfg: use default branch
            try:
                default_branch = repository.default_branch
            except Exception:
                default_branch = 'master'
            yield (default_branch, None)
            return

        for branch in repository.branches():
            cfg_entry = branch_cfg.cfg_entry_for_branch(branch.name)
            if cfg_entry:
                yield (branch.name, cfg_entry)
Example #30
    def _cfg_element(self, cfg_type_name: str, cfg_name: str):
        cfg_type = self._cfg_types().get(cfg_type_name, None)
        if not cfg_type:
            raise ValueError('unknown cfg_type: ' + str(cfg_type_name))

        # retrieve model class c'tor - search module and sub-modules
        # TODO: switch to fully-qualified type names
        own_module = sys.modules[__name__]

        # python3.5 returns a three-tuple; python3.6+ returns a ModuleInfo
        if sys.version_info.minor <= 5:
            class ModuleInfo(object):
                def __init__(self, module_tuple):
                    self.path, self.name, _ = module_tuple

            def to_module_info(mi):
                return ModuleInfo(mi)
        else:
            def to_module_info(mi):
                return mi

        submodule_names = [
            own_module.__name__ + '.' + m.name
            for m in map(to_module_info, pkgutil.iter_modules(own_module.__path__))
        ]
        for module_name in [__name__] + submodule_names:
            submodule_name = module_name.split('.')[-1]
            if module_name != __name__:
                module = getattr(__import__(module_name), submodule_name)
            else:
                module = sys.modules[submodule_name]

            # skip if module does not define our type
            if not hasattr(module, cfg_type.cfg_type()):
                continue

            # if type is defined, validate
            element_type = getattr(module, cfg_type.cfg_type())
            if type(element_type) is not type:
                raise ValueError()
            # found it
            break
        else:
            raise ValueError('failed to find cfg type: ' + str(cfg_type.cfg_type()))

        # for now, let's assume all of our model element types are subtypes of NamedModelElement
        # (with the exception of ConfigurationSet)
        configs = self._configs(cfg_type.cfg_type_name())
        if cfg_name not in configs:
            raise ConfigElementNotFoundError(
                'no such cfg element: {cn}. Known: {es}'.format(
                    cn=cfg_name,
                    es=', '.join(configs.keys()),
                )
            )
        kwargs = {'raw_dict': configs[cfg_name]}

        if element_type == ConfigurationSet:
            kwargs.update({'cfg_name': cfg_name, 'cfg_factory': self})
        else:
            kwargs['name'] = cfg_name

        element_instance = element_type(**kwargs)

        try:
            element_instance.validate()
        except ModelValidationError as mve:
            warning(f'validation error for {cfg_name} - ignored: {mve}')

        return element_instance