Code example #1
0
def destroy_concourse_landscape(config_name: str, release_name: str):
    '''Tear down a Concourse landscape: delete its helm release, then its namespace.'''
    # Resolve the concourse and kubernetes configuration for the landscape
    factory = global_ctx().cfg_factory()
    cfg_set = factory.cfg_set(cfg_name=config_name)
    concourse_cfg = cfg_set.concourse()

    k8s_cfg_name = concourse_cfg.kubernetes_cluster_config()
    k8s_cfg = factory.kubernetes(k8s_cfg_name)
    context = kube_ctx
    context.set_kubecfg(k8s_cfg.kubeconfig())

    # Delete helm release. Helm reads the cluster credentials from a kubeconfig
    # file, so dump the configured kubeconfig into a temporary directory and
    # point helm at it via the KUBECONFIG env var (relative path resolved via cwd).
    helm_cmd_path = which("helm")
    KUBECONFIG_FILE_NAME = 'kubecfg'
    helm_env = os.environ.copy()
    helm_env['KUBECONFIG'] = KUBECONFIG_FILE_NAME

    with tempfile.TemporaryDirectory() as temp_dir:
        kubeconfig_file = os.path.join(temp_dir, KUBECONFIG_FILE_NAME)
        with open(kubeconfig_file, 'w') as f:
            yaml.dump(k8s_cfg.kubeconfig(), f)

        try:
            subprocess.run(
                [helm_cmd_path, "delete", release_name, "--purge"],
                env=helm_env,
                check=True,
                cwd=temp_dir,
            )
        except CalledProcessError:
            # ignore sporadic connection timeouts from infrastructure
            warning(
                "Connection to K8s cluster lost. Continue with deleting namespace {ns}"
                .format(ns=release_name))

    # delete namespace
    context.namespace_helper().delete_namespace(namespace=release_name)
Code example #2
0
 def get_kubecfg(self):
     '''Return an ApiClient for a pre-set kubeconfig, or load one from disk.

     Resolution order for the kubeconfig file path:
     1. `--kubeconfig` CLI argument (if present and non-empty)
     2. `KUBECONFIG` environment variable
     Fails if neither is set.
     '''
     if self.kubeconfig:
         return kubernetes.client.ApiClient(configuration=self.kubeconfig)
     # self.kubeconfig is falsy past this point, so the original second
     # `if self.kubeconfig:` re-check was unreachable dead code — removed.
     kubeconfig = os.environ.get('KUBECONFIG', None)
     args = global_ctx().args
     if args and hasattr(args, 'kubeconfig') and args.kubeconfig:
         kubeconfig = args.kubeconfig
     if not kubeconfig:
         fail('KUBECONFIG env var must be set')
     return config.load_kube_config(existing_file(kubeconfig))
Code example #3
0
def set_teams(config: ConcourseConfig):
    '''Create/update all configured Concourse teams (except the main team).'''
    not_none(config)
    factory = global_ctx().cfg_factory()
    uam_cfg = factory.concourse_uam(config.concourse_uam_config())
    # Use main-team, i.e. the team that can change the other teams' credentials
    main_team = uam_cfg.main_team()
    main_team_name = main_team.teamname()

    api = client.from_cfg(
        concourse_cfg=config,
        team_name=main_team_name,
    )
    for team in uam_cfg.teams():
        # We skip the main team here since we cannot update all its credentials at this time.
        if team.teamname() != main_team_name:
            api.set_team(team)
Code example #4
0
File: whd.py  Project: pombredanne/cc-utils
def deploy_webhook_dispatcher_landscape(
    cfg_set,
    webhook_dispatcher_deployment_cfg: WebhookDispatcherDeploymentConfig,
    chart_dir: str,
    deployment_name: str,
):
    '''Deploy the webhook-dispatcher helm chart into the configured cluster.

    Sets the global kubernetes context to the cluster named by the deployment
    config, creates the TLS secret, then installs the chart from `chart_dir`
    using `deployment_name` as both release name and namespace.
    '''
    not_empty(deployment_name)

    chart_dir = os.path.abspath(chart_dir)
    cfg_factory = global_ctx().cfg_factory()

    # Set the global context to the cluster specified in KubernetesConfig
    kubernetes_config_name = webhook_dispatcher_deployment_cfg.kubernetes_config_name()
    kubernetes_config = cfg_factory.kubernetes(kubernetes_config_name)
    kube_ctx.set_kubecfg(kubernetes_config.kubeconfig())

    ensure_cluster_version(kubernetes_config)

    # TLS config
    tls_config_name = webhook_dispatcher_deployment_cfg.tls_config_name()
    tls_config = cfg_factory.tls_config(tls_config_name)
    tls_secret_name = "webhook-dispatcher-tls"

    info('Creating tls-secret ...')
    create_tls_secret(
        tls_config=tls_config,
        tls_secret_name=tls_secret_name,
        namespace=deployment_name,
    )

    whd_helm_values = create_webhook_dispatcher_helm_values(
        cfg_set=cfg_set,
        webhook_dispatcher_deployment_cfg=webhook_dispatcher_deployment_cfg,
        cfg_factory=cfg_factory,
    )

    # NOTE: the kubernetes config was previously fetched a second time under a
    # different local name here; the already-resolved config is reused instead.
    execute_helm_deployment(
        kubernetes_config,
        deployment_name,
        chart_dir,
        deployment_name,
        whd_helm_values,
    )
Code example #5
0
File: concourseutil.py  Project: swapnilgm/cc-utils
def destroy_concourse(release: str, dry_run: bool = True):
    '''Delete a Concourse deployment: its namespace and its helm release.

    With dry_run=True (the default) only prints what would be destroyed.
    '''
    _display_info(
        dry_run=dry_run,
        operation="DESTROYED",
        deployment_name=release,
    )

    if dry_run:
        return

    helm_executable = which("helm")
    ctx = kubeutil.Ctx()
    ns_helper = KubernetesNamespaceHelper(ctx.create_core_api())
    ns_helper.delete_namespace(namespace=release)

    helm_env = os.environ.copy()
    # Check for optional arg --kubeconfig
    cli_args = global_ctx().args
    kubeconfig = getattr(cli_args, 'kubeconfig', None) if cli_args else None
    if kubeconfig:
        helm_env['KUBECONFIG'] = kubeconfig

    # best-effort: exit status of helm is deliberately not checked
    subprocess.run([helm_executable, "delete", release, "--purge"], env=helm_env)
Code example #6
0
File: concourse.py  Project: pombredanne/cc-utils
def deploy_concourse_landscape(
        config_set: ConfigurationSet,
        deployment_name: str='concourse',
        timeout_seconds: int=180,
):
    '''Deploy (or upgrade) a Concourse landscape via helm.

    Resolves all required configuration elements from the given configuration
    set, creates the supporting kubernetes secrets/config-maps, runs the helm
    deployment of the 'stable/concourse' chart and finally waits for the
    'concourse-web' deployment to become available before setting up teams.

    Args:
        config_set: configuration set providing concourse-, kubernetes-,
            container-registry-, tls- and helm-chart configuration.
        deployment_name: used as helm release name and as target namespace.
        timeout_seconds: how long to wait for the 'concourse-web' deployment
            to become available before failing.
    '''
    ensure_helm_setup()

    # Fetch all the necessary config
    config_factory = global_ctx().cfg_factory()
    concourse_cfg = config_set.concourse()

    # Kubernetes cluster config
    kubernetes_config_name = concourse_cfg.kubernetes_cluster_config()
    kubernetes_config = config_factory.kubernetes(kubernetes_config_name)

    # Container-registry config
    image_pull_secret_name = concourse_cfg.image_pull_secret()
    container_registry = config_factory.container_registry(image_pull_secret_name)
    cr_credentials = container_registry.credentials()

    # TLS config
    tls_config_name = concourse_cfg.tls_config()
    tls_config = config_factory.tls_config(tls_config_name)
    tls_secret_name = concourse_cfg.tls_secret_name()

    # Helm config (defaults overlaid with landscape-specific custom values)
    helm_chart_default_values_name = concourse_cfg.helm_chart_default_values_config()
    default_helm_values = config_factory.concourse_helmchart(helm_chart_default_values_name).raw
    helm_chart_values_name = concourse_cfg.helm_chart_values()
    custom_helm_values = config_factory.concourse_helmchart(helm_chart_values_name).raw

    # Proxy config (optional — only if a mitm-proxy is configured)
    if concourse_cfg.proxy():
        proxy_cfg_name = concourse_cfg.proxy()
        proxy_cfg = config_factory.proxy(proxy_cfg_name)

        info('Creating config-maps for the mitm proxy ...')
        create_proxy_configmaps(
            proxy_cfg=proxy_cfg,
            namespace=deployment_name,
        )

    info('Creating default image-pull-secret ...')
    create_image_pull_secret(
        credentials=cr_credentials,
        image_pull_secret_name=image_pull_secret_name,
        namespace=deployment_name,
    )

    info('Creating tls-secret ...')
    create_tls_secret(
        tls_config=tls_config,
        tls_secret_name=tls_secret_name,
        namespace=deployment_name,
    )

    warning(
        'Teams will not be set up properly on Concourse if the deployment times out, '
        'even if Helm eventually succeeds. In this case, run the deployment command again after '
        'Concourse is available.'
    )

    instance_specific_helm_values = create_instance_specific_helm_values(
        concourse_cfg=concourse_cfg, config_factory=config_factory,
    )
    chart_version = concourse_cfg.helm_chart_version()

    # Add proxy sidecars to instance specific values.
    # NOTE: Only works for helm chart version 3.8.0 or greater
    if concourse_cfg.proxy():
        chart_version_semver = semver.parse_version_info(concourse_cfg.helm_chart_version())
        min_version = semver.parse_version_info('3.8.0')
        if chart_version_semver >= min_version:
            instance_specific_helm_values = add_proxy_values(
                config_set=config_set,
                instance_specific_values=instance_specific_helm_values,
            )
        else:
            fail('Proxy deployment requires the configured helm chart version to be at least 3.8.0')

    execute_helm_deployment(
        kubernetes_config,
        deployment_name,
        'stable/concourse',
        deployment_name,
        default_helm_values,
        custom_helm_values,
        instance_specific_helm_values,
        chart_version=chart_version,
    )

    info('Waiting until the webserver can be reached ...')
    deployment_helper = kube_ctx.deployment_helper()
    is_web_deployment_available = deployment_helper.wait_until_deployment_available(
        namespace=deployment_name,
        name='concourse-web',
        timeout_seconds=timeout_seconds,
    )
    if not is_web_deployment_available:
        fail(
            dedent(
                """No Concourse webserver reachable after {t} second(s).
                Check status of Pods created by "concourse-web"-deployment in namespace {ns}
                """
            ).format(
                t = timeout_seconds,
                ns = deployment_name,
            )
        )
    info('Webserver became accessible.')

    # Even though the deployment is available, the ingress might need a few seconds to update.
    time.sleep(3)

    info('Setting teams on Concourse ...')
    set_teams(config=concourse_cfg)
Code example #7
0
File: concourse.py  Project: pombredanne/cc-utils
def add_proxy_values(
    config_set,
    instance_specific_values: dict,
):
    '''Add mitm-proxy sidecar containers and config volume to helm values.

    Injects two sidecar containers (an iptables-setup container and the
    mitm-proxy itself) plus the config-map volume they need into the
    'worker' section of the given instance-specific helm values.

    NOTE: mutates `instance_specific_values` in place and also returns it.

    Args:
        config_set: configuration set providing concourse- and
            secrets-server configuration.
        instance_specific_values: helm values dict to extend.

    Returns:
        the (mutated) `instance_specific_values` dict.
    '''
    # The dir into which the config map is mounted in the volume.
    # NOTE: This _must_ align with what the mitm is configured to use by our docker image.
    MITM_CONFIG_DIR = '/.mitmproxy'

    # add the sidecar-configuration for the mitm-proxy
    config_factory = global_ctx().cfg_factory()
    concourse_cfg = config_set.concourse()
    secrets_server_cfg = config_set.secrets_server()
    proxy_cfg = config_factory.proxy(concourse_cfg.proxy())
    mitm_cfg = proxy_cfg.mitm_proxy()
    logging_cfg = mitm_cfg.logging()
    sidecar_image_cfg = proxy_cfg.sidecar_image()
    sidecar_containers = [{
        # privileged init-style sidecar that redirects pod traffic to the proxy
        'name': 'setup-iptables-sidecar',
        'image': sidecar_image_cfg.image_reference(),
        'env': [{
            'name': 'PROXY_PORT',
            'value': f'{mitm_cfg.config()["listen_port"]}',
        },{
            'name': 'POD_IP',
            'valueFrom': {
                'fieldRef': {
                    'fieldPath':'status.podIP',
                },
            },
        }],
        'securityContext': {
            'privileged': True,
        },
    },{
        # the mitm-proxy itself; fetches its secrets from the secrets-server
        'name': 'mitm-proxy',
        'image': mitm_cfg.image_reference(),
        'env': [{
                'name': 'CONFIG_DIR',
                'value': MITM_CONFIG_DIR,
            },{
                'name': 'SECRETS_SERVER_ENDPOINT',
                'value': secrets_server_cfg.endpoint_url(),
            },{
                'name': 'SECRETS_SERVER_CONCOURSE_CFG_NAME',
                'value': secrets_server_cfg.secrets().concourse_cfg_name(),
            },{
                'name': 'ELASTIC_CONFIG_NAME',
                'value': logging_cfg.els_config_name(),
            },{
                'name': 'ELASTIC_INDEX_NAME',
                'value': logging_cfg.els_index_name(),
            },{
                'name': 'PROXY_CONFIG_NAME',
                'value': proxy_cfg.name(),
        }],
        'ports': [{
            'containerPort': mitm_cfg.config()["listen_port"],
            'hostPort': mitm_cfg.config()["listen_port"],
            'protocol': 'TCP',
        }],
        'volumeMounts': [{
            'name': 'mitm-config',
            'mountPath': MITM_CONFIG_DIR,
        }],
    }]
    additional_volumes = [{
        'name':'mitm-config',
        'configMap': {'name': MITM_CONFIG_CONFIGMAP_NAME},
    }]
    # add new values to dict without replacing existing ones
    vals = instance_specific_values.get('worker', {})
    vals.update(
        {
            'sidecarContainers': sidecar_containers,
            'additionalVolumes': additional_volumes,
        }
    )
    instance_specific_values['worker']= vals

    return instance_specific_values
Code example #8
0
def deploy_concourse_landscape(config_name: str,
                               deployment_name: str = 'concourse',
                               timeout_seconds: int = 180):
    '''Deploy a Concourse landscape (secrets-server, delaying proxy, Concourse).

    Args:
        config_name: name of the configuration set to deploy.
        deployment_name: used as helm release name and target namespace.
        timeout_seconds: how long to wait for the 'concourse-web' deployment
            to become available before failing.
            NOTE: the previous default was the *string* '180', contradicting
            the int annotation; it is now a proper int.
    '''
    ensure_not_empty(config_name)
    ensure_helm_setup()

    # Fetch all the necessary config
    config_factory = global_ctx().cfg_factory()
    config_set = config_factory.cfg_set(cfg_name=config_name)
    concourse_cfg = config_set.concourse()

    # Set the global context to the cluster specified by the given config
    kubernetes_config = config_set.kubernetes()
    kubeutil.ctx.set_kubecfg(kubernetes_config.kubeconfig())

    ensure_cluster_version(kubernetes_config)

    # Container-registry config
    image_pull_secret_name = concourse_cfg.image_pull_secret()
    container_registry = config_factory._cfg_element(
        cfg_type_name='container_registry',
        cfg_name=image_pull_secret_name,
    )
    cr_credentials = container_registry.credentials()

    # TLS config
    tls_config_name = concourse_cfg.tls_config()
    tls_config = config_factory._cfg_element(cfg_type_name='tls_config',
                                             cfg_name=tls_config_name)
    tls_secret_name = concourse_cfg.tls_secret_name()

    # Secrets server
    secrets_server_config = config_set.secrets_server()

    # Helm config (chart defaults overlaid with landscape-specific values)
    helmchart_cfg_type = 'concourse_helmchart'
    default_helm_values = config_factory._cfg_element(
        cfg_type_name=helmchart_cfg_type,
        cfg_name=concourse_cfg.helm_chart_default_values_config()).raw
    custom_helm_values = config_factory._cfg_element(
        cfg_type_name=helmchart_cfg_type,
        cfg_name=concourse_cfg.helm_chart_values()).raw

    info('Creating default image-pull-secret ...')
    create_image_pull_secret(
        credentials=cr_credentials,
        image_pull_secret_name=image_pull_secret_name,
        namespace=deployment_name,
    )

    info('Creating tls-secret ...')
    create_tls_secret(
        tls_config=tls_config,
        tls_secret_name=tls_secret_name,
        namespace=deployment_name,
    )

    info('Deploying secrets-server ...')
    deploy_secrets_server(secrets_server_config=secrets_server_config)

    info('Deploying delaying proxy ...')
    deploy_delaying_proxy(
        concourse_cfg=concourse_cfg,
        deployment_name=deployment_name,
    )

    info('Deploying Concourse ...')
    # Concourse is deployed last since Helm will lose connection if deployment takes more than ~60 seconds.
    # Helm will still continue deploying server-side, but the client will report an error.
    deploy_or_upgrade_concourse(
        default_helm_values=default_helm_values,
        custom_helm_values=custom_helm_values,
        concourse_cfg=concourse_cfg,
        kubernetes_config=kubernetes_config,
        deployment_name=deployment_name,
    )

    info('Waiting until the webserver can be reached ...')
    deployment_helper = kubeutil.ctx.deployment_helper()
    is_web_deployment_available = deployment_helper.wait_until_deployment_available(
        namespace=deployment_name,
        name='concourse-web',
        timeout_seconds=timeout_seconds,
    )
    if not is_web_deployment_available:
        fail(
            dedent("""No Concourse webserver reachable after {t} second(s).
                Check status of Pods created by "concourse-web"-deployment in namespace {ns}
                """).format(
                t=timeout_seconds,
                ns=deployment_name,
            ))
    info('Webserver became accessible.')

    # Even though the deployment is available, the ingress might need a few seconds to update.
    time.sleep(3)

    info('Setting teams on Concourse ...')
    set_teams(config=concourse_cfg)
Code example #9
0
File: concourse.py  Project: minchaow/cc-utils
def deploy_concourse_landscape(
        config_name: str,
        deployment_name: str='concourse',
        timeout_seconds: int=180,
):
    '''Deploy (or upgrade) a Concourse landscape via helm.

    Resolves all required configuration from the named configuration set,
    creates the supporting secrets, deploys the secrets-server, runs the helm
    deployment of the 'stable/concourse' chart and finally waits for the
    'concourse-web' deployment to become available before setting up teams.

    Args:
        config_name: name of the configuration set to deploy.
        deployment_name: used as helm release name and target namespace.
        timeout_seconds: how long to wait for the 'concourse-web' deployment
            to become available before failing.
            NOTE: the previous default was the *string* '180', contradicting
            the int annotation; it is now a proper int.
    '''
    not_empty(config_name)
    ensure_helm_setup()

    # Fetch all the necessary config
    config_factory = global_ctx().cfg_factory()
    config_set = config_factory.cfg_set(cfg_name=config_name)
    concourse_cfg = config_set.concourse()

    # Set the global context to the cluster specified in the ConcourseConfig
    kubernetes_config_name = concourse_cfg.kubernetes_cluster_config()
    kubernetes_config = config_factory.kubernetes(kubernetes_config_name)
    kube_ctx.set_kubecfg(kubernetes_config.kubeconfig())

    ensure_cluster_version(kubernetes_config)

    # Container-registry config
    image_pull_secret_name = concourse_cfg.image_pull_secret()
    container_registry = config_factory.container_registry(image_pull_secret_name)
    cr_credentials = container_registry.credentials()

    # TLS config
    tls_config_name = concourse_cfg.tls_config()
    tls_config = config_factory.tls_config(tls_config_name)
    tls_secret_name = concourse_cfg.tls_secret_name()

    # Secrets server
    secrets_server_config = config_set.secrets_server()

    # Helm config (chart defaults overlaid with landscape-specific values)
    helm_chart_default_values_name = concourse_cfg.helm_chart_default_values_config()
    default_helm_values = config_factory.concourse_helmchart(helm_chart_default_values_name).raw
    helm_chart_values_name = concourse_cfg.helm_chart_values()
    custom_helm_values = config_factory.concourse_helmchart(helm_chart_values_name).raw

    info('Creating default image-pull-secret ...')
    create_image_pull_secret(
        credentials=cr_credentials,
        image_pull_secret_name=image_pull_secret_name,
        namespace=deployment_name,
    )

    info('Creating tls-secret ...')
    create_tls_secret(
        tls_config=tls_config,
        tls_secret_name=tls_secret_name,
        namespace=deployment_name,
    )

    info('Deploying secrets-server ...')
    deploy_secrets_server(
        secrets_server_config=secrets_server_config,
    )

    info('Deploying Concourse ...')
    warning(
        'Teams will not be set up properly on Concourse if the deployment times out, '
        'even if Helm eventually succeeds. In this case, run the deployment command again after '
        'Concourse is available.'
    )

    instance_specific_helm_values = create_instance_specific_helm_values(
        concourse_cfg=concourse_cfg, config_factory=config_factory,
    )
    chart_version = concourse_cfg.helm_chart_version()

    execute_helm_deployment(
        kubernetes_config,
        deployment_name,
        'stable/concourse',
        deployment_name,
        default_helm_values,
        custom_helm_values,
        instance_specific_helm_values,
        chart_version=chart_version,
    )

    info('Waiting until the webserver can be reached ...')
    deployment_helper = kube_ctx.deployment_helper()
    is_web_deployment_available = deployment_helper.wait_until_deployment_available(
        namespace=deployment_name,
        name='concourse-web',
        timeout_seconds=timeout_seconds,
    )
    if not is_web_deployment_available:
        fail(
            dedent(
                """No Concourse webserver reachable after {t} second(s).
                Check status of Pods created by "concourse-web"-deployment in namespace {ns}
                """
            ).format(
                t = timeout_seconds,
                ns = deployment_name,
            )
        )
    info('Webserver became accessible.')

    # Even though the deployment is available, the ingress might need a few seconds to update.
    time.sleep(3)

    info('Setting teams on Concourse ...')
    set_teams(config=concourse_cfg)