Example #1
def deploy_oauth2_proxy(
    oauth2_proxy_config: Oauth2ProxyConfig,
    chart_dir: str,
    deployment_name: str,
):
    not_empty(deployment_name)

    cfg_factory = global_ctx().cfg_factory()

    kubernetes_config = cfg_factory.kubernetes(
        oauth2_proxy_config.kubernetes_config_name())
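    # point the global kube context at the target cluster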
    kube_ctx.set_kubecfg(kubernetes_config.kubeconfig())

    ingress_config = cfg_factory.ingress(oauth2_proxy_config.ingress_config())
    helm_values = create_oauth2_proxy_helm_values(
        oauth2_proxy_config=oauth2_proxy_config,
        ingress_config=ingress_config,
        deployment_name=deployment_name,
        config_factory=cfg_factory,
    )

    execute_helm_deployment(
        kubernetes_config,
        oauth2_proxy_config.namespace(),
        chart_dir,
        deployment_name,
        helm_values,
    )
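
A minimal invocation sketch for the example above (the config name and chart path are hypothetical placeholders):

cfg_factory = global_ctx().cfg_factory()
oauth2_proxy_config = cfg_factory.oauth2_proxy('oauth2-proxy-example')  # placeholder config name
deploy_oauth2_proxy(
    oauth2_proxy_config=oauth2_proxy_config,
    chart_dir='charts/oauth2-proxy',  # placeholder chart path
    deployment_name='oauth2-proxy',
)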
Example #2
def deploy_gardenlinux_cache(
    kubernetes_config: KubernetesConfig,
    gardenlinux_cache_config: GardenlinuxCacheConfig,
    chart_dir: str,
    deployment_name: str,
):
    not_empty(deployment_name)

    cfg_factory = global_ctx().cfg_factory()
    chart_dir = os.path.abspath(chart_dir)

    kube_ctx.set_kubecfg(kubernetes_config.kubeconfig())
    ensure_cluster_version(kubernetes_config)

    ingress_config = cfg_factory.ingress(
        gardenlinux_cache_config.ingress_config())
    helm_values = create_gardenlinux_cache_helm_values(
        gardenlinux_cache_config=gardenlinux_cache_config,
        ingress_config=ingress_config,
    )

    execute_helm_deployment(
        kubernetes_config,
        gardenlinux_cache_config.namespace(),
        chart_dir,
        deployment_name,
        helm_values,
    )
Example #3
def destroy_concourse_landscape(config_name: str, release_name: str):
    # Fetch concourse and kubernetes config
    config_factory = global_ctx().cfg_factory()
    config_set = config_factory.cfg_set(cfg_name=config_name)
    concourse_cfg = config_set.concourse()

    kubernetes_config_name = concourse_cfg.kubernetes_cluster_config()
    kubernetes_config = config_factory.kubernetes(kubernetes_config_name)
    context = kube_ctx
    context.set_kubecfg(kubernetes_config.kubeconfig())

    # Delete helm release
    helm_cmd_path = ensure_helm_setup()
    KUBECONFIG_FILE_NAME = 'kubecfg'
    helm_env = os.environ.copy()
    helm_env['KUBECONFIG'] = KUBECONFIG_FILE_NAME

    with tempfile.TemporaryDirectory() as temp_dir:
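        # helm runs with cwd=temp_dir, so the relative KUBECONFIG path set
        # above resolves to the kubeconfig file written here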
        with open(os.path.join(temp_dir, KUBECONFIG_FILE_NAME), 'w') as f:
            yaml.dump(kubernetes_config.kubeconfig(), f)

        try:
            subprocess.run([helm_cmd_path, "delete", release_name],
                           env=helm_env,
                           check=True,
                           cwd=temp_dir)
        except subprocess.CalledProcessError:
            # ignore sporadic connection timeouts from infrastructure
            warning(
                "Connection to K8s cluster lost. Continuing with deletion of namespace {ns}"
                .format(ns=release_name))

    # delete namespace
    namespace_helper = context.namespace_helper()
    namespace_helper.delete_namespace(namespace=release_name)
Example #4
def deploy_oauth2_proxy(
    kubernetes_config: KubernetesConfig,
    oauth2_proxy_config: Oauth2ProxyConfig,
    deployment_name: str,
):
    not_empty(deployment_name)

    cfg_factory = global_ctx().cfg_factory()

    kube_ctx.set_kubecfg(kubernetes_config.kubeconfig())
    ensure_cluster_version(kubernetes_config)

    ingress_config = cfg_factory.ingress(oauth2_proxy_config.ingress_config())
    helm_values = create_oauth2_proxy_helm_values(
        oauth2_proxy_config=oauth2_proxy_config,
        ingress_config=ingress_config,
        deployment_name=deployment_name,
    )

    execute_helm_deployment(
        kubernetes_config,
        oauth2_proxy_config.namespace(),
        'stable/oauth2-proxy',
        deployment_name,
        helm_values,
    )
Example #5
def deploy_webhook_dispatcher_landscape(
    cfg_set,
    webhook_dispatcher_deployment_cfg: WebhookDispatcherDeploymentConfig,
    chart_dir: str,
    deployment_name: str,
):
    not_empty(deployment_name)

    chart_dir = os.path.abspath(chart_dir)
    cfg_factory = global_ctx().cfg_factory()

    # Set the global context to the cluster specified in KubernetesConfig
    kubernetes_config_name = (
        webhook_dispatcher_deployment_cfg.kubernetes_config_name()
    )
    kubernetes_config = cfg_factory.kubernetes(kubernetes_config_name)
    kube_ctx.set_kubecfg(kubernetes_config.kubeconfig())

    whd_helm_values = create_webhook_dispatcher_helm_values(
        cfg_set=cfg_set,
        webhook_dispatcher_deployment_cfg=webhook_dispatcher_deployment_cfg,
        config_factory=cfg_factory,
    )

    execute_helm_deployment(
        kubernetes_config,
        deployment_name,
        chart_dir,
        deployment_name,
        whd_helm_values,
    )
Example #6
def deploy_tekton_dashboard_ingress(
    kubernetes_config: KubernetesConfig,
    tekton_dashboard_ingress_config: TektonDashboardIngressConfig,
    chart_dir: str,
    deployment_name: str,
):
    not_empty(deployment_name)

    cfg_factory = global_ctx().cfg_factory()
    chart_dir = os.path.abspath(chart_dir)

    kube_ctx.set_kubecfg(kubernetes_config.kubeconfig())
    ensure_cluster_version(kubernetes_config)

    ingress_config = cfg_factory.ingress(
        tekton_dashboard_ingress_config.ingress_config())
    helm_values = create_tekton_dashboard_helm_values(
        tekton_dashboard_ingress_config=tekton_dashboard_ingress_config,
        ingress_config=ingress_config,
        config_factory=cfg_factory,
    )

    execute_helm_deployment(
        kubernetes_config,
        tekton_dashboard_ingress_config.namespace(),
        chart_dir,
        deployment_name,
        helm_values,
    )
Example #7
def create_tekton_dashboard_helm_values(
    tekton_dashboard_ingress_config: TektonDashboardIngressConfig,
    ingress_config: IngressConfig,
    config_factory,
):
    oauth2_proxy_config = global_ctx().cfg_factory().oauth2_proxy(
        tekton_dashboard_ingress_config.oauth2_proxy_config_name())
    helm_values = {
        'external_url':
        tekton_dashboard_ingress_config.external_url(),
        'ingress_host':
        tekton_dashboard_ingress_config.ingress_host(config_factory),
        'ingress_issuer_name':
        ingress_config.issuer_name(),
        'ingress_tls_hosts':
        ingress_config.tls_host_names(),
        'ingress_ttl':
        str(ingress_config.ttl()),
        'serviceName':
        tekton_dashboard_ingress_config.service_name(),
        'servicePort':
        tekton_dashboard_ingress_config.service_port(),
        'oauthProxyAuthUrl':
        oauth2_proxy_config.external_url(),
    }
    return helm_values
Example #8
def create_oauth2_proxy_helm_values(
    oauth2_proxy_config: Oauth2ProxyConfig,
    ingress_config: IngressConfig,
    deployment_name: str,
):
    oauth2_proxy_chart_config = oauth2_proxy_config.oauth2_proxy_chart_config()
    github_oauth_cfg = oauth2_proxy_config.github_oauth_config()
    github_cfg = global_ctx().cfg_factory().github(
        github_oauth_cfg.github_cfg_name())
    ingress_host = oauth2_proxy_config.ingress_host()

    helm_values = {
        'config': {
            'clientID':
            github_oauth_cfg.client_id(),
            'clientSecret':
            github_oauth_cfg.client_secret(),
            'cookieSecret':
            oauth2_proxy_chart_config.cookie_secret(),
            # configFile is expected in YAML '|-' block style, i.e. a newline after each line except the last
            'configFile':
            '\n'.join([
                'provider = "github"',
                'email_domains = [ "*" ]',
                'upstreams = [ "file:///dev/null" ]',
                f'cookie_name = "{oauth2_proxy_chart_config.cookie_name()}"',
                f'github_org = "{github_oauth_cfg.github_org()}"',
                f'github_team = "{github_oauth_cfg.github_team()}"',
                f'login_url = "{github_cfg.http_url()}/login/oauth/authorize"',
                f'redeem_url = "{github_cfg.http_url()}/login/oauth/access_token"',
                f'validate_url = "{github_cfg.api_url()}"',
                f'ssl_insecure_skip_verify = {str(github_oauth_cfg.no_ssl_verify()).lower()}',
                'whitelist_domains = ".gardener.cloud"',
            ])
        },
        'ingress': {
            'enabled':
            True,
            'path':
            "/",
            'annotations': {
                'kubernetes.io/ingress.class': 'nginx',
                'kubernetes.io/tls-acme': "true",
                'cert.gardener.cloud/issuer': ingress_config.issuer_name(),
                'cert.gardener.cloud/purpose': 'managed',
                'dns.gardener.cloud/class': 'garden',
                'dns.gardener.cloud/dnsnames': ingress_host,
                'dns.gardener.cloud/ttl': str(ingress_config.ttl()),
            },
            'hosts': [ingress_host,
                      oauth2_proxy_config.external_url()],
            'tls': [{
                'hosts': ingress_config.tls_host_names(),
                'secretName': f'{deployment_name}-tls'
            }],
        },
    }

    return helm_values
Example #9
def get_kubecfg(self):
    if self.kubeconfig:
        return kubernetes.client.ApiClient(configuration=self.kubeconfig)
    # fall back to the KUBECONFIG env var; a kubeconfig passed via args takes precedence
    kubeconfig = os.environ.get('KUBECONFIG', None)
    args = global_ctx().args
    if args and hasattr(args, 'kubeconfig') and args.kubeconfig:
        kubeconfig = args.kubeconfig
    if not kubeconfig:
        fail('KUBECONFIG env var must be set')
    return config.load_kube_config(existing_file(kubeconfig))
Example #10
def deploy_webhook_dispatcher_landscape(
    cfg_set,
    webhook_dispatcher_deployment_cfg: WebhookDispatcherDeploymentConfig,
    chart_dir: str,
    deployment_name: str,
):
    not_empty(deployment_name)

    chart_dir = os.path.abspath(chart_dir)
    cfg_factory = global_ctx().cfg_factory()

    # Set the global context to the cluster specified in KubernetesConfig
    kubernetes_config_name = (
        webhook_dispatcher_deployment_cfg.kubernetes_config_name()
    )
    kubernetes_config = cfg_factory.kubernetes(kubernetes_config_name)
    kube_ctx.set_kubecfg(kubernetes_config.kubeconfig())

    ensure_cluster_version(kubernetes_config)

    # TLS config
    tls_config_name = webhook_dispatcher_deployment_cfg.tls_config_name()
    tls_config = cfg_factory.tls_config(tls_config_name)
    tls_secret_name = "webhook-dispatcher-tls"

    info('Creating tls-secret ...')
    create_tls_secret(
        tls_config=tls_config,
        tls_secret_name=tls_secret_name,
        namespace=deployment_name,
    )

    whd_helm_values = create_webhook_dispatcher_helm_values(
        cfg_set=cfg_set,
        webhook_dispatcher_deployment_cfg=webhook_dispatcher_deployment_cfg,
        cfg_factory=cfg_factory,
    )

    execute_helm_deployment(
        kubernetes_config,
        deployment_name,
        chart_dir,
        deployment_name,
        whd_helm_values,
    )
Example #11
def set_teams(config: ConcourseConfig):
    not_none(config)
    cfg_factory = global_ctx().cfg_factory()
    concourse_uam_cfg_name = config.concourse_uam_config()
    concourse_uam_cfg = cfg_factory.concourse_uam(concourse_uam_cfg_name)
    # Use main-team, i.e. the team that can change the other teams' credentials
    main_team = concourse_uam_cfg.main_team()

    concourse_api = client.from_cfg(
        concourse_cfg=config,
        team_name=main_team.teamname(),
    )
    for team in concourse_uam_cfg.teams():
        if not team.has_basic_auth_credentials():
            continue
        # We skip the main team here since we cannot update all its credentials at this time.
        if team.teamname() == main_team.teamname():
            continue
        concourse_api.set_team(team)
Example #12
def deploy_concourse_landscape(
    config_set: ConfigurationSet,
    deployment_name: str = 'concourse',
    timeout_seconds: int = 180,
):
    ensure_helm_setup()

    # Fetch all the necessary config
    config_factory = global_ctx().cfg_factory()
    concourse_cfg = config_set.concourse()

    # Kubernetes cluster config
    kubernetes_config_name = concourse_cfg.kubernetes_cluster_config()
    kubernetes_config = config_factory.kubernetes(kubernetes_config_name)

    # Container-registry config
    image_pull_secret_name = concourse_cfg.image_pull_secret()
    container_registry = config_factory.container_registry(
        image_pull_secret_name)
    cr_credentials = container_registry.credentials()

    # Helm config
    helm_chart_default_values_name = (
        concourse_cfg.helm_chart_default_values_config()
    )
    default_helm_values = config_factory.concourse_helmchart(
        helm_chart_default_values_name).raw
    helm_chart_values_name = concourse_cfg.helm_chart_values()
    custom_helm_values = config_factory.concourse_helmchart(
        helm_chart_values_name).raw

    # Proxy config
    if concourse_cfg.proxy():
        proxy_cfg_name = concourse_cfg.proxy()
        proxy_cfg = config_factory.proxy(proxy_cfg_name)

        info('Creating config-maps for the mitm proxy ...')
        create_proxy_configmaps(
            proxy_cfg=proxy_cfg,
            namespace=deployment_name,
        )

    info('Creating default image-pull-secret ...')
    create_image_pull_secret(
        credentials=cr_credentials,
        image_pull_secret_name=image_pull_secret_name,
        namespace=deployment_name,
    )

    warning(
        'Teams will not be set up properly on Concourse if the deployment times out, '
        'even if Helm eventually succeeds. In this case, run the deployment command again after '
        'Concourse is available.')

    instance_specific_helm_values = create_instance_specific_helm_values(
        concourse_cfg=concourse_cfg,
        config_factory=config_factory,
    )
    chart_version = concourse_cfg.helm_chart_version()

    # Add proxy sidecars to instance specific values.
    # NOTE: Only works for helm chart version 3.8.0 or greater
    if concourse_cfg.proxy():
        chart_version_semver = version.parse_to_semver(
            concourse_cfg.helm_chart_version())
        min_version = version.parse_to_semver('3.8.0')
        if chart_version_semver >= min_version:
            instance_specific_helm_values = add_proxy_values(
                config_set=config_set,
                instance_specific_values=instance_specific_helm_values,
            )
        else:
            fail(
                'Proxy deployment requires the configured helm chart version to be at least 3.8.0'
            )

    execute_helm_deployment(
        kubernetes_config,
        deployment_name,
        'concourse/concourse',
        deployment_name,
        default_helm_values,
        custom_helm_values,
        instance_specific_helm_values,
        chart_version=chart_version,
    )

    info('Waiting until the webserver can be reached ...')
    deployment_helper = kube_ctx.deployment_helper()
    is_web_deployment_available = deployment_helper.wait_until_deployment_available(
        namespace=deployment_name,
        name='concourse-web',
        timeout_seconds=timeout_seconds,
    )
    if not is_web_deployment_available:
        fail(
            dedent("""No Concourse webserver reachable after {t} second(s).
                Check the status of the Pods created by the "concourse-web" deployment in namespace {ns}
                """).format(
                t=timeout_seconds,
                ns=deployment_name,
            ))
    info('Webserver became accessible.')

    # Even though the deployment is available, the ingress might need a few seconds to update.
    time.sleep(3)

    info('Setting teams on Concourse ...')
    set_teams(config=concourse_cfg)
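
A minimal invocation sketch for the example above (the config set name is a hypothetical placeholder):

config_factory = global_ctx().cfg_factory()
config_set = config_factory.cfg_set(cfg_name='example-landscape')  # placeholder name
deploy_concourse_landscape(
    config_set=config_set,
    deployment_name='concourse',
)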
Example #13
def add_proxy_values(
    config_set,
    instance_specific_values: dict,
):
    # The directory at which the config-map volume is mounted.
    # NOTE: This _must_ match what the mitm proxy docker image is configured to use.
    MITM_CONFIG_DIR = '/.mitmproxy'

    # add the sidecar-configuration for the mitm-proxy
    config_factory = global_ctx().cfg_factory()
    concourse_cfg = config_set.concourse()
    secrets_server_cfg = config_set.secrets_server()
    proxy_cfg = config_factory.proxy(concourse_cfg.proxy())
    mitm_cfg = proxy_cfg.mitm_proxy()
    logging_cfg = mitm_cfg.logging()
    sidecar_image_cfg = proxy_cfg.sidecar_image()
    sidecar_containers = [{
        'name':
        'setup-iptables-sidecar',
        'image':
        sidecar_image_cfg.image_reference(),
        'env': [{
            'name': 'PROXY_PORT',
            'value': f'{mitm_cfg.config()["listen_port"]}',
        }, {
            'name': 'POD_IP',
            'valueFrom': {
                'fieldRef': {
                    'fieldPath': 'status.podIP',
                },
            },
        }],
        'securityContext': {
            'privileged': True,
        },
    }, {
        'name':
        'mitm-proxy',
        'image':
        mitm_cfg.image_reference(),
        'env': [{
            'name': 'CONFIG_DIR',
            'value': MITM_CONFIG_DIR,
        }, {
            'name': 'SECRETS_SERVER_ENDPOINT',
            'value': secrets_server_cfg.endpoint_url(),
        }, {
            'name': 'SECRETS_SERVER_CONCOURSE_CFG_NAME',
            'value': secrets_server_cfg.secrets().concourse_cfg_name(),
        }, {
            'name': 'ELASTIC_CONFIG_NAME',
            'value': logging_cfg.els_config_name(),
        }, {
            'name': 'ELASTIC_INDEX_NAME',
            'value': logging_cfg.els_index_name(),
        }, {
            'name': 'PROXY_CONFIG_NAME',
            'value': proxy_cfg.name(),
        }],
        'ports': [{
            'containerPort': mitm_cfg.config()["listen_port"],
            'hostPort': mitm_cfg.config()["listen_port"],
            'protocol': 'TCP',
        }],
        'volumeMounts': [{
            'name': 'mitm-config',
            'mountPath': MITM_CONFIG_DIR,
        }],
    }]
    additional_volumes = [{
        'name': 'mitm-config',
        'configMap': {
            'name': MITM_CONFIG_CONFIGMAP_NAME
        },
    }]
    # extend the existing 'worker' values; other pre-existing keys are preserved
    vals = instance_specific_values.get('worker', {})
    vals.update({
        'sidecarContainers': sidecar_containers,
        'additionalVolumes': additional_volumes,
    })
    instance_specific_values['worker'] = vals

    return instance_specific_values