def deploy_oauth2_proxy(
    oauth2_proxy_config: Oauth2ProxyConfig,
    chart_dir: str,
    deployment_name: str,
):
    '''Deploy the oauth2-proxy helm chart from a local chart directory.

    The target cluster is looked up from the kubernetes config named in
    `oauth2_proxy_config`; the release `deployment_name` is installed into
    the namespace configured there.
    '''
    not_empty(deployment_name)
    cfg_factory = global_ctx().cfg_factory()
    # Fix: normalise the chart path so helm resolves it independently of the
    # current working directory (consistent with the other chart-dir based
    # deploy functions in this file).
    chart_dir = os.path.abspath(chart_dir)

    kubernetes_config = cfg_factory.kubernetes(
        oauth2_proxy_config.kubernetes_config_name()
    )
    # point the global kube context at the target cluster
    kube_ctx.set_kubecfg(kubernetes_config.kubeconfig())

    ingress_config = cfg_factory.ingress(oauth2_proxy_config.ingress_config())

    helm_values = create_oauth2_proxy_helm_values(
        oauth2_proxy_config=oauth2_proxy_config,
        ingress_config=ingress_config,
        deployment_name=deployment_name,
        config_factory=cfg_factory,
    )

    execute_helm_deployment(
        kubernetes_config,
        oauth2_proxy_config.namespace(),
        chart_dir,
        deployment_name,
        helm_values,
    )
def deploy_gardenlinux_cache(
    kubernetes_config: KubernetesConfig,
    gardenlinux_cache_config: GardenlinuxCacheConfig,
    chart_dir: str,
    deployment_name: str,
):
    '''Install the gardenlinux-cache helm chart into its configured namespace.

    `chart_dir` is a local helm chart; `deployment_name` becomes the helm
    release name. The target cluster is given by `kubernetes_config`.
    '''
    not_empty(deployment_name)

    factory = global_ctx().cfg_factory()
    absolute_chart_dir = os.path.abspath(chart_dir)

    # select the target cluster and sanity-check its version
    kube_ctx.set_kubecfg(kubernetes_config.kubeconfig())
    ensure_cluster_version(kubernetes_config)

    ingress_cfg = factory.ingress(gardenlinux_cache_config.ingress_config())

    values = create_gardenlinux_cache_helm_values(
        gardenlinux_cache_config=gardenlinux_cache_config,
        ingress_config=ingress_cfg,
    )

    execute_helm_deployment(
        kubernetes_config,
        gardenlinux_cache_config.namespace(),
        absolute_chart_dir,
        deployment_name,
        values,
    )
def deploy_monitoring_landscape(
    kubernetes_cfg: KubernetesConfig,
    concourse_cfg: ConcourseConfig,
    cfg_factory: ConfigFactory,
):
    '''Deploy kube-state-metrics and the prometheus postgres-exporter into the
    monitoring namespace of the given cluster.
    '''
    # Set the global context to the cluster specified in KubernetesConfig
    kube_ctx.set_kubecfg(kubernetes_cfg.kubeconfig())
    ensure_cluster_version(kubernetes_cfg)

    # Fix: the monitoring config was previously fetched twice via
    # kubernetes_cfg.monitoring(); fetch it once and reuse it.
    monitoring_cfg = kubernetes_cfg.monitoring()
    monitoring_namespace = monitoring_cfg.namespace()

    # deploy kube-state-metrics
    kube_state_metrics_helm_values = create_kube_state_metrics_helm_values(
        monitoring_cfg=monitoring_cfg,
    )
    execute_helm_deployment(
        kubernetes_cfg,
        monitoring_namespace,
        'stable/kube-state-metrics',
        'kube-state-metrics',
        kube_state_metrics_helm_values,
    )

    # deploy postgresql exporter
    postgresql_helm_values = create_postgresql_helm_values(
        concourse_cfg=concourse_cfg,
        cfg_factory=cfg_factory,
    )
    execute_helm_deployment(
        kubernetes_cfg,
        monitoring_namespace,
        'stable/prometheus-postgres-exporter',
        'prometheus-postgres-exporter',
        postgresql_helm_values,
    )
def deploy_oauth2_proxy(
    kubernetes_config: KubernetesConfig,
    oauth2_proxy_config: Oauth2ProxyConfig,
    deployment_name: str,
):
    '''Deploy the upstream 'stable/oauth2-proxy' helm chart.

    Installs the release `deployment_name` into the namespace configured in
    `oauth2_proxy_config`, on the cluster given by `kubernetes_config`.
    '''
    not_empty(deployment_name)

    factory = global_ctx().cfg_factory()

    # select the target cluster and verify we can talk to it
    kube_ctx.set_kubecfg(kubernetes_config.kubeconfig())
    ensure_cluster_version(kubernetes_config)

    ingress_cfg = factory.ingress(oauth2_proxy_config.ingress_config())

    values = create_oauth2_proxy_helm_values(
        oauth2_proxy_config=oauth2_proxy_config,
        ingress_config=ingress_cfg,
        deployment_name=deployment_name,
    )

    execute_helm_deployment(
        kubernetes_config,
        oauth2_proxy_config.namespace(),
        'stable/oauth2-proxy',
        deployment_name,
        values,
    )
def deploy_webhook_dispatcher_landscape(
    cfg_set,
    webhook_dispatcher_deployment_cfg: WebhookDispatcherDeploymentConfig,
    chart_dir: str,
    deployment_name: str,
):
    '''Deploy the webhook-dispatcher helm chart from a local chart directory.

    The target cluster is looked up from the kubernetes config named in
    `webhook_dispatcher_deployment_cfg`.
    '''
    not_empty(deployment_name)
    chart_dir = os.path.abspath(chart_dir)
    cfg_factory = global_ctx().cfg_factory()

    # Set the global context to the cluster specified in KubernetesConfig.
    # Fix: the kubernetes config was previously fetched twice under two
    # different local names; fetch it once and reuse it.
    kubernetes_config_name = webhook_dispatcher_deployment_cfg.kubernetes_config_name()
    kubernetes_config = cfg_factory.kubernetes(kubernetes_config_name)
    kube_ctx.set_kubecfg(kubernetes_config.kubeconfig())

    whd_helm_values = create_webhook_dispatcher_helm_values(
        cfg_set=cfg_set,
        webhook_dispatcher_deployment_cfg=webhook_dispatcher_deployment_cfg,
        config_factory=cfg_factory,
    )

    # NOTE(review): the helm release is installed into a namespace named after
    # the deployment — confirm this is intended.
    execute_helm_deployment(
        kubernetes_config,
        deployment_name,
        chart_dir,
        deployment_name,
        whd_helm_values,
    )
def deploy_tekton_dashboard_ingress(
    kubernetes_config: KubernetesConfig,
    tekton_dashboard_ingress_config: TektonDashboardIngressConfig,
    chart_dir: str,
    deployment_name: str,
):
    '''Install the tekton-dashboard-ingress helm chart from a local chart dir.

    Targets the cluster from `kubernetes_config` and deploys into the
    namespace configured in `tekton_dashboard_ingress_config`.
    '''
    not_empty(deployment_name)

    factory = global_ctx().cfg_factory()
    absolute_chart_dir = os.path.abspath(chart_dir)

    # target the configured cluster and verify its version
    kube_ctx.set_kubecfg(kubernetes_config.kubeconfig())
    ensure_cluster_version(kubernetes_config)

    ingress_cfg = factory.ingress(
        tekton_dashboard_ingress_config.ingress_config()
    )

    values = create_tekton_dashboard_helm_values(
        tekton_dashboard_ingress_config=tekton_dashboard_ingress_config,
        ingress_config=ingress_cfg,
    )

    execute_helm_deployment(
        kubernetes_config,
        tekton_dashboard_ingress_config.namespace(),
        absolute_chart_dir,
        deployment_name,
        values,
    )
def deploy_webhook_dispatcher_landscape(
    cfg_set,
    webhook_dispatcher_deployment_cfg: WebhookDispatcherDeploymentConfig,
    chart_dir: str,
    deployment_name: str,
):
    '''Deploy the webhook-dispatcher helm chart, including its TLS secret.

    Creates the tls-secret in the target namespace first, then installs the
    chart from the local `chart_dir`.
    '''
    not_empty(deployment_name)
    chart_dir = os.path.abspath(chart_dir)
    cfg_factory = global_ctx().cfg_factory()

    # Set the global context to the cluster specified in KubernetesConfig.
    # Fix: the kubernetes config was previously fetched twice under two
    # different local names; fetch it once and reuse it.
    kubernetes_config_name = webhook_dispatcher_deployment_cfg.kubernetes_config_name()
    kubernetes_config = cfg_factory.kubernetes(kubernetes_config_name)
    kube_ctx.set_kubecfg(kubernetes_config.kubeconfig())
    ensure_cluster_version(kubernetes_config)

    # TLS config
    tls_config_name = webhook_dispatcher_deployment_cfg.tls_config_name()
    tls_config = cfg_factory.tls_config(tls_config_name)
    tls_secret_name = "webhook-dispatcher-tls"
    info('Creating tls-secret ...')
    # NOTE(review): secret and helm release are placed in a namespace named
    # after the deployment — confirm this is intended.
    create_tls_secret(
        tls_config=tls_config,
        tls_secret_name=tls_secret_name,
        namespace=deployment_name,
    )

    whd_helm_values = create_webhook_dispatcher_helm_values(
        cfg_set=cfg_set,
        webhook_dispatcher_deployment_cfg=webhook_dispatcher_deployment_cfg,
        cfg_factory=cfg_factory,
    )

    execute_helm_deployment(
        kubernetes_config,
        deployment_name,
        chart_dir,
        deployment_name,
        whd_helm_values,
    )
def deploy_whitesource_api_extension(
    whitesource_cfg: WhitesourceConfig,
    kubernetes_cfg: KubernetesConfig,
    chart_dir: str = os.path.join(paths.chartdirt, 'whitesource-api-extension'),
    deployment_name: str = 'whitesource-api-extension',
):
    '''Deploy the whitesource-api-extension helm chart.

    Installs the release `deployment_name` into the namespace configured in
    `whitesource_cfg`, on the cluster given by `kubernetes_cfg`.
    '''
    # NOTE(review): `paths.chartdirt` looks like a typo of a chart-dir
    # attribute — confirm the attribute name in the `paths` module.
    not_empty(deployment_name)
    chart_dir = os.path.abspath(chart_dir)

    # Set the global context to the cluster specified in KubernetesConfig.
    # Fix: pass the kubeconfig (as every other deploy function in this file
    # does) rather than the whole KubernetesConfig object.
    kube_ctx.set_kubecfg(kubernetes_cfg.kubeconfig())

    execute_helm_deployment(
        kubernetes_config=kubernetes_cfg,
        namespace=whitesource_cfg.namespace(),
        chart_name=chart_dir,
        release_name=deployment_name,
    )
def deploy_clam_av(
    clamav_cfg_name,
    kubernetes_cfg_name,
):
    '''Deploy the ClamAV helm chart.

    Clones the chart repository into a temporary directory and installs the
    chart into the namespace configured for ClamAV; the release name is the
    namespace name.
    '''
    cfg_factory = ci.util.ctx().cfg_factory()
    clamav_config = cfg_factory.clamav(clamav_cfg_name)
    kubernetes_config = cfg_factory.kubernetes(kubernetes_cfg_name)

    # the release name doubles as the target namespace
    clamav_deployment_name = clamav_config.namespace()

    with TemporaryDirectory() as temp_dir:
        from_github_cfg = cfg_factory.github(CLAMAV_GITHUB_CONFIG)
        gitutil.GitHelper.clone_into(
            target_directory=temp_dir,
            github_cfg=from_github_cfg,
            github_repo_path=CLAMAV_HELMCHART_REPO_PATH,
        )
        execute_helm_deployment(
            kubernetes_config,
            clamav_config.namespace(),
            # Fix: the f-string wrapper around os.path.join was redundant
            os.path.join(temp_dir, 'clamav'),
            clamav_deployment_name,
            create_clamav_helm_values(clamav_cfg_name),
        )
def deploy_concourse_landscape(
    config_set: ConfigurationSet,
    deployment_name: str = 'concourse',
    timeout_seconds: int = 180,
):
    '''Deploy a complete Concourse landscape from the given configuration set.

    Creates the image-pull secret (and mitm-proxy config-maps if a proxy is
    configured), installs the 'concourse/concourse' helm chart, waits up to
    `timeout_seconds` for the Concourse webserver deployment, then configures
    the Concourse teams.
    '''
    ensure_helm_setup()

    # Fetch all the necessary config
    config_factory = global_ctx().cfg_factory()
    concourse_cfg = config_set.concourse()

    # Kubernetes cluster config
    kubernetes_config_name = concourse_cfg.kubernetes_cluster_config()
    kubernetes_config = config_factory.kubernetes(kubernetes_config_name)

    # Container-registry config
    image_pull_secret_name = concourse_cfg.image_pull_secret()
    container_registry = config_factory.container_registry(image_pull_secret_name)
    cr_credentials = container_registry.credentials()

    # Helm config
    helm_chart_default_values_name = concourse_cfg.helm_chart_default_values_config()
    default_helm_values = config_factory.concourse_helmchart(
        helm_chart_default_values_name
    ).raw
    helm_chart_values_name = concourse_cfg.helm_chart_values()
    custom_helm_values = config_factory.concourse_helmchart(
        helm_chart_values_name
    ).raw

    # Proxy config
    if concourse_cfg.proxy():
        proxy_cfg_name = concourse_cfg.proxy()
        proxy_cfg = config_factory.proxy(proxy_cfg_name)
        info('Creating config-maps for the mitm proxy ...')
        create_proxy_configmaps(
            proxy_cfg=proxy_cfg,
            namespace=deployment_name,
        )

    info('Creating default image-pull-secret ...')
    create_image_pull_secret(
        credentials=cr_credentials,
        image_pull_secret_name=image_pull_secret_name,
        namespace=deployment_name,
    )

    warning(
        'Teams will not be set up properly on Concourse if the deployment times out, '
        'even if Helm eventually succeeds. In this case, run the deployment command again after '
        'Concourse is available.'
    )

    instance_specific_helm_values = create_instance_specific_helm_values(
        concourse_cfg=concourse_cfg,
        config_factory=config_factory,
    )
    chart_version = concourse_cfg.helm_chart_version()

    # Add proxy sidecars to instance specific values.
    # NOTE: Only works for helm chart version 3.8.0 or greater
    if concourse_cfg.proxy():
        # Fix: reuse the already-fetched chart_version instead of re-reading
        # it from the config.
        chart_version_semver = version.parse_to_semver(chart_version)
        min_version = version.parse_to_semver('3.8.0')
        if chart_version_semver >= min_version:
            instance_specific_helm_values = add_proxy_values(
                config_set=config_set,
                instance_specific_values=instance_specific_helm_values,
            )
        else:
            fail(
                'Proxy deployment requires the configured helm chart version to be at least 3.8.0'
            )

    execute_helm_deployment(
        kubernetes_config,
        deployment_name,
        'concourse/concourse',
        deployment_name,
        default_helm_values,
        custom_helm_values,
        instance_specific_helm_values,
        chart_version=chart_version,
    )

    info('Waiting until the webserver can be reached ...')
    deployment_helper = kube_ctx.deployment_helper()
    is_web_deployment_available = deployment_helper.wait_until_deployment_available(
        namespace=deployment_name,
        name='concourse-web',
        timeout_seconds=timeout_seconds,
    )
    if not is_web_deployment_available:
        fail(
            dedent("""No Concourse webserver reachable after {t} second(s).
                Check status of Pods created by "concourse-web"-deployment in namespace {ns}
                """).format(
                t=timeout_seconds,
                ns=deployment_name,
            ))
    info('Webserver became accessible.')

    # Even though the deployment is available, the ingress might need a few seconds to update.
    time.sleep(3)

    info('Setting teams on Concourse ...')
    set_teams(config=concourse_cfg)
def deploy_monitoring_landscape(
    cfg_set: ConfigurationSet,
    cfg_factory: ConfigFactory,
):
    '''Deploy the monitoring components and their ingresses.

    Installs kube-state-metrics and the prometheus postgres-exporter charts
    into the monitoring namespace, creates the shared tls-secret (with basic
    auth credentials), and creates one ingress per exporter.
    '''
    kubernetes_cfg = cfg_set.kubernetes()
    concourse_cfg = cfg_set.concourse()

    # Set the global context to the cluster specified in KubernetesConfig
    kube_ctx.set_kubecfg(kubernetes_cfg.kubeconfig())
    ensure_cluster_version(kubernetes_cfg)

    monitoring_cfg = cfg_factory.monitoring(concourse_cfg.monitoring_config())
    monitoring_namespace = monitoring_cfg.namespace()
    tls_config = cfg_factory.tls_config(concourse_cfg.tls_config())

    # deploy kube-state-metrics
    execute_helm_deployment(
        kubernetes_cfg,
        monitoring_namespace,
        'stable/kube-state-metrics',
        'kube-state-metrics',
        create_kube_state_metrics_helm_values(monitoring_cfg=monitoring_cfg),
    )

    # deploy postgresql exporter
    execute_helm_deployment(
        kubernetes_cfg,
        monitoring_namespace,
        'stable/prometheus-postgres-exporter',
        'prometheus-postgres-exporter',
        create_postgresql_helm_values(
            concourse_cfg=concourse_cfg,
            cfg_factory=cfg_factory,
        ),
    )

    # deploy ingresses for kube-state-metrics, postgresql exporter
    tls_secret = monitoring_cfg.tls_secret_name()
    info('Creating tls-secret in monitoring namespace for kube-state-metrics and postgresql...')
    create_tls_secret(
        tls_config=tls_config,
        tls_secret_name=tls_secret,
        namespace=monitoring_namespace,
        basic_auth_cred=BasicAuthCred(
            user=monitoring_cfg.basic_auth_user(),
            password=monitoring_cfg.basic_auth_pwd(),
        ),
    )

    ingress_helper = kube_ctx.ingress_helper()
    # one ingress per exporter service, both behind the same tls-secret
    for exporter_label, service_cfg in (
        ('kube-state-metrics', monitoring_cfg.kube_state_metrics()),
        ('postgres-exporter', monitoring_cfg.postgresql_exporter()),
    ):
        info(f'Create ingress for {exporter_label}')
        ingress = generate_monitoring_ingress_object(
            secret_name=tls_secret,
            namespace=monitoring_namespace,
            hosts=[monitoring_cfg.ingress_host(), monitoring_cfg.external_url()],
            service_name=service_cfg.service_name(),
            service_port=service_cfg.service_port(),
        )
        ingress_helper.replace_or_create_ingress(monitoring_namespace, ingress)
def deploy_concourse_landscape(
    config_name: str,
    deployment_name: str = 'concourse',
    timeout_seconds: int = 180,
):
    '''Deploy a complete Concourse landscape for the named config set.

    Creates the image-pull- and tls-secrets, deploys the secrets-server and
    the 'stable/concourse' helm chart, waits up to `timeout_seconds` for the
    Concourse webserver deployment, then configures the Concourse teams.

    Fix: `timeout_seconds` previously defaulted to the *string* '180' despite
    its int annotation (and was passed on as a str); the default is now 180.
    '''
    not_empty(config_name)
    ensure_helm_setup()

    # Fetch all the necessary config
    config_factory = global_ctx().cfg_factory()
    config_set = config_factory.cfg_set(cfg_name=config_name)
    concourse_cfg = config_set.concourse()

    # Set the global context to the cluster specified in the ConcourseConfig
    kubernetes_config_name = concourse_cfg.kubernetes_cluster_config()
    kubernetes_config = config_factory.kubernetes(kubernetes_config_name)
    kube_ctx.set_kubecfg(kubernetes_config.kubeconfig())
    ensure_cluster_version(kubernetes_config)

    # Container-registry config
    image_pull_secret_name = concourse_cfg.image_pull_secret()
    container_registry = config_factory.container_registry(image_pull_secret_name)
    cr_credentials = container_registry.credentials()

    # TLS config
    tls_config_name = concourse_cfg.tls_config()
    tls_config = config_factory.tls_config(tls_config_name)
    tls_secret_name = concourse_cfg.tls_secret_name()

    # Secrets server
    secrets_server_config = config_set.secrets_server()

    # Helm config
    helm_chart_default_values_name = concourse_cfg.helm_chart_default_values_config()
    default_helm_values = config_factory.concourse_helmchart(
        helm_chart_default_values_name
    ).raw
    helm_chart_values_name = concourse_cfg.helm_chart_values()
    custom_helm_values = config_factory.concourse_helmchart(
        helm_chart_values_name
    ).raw

    info('Creating default image-pull-secret ...')
    create_image_pull_secret(
        credentials=cr_credentials,
        image_pull_secret_name=image_pull_secret_name,
        namespace=deployment_name,
    )

    info('Creating tls-secret ...')
    create_tls_secret(
        tls_config=tls_config,
        tls_secret_name=tls_secret_name,
        namespace=deployment_name,
    )

    info('Deploying secrets-server ...')
    deploy_secrets_server(
        secrets_server_config=secrets_server_config,
    )

    info('Deploying Concourse ...')
    warning(
        'Teams will not be set up properly on Concourse if the deployment times out, '
        'even if Helm eventually succeeds. In this case, run the deployment command again after '
        'Concourse is available.'
    )

    instance_specific_helm_values = create_instance_specific_helm_values(
        concourse_cfg=concourse_cfg,
        config_factory=config_factory,
    )
    chart_version = concourse_cfg.helm_chart_version()

    execute_helm_deployment(
        kubernetes_config,
        deployment_name,
        'stable/concourse',
        deployment_name,
        default_helm_values,
        custom_helm_values,
        instance_specific_helm_values,
        chart_version=chart_version,
    )

    info('Waiting until the webserver can be reached ...')
    deployment_helper = kube_ctx.deployment_helper()
    is_web_deployment_available = deployment_helper.wait_until_deployment_available(
        namespace=deployment_name,
        name='concourse-web',
        timeout_seconds=timeout_seconds,
    )
    if not is_web_deployment_available:
        fail(
            dedent(
                """No Concourse webserver reachable after {t} second(s).
                Check status of Pods created by "concourse-web"-deployment in namespace {ns}
                """
            ).format(
                t=timeout_seconds,
                ns=deployment_name,
            )
        )
    info('Webserver became accessible.')

    # Even though the deployment is available, the ingress might need a few seconds to update.
    time.sleep(3)

    info('Setting teams on Concourse ...')
    set_teams(config=concourse_cfg)
def deploy_monitoring_landscape(
    cfg_set: ConfigurationSet,
    cfg_factory: ConfigFactory,
):
    '''Deploy the monitoring components and their basic-auth protected ingresses.

    Installs kube-state-metrics and the prometheus postgres-exporter charts
    into the monitoring namespace, creates the basic-auth secret, and creates
    one ingress per exporter.
    '''
    kubernetes_cfg = cfg_set.kubernetes()
    concourse_cfg = cfg_set.concourse()
    ingress_cfg = cfg_set.ingress(concourse_cfg.ingress_config())

    # Set the global context to the cluster specified in KubernetesConfig
    kube_ctx.set_kubecfg(kubernetes_cfg.kubeconfig())
    ensure_cluster_version(kubernetes_cfg)

    monitoring_cfg = cfg_factory.monitoring(concourse_cfg.monitoring_config())
    monitoring_namespace = monitoring_cfg.namespace()

    # deploy kube-state-metrics
    execute_helm_deployment(
        kubernetes_cfg,
        monitoring_namespace,
        'stable/kube-state-metrics',
        'kube-state-metrics',
        create_kube_state_metrics_helm_values(monitoring_cfg=monitoring_cfg),
    )

    # deploy postgresql exporter
    execute_helm_deployment(
        kubernetes_cfg,
        monitoring_namespace,
        'stable/prometheus-postgres-exporter',
        'prometheus-postgres-exporter',
        create_postgresql_helm_values(
            concourse_cfg=concourse_cfg,
            cfg_factory=cfg_factory,
        ),
    )

    # deploy ingresses for kube-state-metrics, postgresql exporter
    tls_secret = monitoring_cfg.tls_secret_name()
    basic_auth_secret = monitoring_cfg.basic_auth_secret_name()

    info(
        'Creating basic-auth-secret in monitoring namespace for '
        'kube-state-metrics and postgresql...'
    )
    create_basic_auth_secret(
        secret_name=basic_auth_secret,
        namespace=monitoring_namespace,
        basic_auth_cred=BasicAuthCred(
            user=monitoring_cfg.basic_auth_user(),
            password=monitoring_cfg.basic_auth_pwd(),
        ),
    )

    # we need to create two ingress objects since nginx-ingress does not
    # support rewrites for multiple paths unless the premium version is used.
    # NOTE: only one ingress should use gardener-managed dns. Otherwise the
    # dns-controller will periodically complain that the dns-entry is busy as
    # they share the same host
    ingress_helper = kube_ctx.ingress_helper()
    for exporter_label, service_cfg, managed_dns in (
        ('kube-state-metrics', monitoring_cfg.kube_state_metrics(), True),
        ('postgres-exporter', monitoring_cfg.postgresql_exporter(), False),
    ):
        info(f'Create ingress for {exporter_label}')
        ingress = generate_monitoring_ingress_object(
            basic_auth_secret_name=basic_auth_secret,
            tls_secret_name=tls_secret,
            namespace=monitoring_namespace,
            external_url=monitoring_cfg.external_url(),
            ingress_host=monitoring_cfg.ingress_host(),
            service_name=service_cfg.service_name(),
            service_port=service_cfg.service_port(),
            ingress_config=ingress_cfg,
            managed_dns=managed_dns,
        )
        ingress_helper.replace_or_create_ingress(monitoring_namespace, ingress)