def replicate_secrets(
    cfg_factory: model.ConfigFactory,
    cfg_set: model.ConfigurationSet,
    kubeconfig: typing.Dict,
    secret_key: str,
    secret_cipher_algorithm: str,
    future_secrets: typing.Dict[str, str],
    team_name: str,
    target_secret_name: str,
    target_secret_namespace: str,
    target_secret_cfg_name: str,
):
    '''Serialise `cfg_set` (plus all cfg-sets it references), encrypt the serialised
    data once per matching entry of `future_secrets`, and deploy each cipher-text as
    a kubernetes secret on the cluster described by `kubeconfig`.

    Keys in `future_secrets` must match `key-<N>`; the numeric suffix is used to
    derive the indexed per-team secret name. Non-matching keys are skipped with a
    warning. The secret payload is stored under the `target_secret_cfg_name` key,
    base64-encoded.

    NOTE(review): `secret_key` and `target_secret_name` are currently unused by this
    function — confirm with callers whether they can be removed from the signature.
    '''
    logger.info(f'replicating replication cfg set {cfg_set.name()}')
    # force cfg_set serialiser to include referenced cfg_sets
    cfg_sets = list(cfg_set._cfg_elements('cfg_set')) + [cfg_set]

    for cfg_set in cfg_sets:
        logger.info(f'config subset {cfg_set.name()=} with keys')
        # iterate the cfg-mapping directly (was wrapped in a redundant one-element list)
        for cfg_type_name, _ in cfg_set._cfg_mappings():
            pprint.pprint({
                cfg_type_name: cfg_set._cfg_element_names(cfg_type_name=cfg_type_name)
            })

    serialiser = model.ConfigSetSerialiser(cfg_sets=cfg_sets, cfg_factory=cfg_factory)

    kube_ctx = kube.ctx.Ctx(kubeconfig_dict=kubeconfig)
    secrets_helper = kube_ctx.secret_helper()

    logger.info(
        f'deploying indexed secrets on cluster {kube_ctx.kubeconfig.host}')

    # serialisation is loop-invariant - do it once instead of once per secret
    serialised_cfg_data = serialiser.serialise().encode('utf-8')

    for (k, v) in future_secrets.items():
        m = re.match(r'key[-](\d+)', k)
        if m:
            f_name = model.concourse.secret_name_from_team(team_name, m.group(1))
            # each secret is encrypted with its own key material (the map's value)
            encrypted_cipher_data = ccc.secrets_server.encrypt_data(
                key=v.encode('utf-8'),
                cipher_algorithm=secret_cipher_algorithm,
                serialized_secret_data=serialised_cfg_data,
            )
            encoded_cipher_data = base64.b64encode(
                encrypted_cipher_data).decode('utf-8')
            logger.info(
                f'deploying secret {f_name} in namespace {target_secret_namespace}'
            )
            secrets_helper.put_secret(
                name=f_name,
                raw_data={target_secret_cfg_name: encoded_cipher_data},
                namespace=target_secret_namespace,
            )
        else:
            logger.warning(f'ignoring unmatched key: {k}')

    logger.info(f'deployed encrypted secret for team: {team_name}')
def deploy_concourse_landscape(
    config_set: ConfigurationSet,
    deployment_name: str = 'concourse',
    timeout_seconds: int = 180,
):
    '''Deploy a Concourse landscape via helm onto the cluster configured in the
    given cfg-set, then wait for the webserver deployment to become available and
    set up the Concourse teams.

    Raises (via `fail`) if the proxy feature is requested with a helm chart
    version < 3.8.0, or if the webserver does not become reachable within
    `timeout_seconds`.
    '''
    ensure_helm_setup()

    # Fetch all the necessary config
    config_factory = global_ctx().cfg_factory()
    concourse_cfg = config_set.concourse()

    # Kubernetes cluster config
    kubernetes_config_name = concourse_cfg.kubernetes_cluster_config()
    kubernetes_config = config_factory.kubernetes(kubernetes_config_name)

    # Container-registry config
    image_pull_secret_name = concourse_cfg.image_pull_secret()
    container_registry = config_factory.container_registry(image_pull_secret_name)
    cr_credentials = container_registry.credentials()

    # Helm config
    helm_chart_default_values_name = concourse_cfg.helm_chart_default_values_config()
    default_helm_values = config_factory.concourse_helmchart(
        helm_chart_default_values_name
    ).raw
    helm_chart_values_name = concourse_cfg.helm_chart_values()
    custom_helm_values = config_factory.concourse_helmchart(helm_chart_values_name).raw

    # Proxy config
    if concourse_cfg.proxy():
        proxy_cfg_name = concourse_cfg.proxy()
        proxy_cfg = config_factory.proxy(proxy_cfg_name)
        info('Creating config-maps for the mitm proxy ...')
        create_proxy_configmaps(
            proxy_cfg=proxy_cfg,
            namespace=deployment_name,
        )

    info('Creating default image-pull-secret ...')
    create_image_pull_secret(
        credentials=cr_credentials,
        image_pull_secret_name=image_pull_secret_name,
        namespace=deployment_name,
    )

    warning(
        'Teams will not be set up properly on Concourse if the deployment times out, '
        'even if Helm eventually succeeds. In this case, run the deployment command again after '
        'Concourse is available.')

    instance_specific_helm_values = create_instance_specific_helm_values(
        concourse_cfg=concourse_cfg,
        config_factory=config_factory,
    )
    chart_version = concourse_cfg.helm_chart_version()

    # Add proxy sidecars to instance specific values.
    # NOTE: Only works for helm chart version 3.8.0 or greater
    if concourse_cfg.proxy():
        # reuse the chart version fetched above instead of querying the cfg again
        chart_version_semver = version.parse_to_semver(chart_version)
        min_version = version.parse_to_semver('3.8.0')
        if chart_version_semver >= min_version:
            instance_specific_helm_values = add_proxy_values(
                config_set=config_set,
                instance_specific_values=instance_specific_helm_values,
            )
        else:
            fail(
                'Proxy deployment requires the configured helm chart version to be at least 3.8.0'
            )

    execute_helm_deployment(
        kubernetes_config,
        deployment_name,
        'concourse/concourse',
        deployment_name,
        default_helm_values,
        custom_helm_values,
        instance_specific_helm_values,
        chart_version=chart_version,
    )

    info('Waiting until the webserver can be reached ...')
    deployment_helper = kube_ctx.deployment_helper()
    is_web_deployment_available = deployment_helper.wait_until_deployment_available(
        namespace=deployment_name,
        name='concourse-web',
        timeout_seconds=timeout_seconds,
    )
    if not is_web_deployment_available:
        fail(
            dedent("""No Concourse webserver reachable after {t} second(s).
                Check status of Pods created by "concourse-web"-deployment in namespace {ns}
                """).format(
                t=timeout_seconds,
                ns=deployment_name,
            ))
    info('Webserver became accessible.')

    # Even though the deployment is available, the ingress might need a few seconds to update.
    time.sleep(3)

    info('Setting teams on Concourse ...')
    set_teams(config=concourse_cfg)
def deploy_monitoring_landscape(
    cfg_set: ConfigurationSet,
    cfg_factory: ConfigFactory,
):
    '''Deploy the monitoring landscape (kube-state-metrics, postgresql exporter and
    tls-protected ingresses for both) onto the cluster configured in the given
    cfg-set.

    NOTE(review): a second function of the same name exists later in this module
    and shadows this definition at import time — confirm which one is intended.
    '''
    kubernetes_cfg = cfg_set.kubernetes()
    concourse_cfg = cfg_set.concourse()

    # Set the global context to the cluster specified in KubernetesConfig
    kube_ctx.set_kubecfg(kubernetes_cfg.kubeconfig())
    ensure_cluster_version(kubernetes_cfg)

    monitoring_config_name = concourse_cfg.monitoring_config()
    monitoring_cfg = cfg_factory.monitoring(monitoring_config_name)
    monitoring_namespace = monitoring_cfg.namespace()

    tls_config_name = concourse_cfg.tls_config()
    tls_config = cfg_factory.tls_config(tls_config_name)

    # deploy kube-state-metrics
    kube_state_metrics_helm_values = create_kube_state_metrics_helm_values(
        monitoring_cfg=monitoring_cfg
    )
    execute_helm_deployment(
        kubernetes_cfg,
        monitoring_namespace,
        'stable/kube-state-metrics',
        'kube-state-metrics',
        kube_state_metrics_helm_values,
    )

    # deploy postgresql exporter
    postgresql_helm_values = create_postgresql_helm_values(
        concourse_cfg=concourse_cfg,
        cfg_factory=cfg_factory,
    )
    execute_helm_deployment(
        kubernetes_cfg,
        monitoring_namespace,
        'stable/prometheus-postgres-exporter',
        'prometheus-postgres-exporter',
        postgresql_helm_values,
    )

    # deploy ingresses for kube-state-metrics, postgresql exporter
    monitoring_tls_secret_name = monitoring_cfg.tls_secret_name()

    info('Creating tls-secret in monitoring namespace for kube-state-metrics and postgresql...')
    create_tls_secret(
        tls_config=tls_config,
        tls_secret_name=monitoring_tls_secret_name,
        namespace=monitoring_namespace,
        basic_auth_cred=BasicAuthCred(
            user=monitoring_cfg.basic_auth_user(),
            password=monitoring_cfg.basic_auth_pwd()
        )
    )

    ingress_helper = kube_ctx.ingress_helper()

    # the two ingresses differ only in the targeted service - create them in one
    # loop instead of duplicating the call site
    for display_name, exporter_cfg in (
        ('kube-state-metrics', monitoring_cfg.kube_state_metrics()),
        ('postgres-exporter', monitoring_cfg.postgresql_exporter()),
    ):
        info(f'Create ingress for {display_name}')
        ingress = generate_monitoring_ingress_object(
            secret_name=monitoring_tls_secret_name,
            namespace=monitoring_namespace,
            hosts=[monitoring_cfg.ingress_host(), monitoring_cfg.external_url()],
            service_name=exporter_cfg.service_name(),
            service_port=exporter_cfg.service_port(),
        )
        ingress_helper.replace_or_create_ingress(monitoring_namespace, ingress)
def deploy_monitoring_landscape(
    cfg_set: ConfigurationSet,
    cfg_factory: ConfigFactory,
):
    '''Deploy the monitoring landscape onto the cluster configured in the given
    cfg-set: kube-state-metrics and the postgresql exporter via helm, a shared
    basic-auth secret, and one ingress per exporter.
    '''
    kubernetes_cfg = cfg_set.kubernetes()
    concourse_cfg = cfg_set.concourse()
    ingress_cfg = cfg_set.ingress(concourse_cfg.ingress_config())

    # Set the global context to the cluster specified in KubernetesConfig
    kube_ctx.set_kubecfg(kubernetes_cfg.kubeconfig())
    ensure_cluster_version(kubernetes_cfg)

    monitoring_cfg = cfg_factory.monitoring(concourse_cfg.monitoring_config())
    monitoring_namespace = monitoring_cfg.namespace()

    # deploy kube-state-metrics
    execute_helm_deployment(
        kubernetes_cfg,
        monitoring_namespace,
        'stable/kube-state-metrics',
        'kube-state-metrics',
        create_kube_state_metrics_helm_values(monitoring_cfg=monitoring_cfg),
    )

    # deploy postgresql exporter
    execute_helm_deployment(
        kubernetes_cfg,
        monitoring_namespace,
        'stable/prometheus-postgres-exporter',
        'prometheus-postgres-exporter',
        create_postgresql_helm_values(
            concourse_cfg=concourse_cfg,
            cfg_factory=cfg_factory,
        ),
    )

    # deploy ingresses for kube-state-metrics, postgresql exporter
    tls_secret_name = monitoring_cfg.tls_secret_name()
    basic_auth_secret_name = monitoring_cfg.basic_auth_secret_name()

    info(
        'Creating basic-auth-secret in monitoring namespace for '
        'kube-state-metrics and postgresql...'
    )
    create_basic_auth_secret(
        secret_name=basic_auth_secret_name,
        namespace=monitoring_namespace,
        basic_auth_cred=BasicAuthCred(
            user=monitoring_cfg.basic_auth_user(),
            password=monitoring_cfg.basic_auth_pwd()
        )
    )

    # we need to create two ingress objects since nginx-ingress does not support rewrites for
    # multiple paths unless the premium version is used. NOTE: only one ingress should use
    # gardener-managed dns. Otherwise the dns-controller will periodically complain that the
    # dns-entry is busy as they share the same host
    ingress_helper = kube_ctx.ingress_helper()
    for display_name, exporter_cfg, managed_dns in (
        ('kube-state-metrics', monitoring_cfg.kube_state_metrics(), True),
        ('postgres-exporter', monitoring_cfg.postgresql_exporter(), False),
    ):
        info(f'Create ingress for {display_name}')
        ingress = generate_monitoring_ingress_object(
            basic_auth_secret_name=basic_auth_secret_name,
            tls_secret_name=tls_secret_name,
            namespace=monitoring_namespace,
            external_url=monitoring_cfg.external_url(),
            ingress_host=monitoring_cfg.ingress_host(),
            service_name=exporter_cfg.service_name(),
            service_port=exporter_cfg.service_port(),
            ingress_config=ingress_cfg,
            managed_dns=managed_dns,
        )
        ingress_helper.replace_or_create_ingress(monitoring_namespace, ingress)