def create_webhook_dispatcher_helm_values(
    cfg_set,
    webhook_dispatcher_deployment_cfg: WebhookDispatcherDeploymentConfig,
    config_factory: ConfigFactory,
):
    # calculate secrets server endpoint
    secrets_server_name = webhook_dispatcher_deployment_cfg.secrets_server_config_name()
    secrets_server_cfg = config_factory.secrets_server(secrets_server_name)
    secrets_server_endpoint = secrets_server_cfg.endpoint_url()
    secrets_server_concourse_cfg_name = secrets_server_cfg.secrets().concourse_cfg_name()

    container_port = webhook_dispatcher_deployment_cfg.webhook_dispatcher_container_port()
    image_config = webhook_dispatcher_deployment_cfg.image_config()
    ingress_config_name = webhook_dispatcher_deployment_cfg.ingress_config()
    ingress_config = config_factory.ingress(ingress_config_name)

    env_vars = []
    env_vars.append({
        'name': 'SECRETS_SERVER_ENDPOINT',
        'value': secrets_server_endpoint,
    })
    env_vars.append({
        'name': 'SECRETS_SERVER_CONCOURSE_CFG_NAME',
        'value': secrets_server_concourse_cfg_name,
    })

    cmd_args = [
        '--port', f'"{container_port}"',
        '--cfg-set-name', cfg_set.name(),
    ]

    helm_values = {
        'ingress_host': webhook_dispatcher_deployment_cfg.ingress_host(config_factory),
        'ingress_ttl': str(ingress_config.ttl()),
        'ingress_tls_hosts': ingress_config.tls_host_names(),
        'ingress_issuer_name': ingress_config.issuer_name(),
        'external_url': webhook_dispatcher_deployment_cfg.external_url(),
        'image_repository': image_config.image_name(),
        'image_tag': image_config.image_tag(),
        'cmd_args': cmd_args,
        'env_vars': env_vars,
        'webhook_dispatcher_port': container_port,
    }

    return helm_values

def create_postgresql_helm_values(
    concourse_cfg: ConcourseConfig,
    cfg_factory: ConfigFactory,
):
    helm_chart_default_values_name = concourse_cfg.helm_chart_default_values_config()
    default_helm_values = cfg_factory.concourse_helmchart(helm_chart_default_values_name).raw
    helm_chart_values_name = concourse_cfg.helm_chart_values()
    custom_helm_values = cfg_factory.concourse_helmchart(helm_chart_values_name).raw

    helm_values = {
        "service": {
            "annotations": {
                "prometheus.io/scrape": "true"
            }
        },
        "rbac": {
            "create": False,
            "pspEnabled": False
        },
        "serviceAccount": {
            "create": False
        },
        "config": {
            "datasource": {
                "host": "concourse-postgresql.concourse.svc.cluster.local",
                "user": default_helm_values.get('postgresql').get('postgresqlUsername'),
                "password": custom_helm_values.get('postgresql').get('postgresqlPassword'),
                "database": default_helm_values.get('postgresql').get('postgresqlDatabase'),
                "sslmode": "disable"
            },
            "disableDefaultMetrics": True,
            "queries": LiteralStr('''
                pg_database:
                  query: "SELECT pg_database.datname, pg_database_size(pg_database.datname) as size FROM pg_database"
                  metrics:
                  - datname:
                      usage: "LABEL"
                      description: "Name of the database"
                  - size:
                      usage: "GAUGE"
                      description: "Disk space used by the database"
            ''')
        }
    }
    return helm_values

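# `LiteralStr` is used above so that the multi-line `queries` value is emitted
# as a YAML literal block (`|`) when the helm values are dumped. A minimal
# sketch of such a helper, assuming PyYAML is used for serialisation (the
# actual implementation in this codebase may differ):
import yaml


class LiteralStr(str):
    '''marker type: dump this string using YAML literal block style'''


def _literal_str_representer(dumper, data):
    # represent the string as a literal block scalar ('|')
    return dumper.represent_scalar('tag:yaml.org,2002:str', data, style='|')


yaml.add_representer(LiteralStr, _literal_str_representer)
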
def setUp(self):
    self.tmpdir = TemporaryDirectory()
    # type definition
    self.types_file = self._file('types', '''
        a_type:
          src:
          - file: a_type_values.xxx
          model:
            cfg_type_name: a_type
            type: NamedModelElement
        defined_but_unused_type:
          src:
          - file: defined_but_unused_type_values.xxx
          model:
            cfg_type_name: defined_but_unused_type
            type: NamedModelElement
        cfg_set:
          src:
          - file: configs
          model:
            cfg_type_name: cfg_set
            type: ConfigurationSet
        ''')
    # cfg_set definitions
    self.configs_file = self._file('configs', '''
        singleton_set:
          a_type: first_value_of_a
        second_set:
          a_type: second_value_of_a
        set_with_multiple_values:
          a_type:
            config_names:
            - first_value_of_a
            - second_value_of_a
            default: second_value_of_a
        ''')
    # value definitions
    self.a_type_values_file = self._file('a_type_values.xxx', '''
        first_value_of_a:
          some_value: 123
        second_value_of_a:
          some_value: 42
        ignored_value_of_a:
          some_value: xxx
        ''')
    self.defined_but_unused_type_values_file = self._file(
        'defined_but_unused_type_values.xxx', '''
        unused:
          some_value: 7
        ''')

    self.examinee = ConfigFactory.from_cfg_dir(
        cfg_dir=self.tmpdir.name,
        cfg_types_file=self.types_file,
    )

def _update_pipeline_definition(self, push_event):
    try:
        try:
            update_repository_pipelines(
                repo_url=push_event.repository().repository_url(),
                cfg_set=self.cfg_set,
                whd_cfg=self.whd_cfg,
            )
        except (JobMappingNotFoundError, ConfigElementNotFoundError) as e:
            # A config element was missing, or no JobMapping for the given repository was
            # present. Print a warning, reload the config, and try again.
            logger.warning(
                f'failed to update pipeline definition: {e}. Will reload config and try again.'
            )
            # Attempt to fetch the latest cfg from the secrets server and replace it
            raw_dict = ccc.secrets_server.SecretsServerClient.default().retrieve_secrets()
            factory = ConfigFactory.from_dict(raw_dict)
            self.cfg_set = factory.cfg_set(self.cfg_set.name())
            # retry
            update_repository_pipelines(
                repo_url=push_event.repository().repository_url(),
                cfg_set=self.cfg_set,
                whd_cfg=self.whd_cfg,
            )
    except BaseException as be:
        logger.warning(f'failed to update pipeline definition - ignored {be}')
        import traceback
        try:
            traceback.print_exc()
        except BaseException:
            pass  # ignore

def serialise_cfg(cfg_dir: CliHints.existing_dir(), cfg_sets: [str], out_file: str):
    factory = ConfigFactory.from_cfg_dir(cfg_dir=cfg_dir)
    cfg_sets = [factory.cfg_set(cfg_set) for cfg_set in cfg_sets]
    serialiser = CSS(cfg_sets=cfg_sets, cfg_factory=factory)
    with open(out_file, 'w') as f:
        f.write(serialiser.serialise())

def rotate_cfg_element(
    cfg_element: BtpApplicationCertificate,
    cfg_factory: model.ConfigFactory,
) -> typing.Tuple[cfg_mgmt.revert_function, dict, model.NamedModelElement]:
    gbaas_auth = cfg_factory.btp_application_certificate(
        cfg_element.auth_application_certificate()
    )
    gbaas_client = GBaasAppClient(gbaas_auth)

    # calculate next serial number
    cn = cfg_element.common_name()
    serial_no, base = BtpApplicationCertificate.parse_serial_no_from_common_name(cn)
    next_sn = serial_no + 1
    for info in gbaas_client.list_certificates_by_base(base):
        if info.serial_no >= next_sn:
            next_sn = info.serial_no + 1
    next_cn = f'{next_sn}.{base}'

    # create certificate
    csr_pem, key_pem = _create_csr(cfg_element.subject(next_cn))
    sb_auth = cfg_factory.btp_service_binding(cfg_element.cert_service_binding())
    cs_client = CertServiceClient(sb_auth.credentials())
    response = cs_client.create_client_certificate_chain(
        csr_pem, cfg_element.validity_in_days()
    )
    cert_pem = _extract_client_certificate(response)

    # add certificate to GBaas application
    cert_id = gbaas_client.put_certificate(
        cert_pem=cert_pem,
        desc=f'CN={next_cn}',
        scopes=cfg_element.scopes(),
    )

    secret_id = {'common_name': cn}
    raw_cfg = copy.deepcopy(cfg_element.raw)
    raw_cfg['certificate_pem'] = cert_pem
    raw_cfg['private_key_pem'] = key_pem
    raw_cfg['common_name'] = next_cn
    updated_elem = BtpApplicationCertificate(
        name=cfg_element.name(),
        raw_dict=raw_cfg,
        type_name=cfg_element._type_name,
    )

    def revert():
        gbaas_client.delete_certificate(next_cn, cert_id)

    return revert, secret_id, updated_elem

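# A minimal sketch of a `_create_csr` helper as used above, assuming the
# `cryptography` library and that `cfg_element.subject(...)` yields an
# `x509.Name`; the real helper and its key parameters may differ:
from cryptography import x509
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import rsa


def _create_csr(subject: x509.Name):
    # generate a fresh RSA keypair for the new certificate
    key = rsa.generate_private_key(public_exponent=65537, key_size=2048)
    # build and sign the certificate signing request with the new key
    csr = x509.CertificateSigningRequestBuilder().subject_name(subject).sign(
        key, hashes.SHA256(),
    )
    csr_pem = csr.public_bytes(serialization.Encoding.PEM).decode('utf-8')
    key_pem = key.private_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PrivateFormat.PKCS8,
        encryption_algorithm=serialization.NoEncryption(),
    ).decode('utf-8')
    return csr_pem, key_pem
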
def _authenticate(
    cfg_element: model.btp_service_binding.BtpServiceBinding,
    cfg_factory: model.ConfigFactory,
) -> SBClient:
    auth = cfg_element.auth_service_binding()
    credentials = cfg_factory.btp_service_binding(auth).credentials()
    sm_url = credentials['sm_url']
    access_token = _get_oauth_token(credentials)
    return SBClient(sm_url, access_token)

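# A minimal sketch of the `_get_oauth_token` helper used above, assuming a
# standard OAuth2 client-credentials flow and that the service-binding
# credentials contain `clientid`, `clientsecret`, and a token-service `url`
# (these key names are assumptions, not confirmed by the snippet):
import requests


def _get_oauth_token(credentials: dict) -> str:
    response = requests.post(
        f"{credentials['url']}/oauth/token",
        data={'grant_type': 'client_credentials'},
        auth=(credentials['clientid'], credentials['clientsecret']),
    )
    response.raise_for_status()
    return response.json()['access_token']
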
def serialise_cfg(cfg_dir: CliHints.existing_dir(), out_file: str, cfg_sets: [str] = []):
    factory = ConfigFactory.from_cfg_dir(cfg_dir=cfg_dir)
    if not cfg_sets:
        cfg_sets = factory._cfg_element_names('cfg_set')
    cfg_sets = [factory.cfg_set(cfg_set) for cfg_set in cfg_sets]
    serialiser = CSS(cfg_sets=cfg_sets, cfg_factory=factory)
    with open(out_file, 'w') as f:
        f.write(serialiser.serialise())

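# Hypothetical round-trip usage, assuming the serialised output is the raw-dict
# format accepted by ConfigFactory.from_dict (cf. the `deserialise` snippet
# below) and that `yaml` is imported; paths are illustrative only:
#
#   serialise_cfg(cfg_dir='/path/to/cfg-repo', out_file='/tmp/cfg.yaml')
#   with open('/tmp/cfg.yaml') as f:
#       factory = ConfigFactory.from_dict(yaml.safe_load(f))
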
def _cfg_factory_from_dir():
    if Config.CONTEXT.value.config_dir() is None:
        return None
    from util import existing_dir
    cfg_dir = existing_dir(Config.CONTEXT.value.config_dir())
    from model import ConfigFactory
    factory = ConfigFactory.from_cfg_dir(cfg_dir=cfg_dir)
    return factory

def _cfg_factory_from_dir():
    if not args or not args.cfg_dir:
        return None
    from util import ensure_directory_exists
    cfg_dir = ensure_directory_exists(args.cfg_dir)
    from model import ConfigFactory
    factory = ConfigFactory.from_cfg_dir(cfg_dir=cfg_dir)
    return factory

def replicate_pipeline_definitions(
    definition_dir: str,
    cfg_dir: str,
    cfg_name: str,
):
    '''
    replicates pipeline definitions from cc-pipelines to component repositories.
    will only be required until definitions are moved to component repositories.
    '''
    util.ensure_directory_exists(definition_dir)
    util.ensure_directory_exists(cfg_dir)

    cfg_factory = ConfigFactory.from_cfg_dir(cfg_dir)
    cfg_set = cfg_factory.cfg_set(cfg_name)
    github_cfg = cfg_set.github()

    github = _create_github_api_object(github_cfg=github_cfg)

    repo_mappings = util.parse_yaml_file(os.path.join(definition_dir, '.repository_mapping'))

    for repo_path, definition_file in repo_mappings.items():
        # hack: definition_file is a list with exactly one entry
        definition_file = util.ensure_file_exists(
            os.path.join(definition_dir, definition_file[0])
        )
        with open(definition_file) as f:
            definition_contents = f.read()

        repo_owner, repo_name = repo_path.split('/')

        helper = GitHubHelper(
            github=github,
            repository_owner=repo_owner,
            repository_name=repo_name,
        )
        # only do this for branch 'master' to avoid merge conflicts
        for branch_name in ['master']:  # branches(github_cfg, repo_owner, repo_name):
            util.info('Replicating pipeline-definition: {r}:{b}'.format(
                r=repo_path,
                b=branch_name,
            ))
            # create pipeline definition file in .ci/pipeline_definitions
            try:
                helper.create_or_update_file(
                    repository_branch=branch_name,
                    repository_version_file_path='.ci/pipeline_definitions',
                    file_contents=definition_contents,
                    commit_message="Import cc-pipeline definition",
                )
            except Exception:
                pass  # keep going

def setUp(self):
    # type definitions
    types = {
        'a_type': {
            'model': {
                'cfg_type_name': 'a_type',
                'type': 'NamedModelElement',
            }
        },
        'defined_but_unused_type': {
            'model': {
                'cfg_type_name': 'defined_but_unused_type',
                'type': 'NamedModelElement',
            }
        },
        'cfg_set': {
            'model': {
                'cfg_type_name': 'cfg_set',
                'type': 'ConfigurationSet',
            }
        },
    }
    # config sets
    cfg_sets = {
        'singleton_set': {
            'a_type': 'first_value_of_a',
        },
        'set_with_multiple_values': {
            'a_type': {
                'config_names': ['first_value_of_a', 'second_value_of_a'],
                'default': 'second_value_of_a',
            },
        },
    }
    # value definitions
    values = {
        'first_value_of_a': {'some_value': 123},
        'second_value_of_a': {'some_value': 42},
        'ignored_value_of_a': {'some_value': 'xxx'},
    }

    raw = {'cfg_types': types, 'cfg_set': cfg_sets, 'a_type': values}

    self.examinee = ConfigFactory.from_dict(raw)

def delete_config_secret(
    cfg_element: BtpApplicationCertificate,
    cfg_queue_entry: cmm.CfgQueueEntry,
    cfg_factory: model.ConfigFactory,
):
    logger.info('Deleting old certificates')
    gbaas_auth = cfg_factory.btp_application_certificate(
        cfg_element.auth_application_certificate()
    )
    gbaas_client = GBaasAppClient(gbaas_auth)
    cn = cfg_queue_entry.secretId['common_name']
    serial_no, base = BtpApplicationCertificate.parse_serial_no_from_common_name(cn)
    for info in gbaas_client.list_certificates_by_base(base):
        if info.serial_no < serial_no:
            gbaas_client.delete_certificate(info.cn, info.id)

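# Common names have the form '<serial_no>.<base>' (cf. `next_cn` in
# `rotate_cfg_element` above). A minimal sketch of how
# `parse_serial_no_from_common_name` might split such a name; the actual
# implementation may differ:
def parse_serial_no_from_common_name(cn: str):
    # split on the first dot only; the base may itself contain dots
    serial_no, base = cn.split('.', 1)
    return int(serial_no), base
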
def create_webhook_dispatcher_helm_values(
    cfg_set,
    webhook_dispatcher_deployment_cfg: WebhookDispatcherDeploymentConfig,
    cfg_factory: ConfigFactory,
):
    # calculate secrets server endpoint
    secrets_server_name = webhook_dispatcher_deployment_cfg.secrets_server_config_name()
    secrets_server_cfg = cfg_factory.secrets_server(secrets_server_name)
    secrets_server_endpoint = secrets_server_cfg.endpoint_url()
    secrets_server_concourse_cfg_name = '/'.join([
        secrets_server_cfg.secrets().concourse_secret_name(),
        secrets_server_cfg.secrets().concourse_attribute(),
    ])
    container_port = webhook_dispatcher_deployment_cfg.webhook_dispatcher_container_port()

    env_vars = []
    env_vars.append({
        'name': 'SECRETS_SERVER_ENDPOINT',
        'value': secrets_server_endpoint,
    })
    env_vars.append({
        'name': 'SECRETS_SERVER_CONCOURSE_CFG_NAME',
        'value': secrets_server_concourse_cfg_name,
    })

    cmd_args = [
        '--webhook-dispatcher-cfg-name',
        webhook_dispatcher_deployment_cfg.webhook_dispatcher_config_name(),
        '--port', f'"{container_port}"',
        '--cfg-set-name', cfg_set.name(),
    ]

    helm_values = {
        'ingress_host': webhook_dispatcher_deployment_cfg.ingress_host(),
        'tls_name': webhook_dispatcher_deployment_cfg.tls_config_name(),
        'image_reference': webhook_dispatcher_deployment_cfg.image_reference(),
        'cmd_args': cmd_args,
        'env_vars': env_vars,
        'webhook_dispatcher_port': container_port,
    }

    return helm_values

def setUp(self):
    self.tmpdir = TemporaryDirectory()
    # type definition
    self.types_file = self._file('types', '''
        a_type:
          src:
          - file: a_type_values.xxx
          model:
            cfg_type_name: a_type
            type: NamedModelElement
        cfg_set:
          src:
          - file: configs
          model:
            cfg_type_name: cfg_set
            type: ConfigurationSet
        ''')
    # cfg_set definitions
    self.configs_file = self._file('configs', '''
        first_set:
          a_type: first_value_of_a
        second_set:
          a_type: second_value_of_a
        ''')
    # value definitions
    self.values_file = self._file('a_type_values.xxx', '''
        first_value_of_a:
          some_value: 123
        second_value_of_a:
          some_value: 42
        ignored_value_of_a:
          some_value: xxx
        ''')

    self.examinee = ConfigFactory.from_cfg_dir(
        cfg_dir=self.tmpdir.name,
        cfg_types_file=self.types_file,
    )

def deploy_pipeline(
    pipeline_file: CliHint('generated pipeline definition to deploy'),
    pipeline_name: CliHint('the name under which the pipeline shall be deployed'),
    team_name: CliHint('name of the target team'),
    config_dir: CliHints.existing_dir('directory containing Concourse configuration'),
    config_name: CliHint('identifier of the configuration in the config directory to use'),
):
    cfg_factory = ConfigFactory.from_cfg_dir(cfg_dir=config_dir)
    concourse_cfg = cfg_factory.concourse(config_name)
    team_credentials = concourse_cfg.team_credentials(team_name)

    with open(pipeline_file) as f:
        pipeline_definition = f.read()

    pipelines.deploy_pipeline(
        pipeline_definition=pipeline_definition,
        pipeline_name=pipeline_name,
        concourse_cfg=concourse_cfg,
        team_credentials=team_credentials,
    )

def setUp(self):
    # type definitions
    types = {
        'a_type': {
            'model': {
                'cfg_type_name': 'a_type',
                'type': 'NamedModelElement',
            }
        },
        'cfg_set': {
            'model': {
                'cfg_type_name': 'cfg_set',
                'type': 'ConfigurationSet',
            }
        },
    }
    # config sets
    cfg_sets = {
        'first_set': {
            'a_type': 'first_value_of_a',
        },
        'second_set': {
            'a_type': 'second_value_of_a',
        },
    }
    # value definitions
    values = {
        'first_value_of_a': {'some_value': 123},
        'second_value_of_a': {'some_value': 42},
        'ignored_value_of_a': {'some_value': 'xxx'},
    }

    raw = {'cfg_types': types, 'cfg_set': cfg_sets, 'a_type': values}

    self.examinee = ConfigFactory.from_dict(raw)

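# Illustrative usage of the factory built above. The typed accessor
# (`factory.a_type(...)`) and the `.raw` attribute follow the patterns seen
# elsewhere in these snippets (e.g. `cfg_factory.secrets_server(...)` and
# `.raw` on concourse_helmchart elements); the exact accessor API is assumed:
#
#   first_a = self.examinee.a_type('first_value_of_a')
#   assert first_a.raw == {'some_value': 123}
#   first_set = self.examinee.cfg_set('first_set')
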
def _parse_model(raw_dict):
    factory = ConfigFactory.from_dict(raw_dict)
    return factory

def deploy_and_run_smoketest_pipeline(
    config_dir: str,
    config_name: str,
    concourse_team_name: str,
    cc_pipelines_repo_dir: str,
    cc_utils_repo_dir: str,
    wait_for_job_execution: bool = False,
):
    config_factory = ConfigFactory.from_cfg_dir(cfg_dir=config_dir)
    config_set = config_factory.cfg_set(cfg_name=config_name)
    concourse_cfg = config_set.concourse()

    # as this is an integration test, hard-code assumptions about the layout of
    # our pipelines repository
    template_path = os.path.join(cc_utils_repo_dir, 'concourse', 'templates')
    template_include_dir = os.path.join(cc_utils_repo_dir, 'concourse')
    pipeline_name = 'cc-smoketest'

    # retrieve pipeline-definition from github at hardcoded location
    github_cfg = config_set.github()

    githubrepobranch = GitHubRepoBranch(
        github_config=github_cfg,
        repo_owner='kubernetes',
        repo_name='cc-smoketest',
        branch='master',
    )

    helper = GitHubRepositoryHelper.from_githubrepobranch(
        githubrepobranch=githubrepobranch,
    )
    pipeline_definition = yaml.load(
        helper.retrieve_text_file_contents(
            file_path='.ci/smoketest-pipeline.yaml',
        ),
        Loader=yaml.SafeLoader,
    )

    definition_descriptor = DefinitionDescriptor(
        pipeline_name=pipeline_name,
        pipeline_definition=pipeline_definition[pipeline_name],
        main_repo={'path': 'kubernetes/cc-smoketest', 'branch': 'master'},
        concourse_target_cfg=concourse_cfg,
        concourse_target_team=concourse_team_name,
    )

    preprocessor = DefinitionDescriptorPreprocessor()
    template_retriever = TemplateRetriever(template_path=template_path)
    renderer = Renderer(
        template_retriever=template_retriever,
        template_include_dir=template_include_dir,
        cfg_set=config_set,
    )
    deployer = ConcourseDeployer(
        unpause_pipelines=True,
        expose_pipelines=True,
    )

    definition_descriptor = preprocessor.process_definition_descriptor(definition_descriptor)
    rendering_result = renderer.render(definition_descriptor)

    info('deploying pipeline')
    deployment_result = deployer.deploy(rendering_result.definition_descriptor)

    if not deployment_result.deploy_status & DeployStatus.SUCCEEDED:
        fail('deployment failed')

def create_instance_specific_helm_values(
    concourse_cfg: ConcourseConfig,
    config_factory: ConfigFactory,
):
    '''
    Creates a dict containing instance-specific helm values not explicitly stated in
    the `ConcourseConfig`'s helm_chart_values.
    '''
    not_none(concourse_cfg)

    # 'main'-team credentials need to be included in the values.yaml, unlike the other teams
    concourse_uam_cfg_name = concourse_cfg.concourse_uam_config()
    concourse_uam_cfg = config_factory.concourse_uam(concourse_uam_cfg_name)
    main_team = concourse_uam_cfg.main_team()
    external_url = concourse_cfg.external_url()
    external_host = urlparse(external_url).netloc
    ingress_host = concourse_cfg.ingress_host()
    ingress_cfg = config_factory.ingress(concourse_cfg.ingress_config())
    concourse_version = concourse_cfg.concourse_version()

    if concourse_version is ConcourseApiVersion.V5:
        github_config_name = concourse_cfg.github_enterprise_host()
        # 'github_enterprise_host' is only configured for internal concourse
        # instances using github enterprise
        if github_config_name:
            github_config = config_factory.github(github_config_name)
            github_http_url = github_config.http_url()
            github_host = urlparse(github_http_url).netloc
        else:
            github_host = None

        bcrypted_pwd = bcrypt.hashpw(
            main_team.password().encode('utf-8'),
            bcrypt.gensalt(),
        ).decode('utf-8')

        instance_specific_values = {
            'concourse': {
                'web': {
                    'externalUrl': external_url,
                    'auth': {
                        'mainTeam': {
                            'localUser': main_team.username(),
                            'github': {
                                'team': main_team.github_auth_team()
                            }
                        },
                        'github': {
                            'host': github_host
                        }
                    }
                }
            },
            'secrets': {
                'localUsers': main_team.username() + ':' + bcrypted_pwd,
                'githubClientId': main_team.github_auth_client_id(),
                'githubClientSecret': main_team.github_auth_client_secret()
            },
            'web': {
                'ingress': {
                    'annotations': {
                        'cert.gardener.cloud/issuer': ingress_cfg.issuer_name(),
                        'cert.gardener.cloud/purpose': 'managed',
                        'dns.gardener.cloud/class': 'garden',
                        'dns.gardener.cloud/dnsnames': ingress_host,
                        'dns.gardener.cloud/ttl': str(ingress_cfg.ttl()),
                    },
                    'hosts': [external_host, ingress_host],
                    'tls': [{
                        'secretName': concourse_cfg.tls_secret_name(),
                        'hosts': ingress_cfg.tls_host_names(),
                    }],
                }
            }
        }
    else:
        raise NotImplementedError(
            "Concourse version {v} not supported".format(v=concourse_version)
        )
    return instance_specific_values

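# The 'localUsers' secret above has the form '<username>:<bcrypt-hash>'.
# Concourse verifies the supplied password against the stored hash; with the
# bcrypt library used here, that check looks like the following (values are
# illustrative only):
#
#   import bcrypt
#   hashed = bcrypt.hashpw(b'my-password', bcrypt.gensalt())
#   assert bcrypt.checkpw(b'my-password', hashed)
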
def _update_pipeline_definition(
    self,
    push_event,
    delivery_id: str,
    repository: str,
    hostname: str,
    es_client: ccc.elasticsearch.ElasticSearchClient,
    dispatch_start_time: datetime.datetime,
):
    def _do_update(
        delivery_id: str,
        event_type: str,
        repository: str,
        hostname: str,
        dispatch_start_time: datetime.datetime,
        es_client: ccc.elasticsearch.ElasticSearchClient,
    ):
        repo_url = push_event.repository().repository_url()
        job_mapping_set = self.cfg_set.job_mapping()
        job_mapping = job_mapping_set.job_mapping_for_repo_url(repo_url, self.cfg_set)

        replicate_repository_pipelines(
            repo_url=repo_url,
            cfg_set=self.cfg_factory.cfg_set(job_mapping.replication_ctx_cfg_set()),
            whd_cfg=self.whd_cfg,
        )

        process_end_time = datetime.datetime.now()
        process_total_seconds = (process_end_time - dispatch_start_time).total_seconds()

        webhook_delivery_metric = whd.metric.WebhookDelivery.create(
            delivery_id=delivery_id,
            event_type=event_type,
            repository=repository,
            hostname=hostname,
            process_total_seconds=process_total_seconds,
        )
        if es_client:
            ccc.elasticsearch.metric_to_es(
                es_client=es_client,
                metric=webhook_delivery_metric,
                index_name=whd.metric.index_name(webhook_delivery_metric),
            )

    try:
        _do_update(
            delivery_id=delivery_id,
            event_type='create',
            repository=repository,
            hostname=hostname,
            dispatch_start_time=dispatch_start_time,
            es_client=es_client,
        )
    except (JobMappingNotFoundError, ConfigElementNotFoundError) as e:
        # A config element was missing, or no JobMapping for the given repository was
        # present. Print a warning, reload the config, and try again.
        logger.warning(
            f'failed to update pipeline definition: {e}. Will reload config and try again.'
        )
        # Attempt to fetch the latest cfg from the secrets server and replace it
        raw_dict = ccc.secrets_server.SecretsServerClient.default().retrieve_secrets()
        self.cfg_factory = ConfigFactory.from_dict(raw_dict)
        self.cfg_set = self.cfg_factory.cfg_set(self.cfg_set.name())
        # retry
        _do_update(
            delivery_id=delivery_id,
            event_type='create',
            repository=repository,
            hostname=hostname,
            dispatch_start_time=dispatch_start_time,
            es_client=es_client,
        )

def deploy_and_run_smoketest_pipeline(
    config_dir: str,
    config_name: str,
    concourse_team_name: str,
    cc_pipelines_repo_dir: str,
    wait_for_job_execution: bool = False,
):
    config_factory = ConfigFactory.from_cfg_dir(cfg_dir=config_dir)
    config_set = config_factory.cfg_set(cfg_name=config_name)
    concourse_cfg = config_set.concourse()
    team_credentials = concourse_cfg.team_credentials(concourse_team_name)

    # as this is an integration test, hard-code assumptions about the layout of
    # our pipelines repository
    calcdir = lambda path: os.path.join(cc_pipelines_repo_dir, path)

    pipeline_definition_file = calcdir('definitions/test/cc-smoketest.yaml')
    template_path = calcdir('templates')
    template_include_dir = cc_pipelines_repo_dir
    pipeline_name = 'cc-smoketest'
    job_name = 'cc-smoketest-master-head-update-job'

    pipeline_definition = parse_yaml_file(pipeline_definition_file, as_snd=False)

    pipeline_descriptor = RawPipelineDefinitionDescriptor(
        name=pipeline_name,
        base_definition=pipeline_definition[pipeline_name]['base_definition'],
        variants=pipeline_definition[pipeline_name]['variants'],
        template=pipeline_definition[pipeline_name]['template'],
    )

    rendered_pipelines = list(
        render_pipelines(
            pipeline_definition=pipeline_descriptor,
            config_set=config_set,
            template_path=[template_path],
            template_include_dir=template_include_dir,
        )
    )
    if len(rendered_pipelines) == 0:
        fail("smoke-test pipeline definition not found")
    if len(rendered_pipelines) > 1:
        fail("expected exactly one smoketest pipeline-definition, got {n}".format(
            n=len(rendered_pipelines)
        ))
    pipeline_definition, _, _ = rendered_pipelines[0]

    deploy_pipeline(
        pipeline_definition=pipeline_definition,
        pipeline_name=pipeline_name,
        concourse_cfg=concourse_cfg,
        team_credentials=team_credentials,
    )

    api = ConcourseApi(base_url=concourse_cfg.external_url(), team_name=concourse_team_name)
    api.login(
        team=team_credentials.teamname(),
        username=team_credentials.username(),
        passwd=team_credentials.passwd(),
    )

    # trigger an execution and wait for it to finish
    info('triggering smoketest job {jn}'.format(jn=job_name))
    api.trigger_build(pipeline_name, job_name)

    if not wait_for_job_execution:
        info('will not wait for job-execution to finish (--wait-for-job-execution not set)')
        return

    # wait for the job to finish (currently we expect it to succeed)
    # todo: evaluate whether its structure meets our spec
    builds = api.job_builds(pipeline_name, job_name)
    if not builds or len(builds) < 1:
        fail('no builds were found (expected at least one!)')

    last_build = builds[-1]  # please let this be ours

    # now wait for it to finish
    build_event_handler = api.build_events(last_build.id())
    build_event_handler.process_events()
    info('it seems as if the job finished successfully; life is good :-)')

def test_absent_directory_causes_failure(self):
    with self.assertRaises(Failure):
        ConfigFactory.from_cfg_dir(cfg_dir='should not exist')

def test_from_dict_fails_on_none(self):
    with self.assertRaises(Failure):
        ConfigFactory.from_dict(None)

def deploy_monitoring_landscape(
    cfg_set: ConfigurationSet,
    cfg_factory: ConfigFactory,
):
    kubernetes_cfg = cfg_set.kubernetes()
    concourse_cfg = cfg_set.concourse()

    # Set the global context to the cluster specified in the KubernetesConfig
    kube_ctx.set_kubecfg(kubernetes_cfg.kubeconfig())
    ensure_cluster_version(kubernetes_cfg)

    monitoring_config_name = concourse_cfg.monitoring_config()
    monitoring_cfg = cfg_factory.monitoring(monitoring_config_name)
    monitoring_namespace = monitoring_cfg.namespace()

    tls_config_name = concourse_cfg.tls_config()
    tls_config = cfg_factory.tls_config(tls_config_name)

    # deploy kube-state-metrics
    kube_state_metrics_helm_values = create_kube_state_metrics_helm_values(
        monitoring_cfg=monitoring_cfg
    )
    execute_helm_deployment(
        kubernetes_cfg,
        monitoring_namespace,
        'stable/kube-state-metrics',
        'kube-state-metrics',
        kube_state_metrics_helm_values,
    )

    # deploy postgresql exporter
    postgresql_helm_values = create_postgresql_helm_values(
        concourse_cfg=concourse_cfg,
        cfg_factory=cfg_factory,
    )
    execute_helm_deployment(
        kubernetes_cfg,
        monitoring_namespace,
        'stable/prometheus-postgres-exporter',
        'prometheus-postgres-exporter',
        postgresql_helm_values,
    )

    # deploy ingresses for kube-state-metrics and the postgresql exporter
    monitoring_tls_secret_name = monitoring_cfg.tls_secret_name()

    info('Creating tls-secret in monitoring namespace for kube-state-metrics and postgresql...')
    create_tls_secret(
        tls_config=tls_config,
        tls_secret_name=monitoring_tls_secret_name,
        namespace=monitoring_namespace,
        basic_auth_cred=BasicAuthCred(
            user=monitoring_cfg.basic_auth_user(),
            password=monitoring_cfg.basic_auth_pwd(),
        )
    )

    ingress_helper = kube_ctx.ingress_helper()
    info('Create ingress for kube-state-metrics')
    ingress = generate_monitoring_ingress_object(
        secret_name=monitoring_tls_secret_name,
        namespace=monitoring_namespace,
        hosts=[monitoring_cfg.ingress_host(), monitoring_cfg.external_url()],
        service_name=monitoring_cfg.kube_state_metrics().service_name(),
        service_port=monitoring_cfg.kube_state_metrics().service_port(),
    )
    ingress_helper.replace_or_create_ingress(monitoring_namespace, ingress)

    info('Create ingress for postgres-exporter')
    ingress = generate_monitoring_ingress_object(
        secret_name=monitoring_tls_secret_name,
        namespace=monitoring_namespace,
        hosts=[monitoring_cfg.ingress_host(), monitoring_cfg.external_url()],
        service_name=monitoring_cfg.postgresql_exporter().service_name(),
        service_port=monitoring_cfg.postgresql_exporter().service_port(),
    )
    ingress_helper.replace_or_create_ingress(monitoring_namespace, ingress)

def setUp(self):
    self.factory = ConfigFactory.from_dict(simple_cfg_dict())
    self.first_cfg_set = self.factory.cfg_set('first_set')
    self.second_cfg_set = self.factory.cfg_set('second_set')
    self.set_with_two_of_a_kind = self.factory.cfg_set('set_with_two_of_a_kind')

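# A hypothetical `simple_cfg_dict` consistent with the sets referenced above,
# following the raw-dict layout used in the other test fixtures ('cfg_types' /
# 'cfg_set' / per-type value mappings); the actual fixture may differ:
def simple_cfg_dict():
    return {
        'cfg_types': {
            'a_type': {'model': {'cfg_type_name': 'a_type', 'type': 'NamedModelElement'}},
            'cfg_set': {'model': {'cfg_type_name': 'cfg_set', 'type': 'ConfigurationSet'}},
        },
        'cfg_set': {
            'first_set': {'a_type': 'first_value_of_a'},
            'second_set': {'a_type': 'second_value_of_a'},
            'set_with_two_of_a_kind': {
                'a_type': {
                    'config_names': ['first_value_of_a', 'second_value_of_a'],
                    'default': 'second_value_of_a',
                },
            },
        },
        'a_type': {
            'first_value_of_a': {'some_value': 123},
            'second_value_of_a': {'some_value': 42},
        },
    }
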
def test_absent_cfg_types_file_causes_failure(self):
    with self.assertRaises(FileNotFoundError):
        ConfigFactory.from_cfg_dir(
            cfg_dir=self.tmpdir.name,
            cfg_types_file='another absent file',
        )

def deserialise(self, raw_dict):
    return ConfigFactory.from_dict(raw_dict)

def test_from_dict_fails_on_missing_cfg_types(self):
    with self.assertRaises(ValueError):
        ConfigFactory.from_dict({})

def deploy_monitoring_landscape(
    cfg_set: ConfigurationSet,
    cfg_factory: ConfigFactory,
):
    kubernetes_cfg = cfg_set.kubernetes()
    concourse_cfg = cfg_set.concourse()
    ingress_cfg = cfg_set.ingress(concourse_cfg.ingress_config())

    # Set the global context to the cluster specified in the KubernetesConfig
    kube_ctx.set_kubecfg(kubernetes_cfg.kubeconfig())
    ensure_cluster_version(kubernetes_cfg)

    monitoring_config_name = concourse_cfg.monitoring_config()
    monitoring_cfg = cfg_factory.monitoring(monitoring_config_name)
    monitoring_namespace = monitoring_cfg.namespace()

    # deploy kube-state-metrics
    kube_state_metrics_helm_values = create_kube_state_metrics_helm_values(
        monitoring_cfg=monitoring_cfg
    )
    execute_helm_deployment(
        kubernetes_cfg,
        monitoring_namespace,
        'stable/kube-state-metrics',
        'kube-state-metrics',
        kube_state_metrics_helm_values,
    )

    # deploy postgresql exporter
    postgresql_helm_values = create_postgresql_helm_values(
        concourse_cfg=concourse_cfg,
        cfg_factory=cfg_factory,
    )
    execute_helm_deployment(
        kubernetes_cfg,
        monitoring_namespace,
        'stable/prometheus-postgres-exporter',
        'prometheus-postgres-exporter',
        postgresql_helm_values,
    )

    # deploy ingresses for kube-state-metrics and the postgresql exporter
    monitoring_tls_secret_name = monitoring_cfg.tls_secret_name()
    monitoring_basic_auth_secret_name = monitoring_cfg.basic_auth_secret_name()

    info(
        'Creating basic-auth-secret in monitoring namespace for '
        'kube-state-metrics and postgresql...'
    )
    create_basic_auth_secret(
        secret_name=monitoring_basic_auth_secret_name,
        namespace=monitoring_namespace,
        basic_auth_cred=BasicAuthCred(
            user=monitoring_cfg.basic_auth_user(),
            password=monitoring_cfg.basic_auth_pwd(),
        )
    )

    # we need to create two ingress objects since nginx-ingress does not support rewrites for
    # multiple paths unless the premium version is used. NOTE: only one ingress should use
    # gardener-managed dns. Otherwise the dns-controller will periodically complain that the
    # dns-entry is busy, as both ingresses share the same host.
    ingress_helper = kube_ctx.ingress_helper()
    info('Create ingress for kube-state-metrics')
    ingress = generate_monitoring_ingress_object(
        basic_auth_secret_name=monitoring_basic_auth_secret_name,
        tls_secret_name=monitoring_tls_secret_name,
        namespace=monitoring_namespace,
        external_url=monitoring_cfg.external_url(),
        ingress_host=monitoring_cfg.ingress_host(),
        service_name=monitoring_cfg.kube_state_metrics().service_name(),
        service_port=monitoring_cfg.kube_state_metrics().service_port(),
        ingress_config=ingress_cfg,
        managed_dns=True,
    )
    ingress_helper.replace_or_create_ingress(monitoring_namespace, ingress)

    info('Create ingress for postgres-exporter')
    ingress = generate_monitoring_ingress_object(
        basic_auth_secret_name=monitoring_basic_auth_secret_name,
        tls_secret_name=monitoring_tls_secret_name,
        namespace=monitoring_namespace,
        external_url=monitoring_cfg.external_url(),
        ingress_host=monitoring_cfg.ingress_host(),
        service_name=monitoring_cfg.postgresql_exporter().service_name(),
        service_port=monitoring_cfg.postgresql_exporter().service_port(),
        ingress_config=ingress_cfg,
        managed_dns=False,
    )
    ingress_helper.replace_or_create_ingress(monitoring_namespace, ingress)

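# A minimal sketch of what `generate_monitoring_ingress_object` might build,
# assuming the official kubernetes Python client (networking/v1 types) and an
# nginx ingress; the gardener annotation names follow those used for the
# concourse ingress elsewhere in this module, but the basic-auth annotations
# and path/TLS details are assumptions, not the confirmed implementation:
from kubernetes import client as k8s_client


def generate_monitoring_ingress_object(
    basic_auth_secret_name: str,
    tls_secret_name: str,
    namespace: str,
    external_url: str,
    ingress_host: str,
    service_name: str,
    service_port: int,
    ingress_config,
    managed_dns: bool,
):
    annotations = {
        # protect the exporter endpoints with basic-auth (nginx-ingress)
        'nginx.ingress.kubernetes.io/auth-type': 'basic',
        'nginx.ingress.kubernetes.io/auth-secret': basic_auth_secret_name,
    }
    if managed_dns:
        # only one ingress per host may own the gardener-managed dns entry
        annotations.update({
            'cert.gardener.cloud/issuer': ingress_config.issuer_name(),
            'cert.gardener.cloud/purpose': 'managed',
            'dns.gardener.cloud/class': 'garden',
            'dns.gardener.cloud/dnsnames': ingress_host,
            'dns.gardener.cloud/ttl': str(ingress_config.ttl()),
        })

    backend = k8s_client.V1IngressBackend(
        service=k8s_client.V1IngressServiceBackend(
            name=service_name,
            port=k8s_client.V1ServiceBackendPort(number=service_port),
        ),
    )
    rules = [
        k8s_client.V1IngressRule(
            host=host,
            http=k8s_client.V1HTTPIngressRuleValue(
                paths=[k8s_client.V1HTTPIngressPath(
                    path='/',
                    path_type='Prefix',
                    backend=backend,
                )],
            ),
        )
        for host in (external_url, ingress_host)
    ]
    return k8s_client.V1Ingress(
        api_version='networking.k8s.io/v1',
        kind='Ingress',
        metadata=k8s_client.V1ObjectMeta(
            name=service_name,
            namespace=namespace,
            annotations=annotations,
        ),
        spec=k8s_client.V1IngressSpec(
            rules=rules,
            tls=[k8s_client.V1IngressTLS(
                hosts=[external_url, ingress_host],
                secret_name=tls_secret_name,
            )],
        ),
    )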