def component_descriptor(
    name: str,
    version: str,
    component_dependencies: CliHint(typehint=_parse_component_deps, action='append') = [],
    container_image_dependencies: CliHint(typehint=_parse_container_image_deps, action='append') = [],
    web_dependencies: CliHint(typehint=_parse_web_deps, action='append') = [],
    generic_dependencies: CliHint(typehint=_parse_generic_deps, action='append') = [],
):
    '''Assemble a component descriptor for a single component and print it as YAML.

    All dependency arguments may be passed multiple times (CLI 'append' action);
    each entry is registered with the adder matching its dependency kind.
    '''
    component = Component.create(name=name, version=version)
    deps = component.dependencies()

    # register every passed dependency with its kind-specific adder
    for dep in component_dependencies:
        deps.add_component_dependency(dep)
    for dep in container_image_dependencies:
        deps.add_container_image_dependency(dep)
    for dep in web_dependencies:
        deps.add_web_dependency(dep)
    for dep in generic_dependencies:
        deps.add_generic_dependency(dep)

    # emit the resulting (single-component) descriptor to stdout
    print(yaml.dump({'components': [component.raw]}, indent=2))
def component_descriptor(
    name: str,
    version: str,
    component_dependencies: CliHint(action='append')=[],
    container_image_dependencies: CliHint(action='append')=[],
    web_dependencies: CliHint(action='append')=[],
    generic_dependencies: CliHint(action='append')=[],
    validation_policies: CliHint(
        type=ValidationPolicy,
        typehint=[ValidationPolicy],
        choices=[policy for policy in ValidationPolicy],
    )=[],
):
    '''Assemble a component descriptor for a single component and print it as YAML.

    Dependencies are parsed from the (repeatable) CLI arguments and validated
    according to the given validation policies.
    '''
    component = Component.create(name=name, version=version)

    # maintain old behaviour: strict validation unless policies were given explicitly
    policies = validation_policies or [ValidationPolicy.FORBID_EXTRA_ATTRIBUTES]

    component.add_dependencies(
        _parse_dependencies(
            component_dependencies=component_dependencies,
            container_image_dependencies=container_image_dependencies,
            web_dependencies=web_dependencies,
            generic_dependencies=generic_dependencies,
            validation_policies=policies,
        )
    )

    # emit the resulting (single-component) descriptor to stdout
    print(yaml.dump({'components': [component.raw]}, indent=2))
def greatest_release_version(
    github_repository_url: CliHint(help='e.g.: https://github.com/gardener/cc-utils'),
    anonymous: CliHint(
        typehint=bool,
        help='Use anonymous access. Unauthenticated access is only possible on github.com.',
    ) = False,
    ignore_prereleases: CliHint(
        typehint=bool,
        help='Ignore prerelease-versions (e.g.: 1.2.3-foo)',
    ) = False,
):
    '''Find the release with the greatest name (according to semver) and print its semver-version.

    Note:
    - This will only consider releases whose names are either immediately parseable as semver-
      versions, or prefixed with a single character ('v').
    - The 'v'-prefix (if present) will be not be present in the output.
    - If a release has no name, its tag will be used instead of its name.

    For more details on the ordering of semantic versioning, see 'https://www.semver.org'.
    '''
    parse_result = urllib.parse.urlparse(github_repository_url)
    if not parse_result.netloc:
        raise ValueError(f'Could not determine host for github-url {github_repository_url}')
    host = parse_result.netloc

    try:
        path = parse_result.path.strip('/')
        # exactly one '/' expected ('<org>/<repo>'); anything else raises ValueError
        org, repo = path.split('/')
    except ValueError as e:
        # explicit exception chaining (PEP 3134): keep the original unpacking
        # error as __cause__ instead of only embedding its text
        raise ValueError(f"Could not extract org- and repo-name. Error: {e}") from e

    if anonymous:
        # unauthenticated API access only works against the public github.com
        if 'github.com' not in host:
            raise ValueError("Anonymous access is only possible for github.com")
        github_api = github3.GitHub()
        repo_helper = GitHubRepositoryHelper(owner=org, name=repo, github_api=github_api)
    else:
        repo_helper = ccc.github.repo_helper(host=host, org=org, repo=repo)

    print(
        find_greatest_github_release_version(
            releases=repo_helper.repository.releases(),
            warn_for_unparseable_releases=False,
            ignore_prerelease_versions=ignore_prereleases,
        )
    )
def add_dependencies(
    descriptor_src_file: CliHints.existing_file(),
    component_name: str,
    component_version: str,
    descriptor_out_file: str=None,
    component_dependencies: CliHint(action='append')=[],
    container_image_dependencies: CliHint(action='append')=[],
    web_dependencies: CliHint(action='append')=[],
    generic_dependencies: CliHint(action='append')=[],
    validation_policies: CliHint(
        type=ValidationPolicy,
        typehint=[ValidationPolicy],
        choices=[policy for policy in ValidationPolicy],
    )=[],
):
    '''Add dependencies to one component of an existing component-descriptor file.

    The updated descriptor is written to descriptor_out_file if given, otherwise
    printed to stdout. Fails if the component is not present in the source file.
    '''
    product = ComponentDescriptor.from_dict(parse_yaml_file(descriptor_src_file))
    component = product.component(
        ComponentReference.create(name=component_name, version=component_version)
    )
    if not component:
        fail('component {c}:{v} was not found in {f}'.format(
            c=component_name,
            v=component_version,
            f=descriptor_src_file
        ))

    # maintain old behaviour
    if not validation_policies:
        validation_policies = [ValidationPolicy.FORBID_EXTRA_ATTRIBUTES]

    dependencies = _parse_dependencies(
        component_dependencies=component_dependencies,
        container_image_dependencies=container_image_dependencies,
        web_dependencies=web_dependencies,
        generic_dependencies=generic_dependencies,
        validation_policies=validation_policies,
    )
    component.add_dependencies(dependencies)

    # BUGFIX: the descriptor was previously also printed unconditionally here,
    # duplicating the output below and ignoring the --descriptor-out-file choice.
    # Round-trip through json to obtain plain dicts yaml can serialise.
    product_dict = json.loads(json.dumps({'components': [component.raw]}))
    if not descriptor_out_file:
        print(yaml.dump(product_dict, indent=2))
    else:
        with open(descriptor_out_file, 'w') as f:
            yaml.dump(product_dict, f, indent=2)
def send_mail(
    email_cfg_name: CliHint(
        help="reference to an email cfg (see repo cc-config / secrets-server)"
    ),
    recipients: CliHint(typehint=[str], help="Recipient email address"),
    mail_template_file: CliHints.existing_file(),
    subject: CliHint(help="email subject"),
    cc_recipients: CliHint(typehint=[str], help="Carbon copy email address") = [],
    replace_token: CliHint(typehint=[str], help="<key>=<value> (replace <key> in body)") = [],
):
    '''
    Sends an email using the specified email_cfg (retrieved from a cfg_factory) to the specified
    recipients. The mail body is read from a file. A simple token-replacement is done if
    (optional) replace-tokens are given.

    @param recipients: mail recipients (email addresses)
    @param mail_template_file: path to the mail template file. Must exist.
    @param subject: email subject
    @param cc_recipients: cc mail recipients
    @param replace_token: format: <token>=<replace-value> - tokens in mail-body are replaced
    '''
    not_empty(email_cfg_name)

    cfg_factory = ctx().cfg_factory()
    email_cfg = cfg_factory.email(email_cfg_name)

    with open(mail_template_file) as f:
        mail_template = f.read()

    # validate template-tokens
    # BUGFIX: materialise as a list; the previous filter-iterator was exhausted
    # by len(list(...)), so ' '.join(...) always produced an empty token listing
    invalid_tokens = [
        t for t in replace_token if not isinstance(t, str) or '=' not in t
    ]
    if invalid_tokens:
        fail('all replace-tokens must be of form <key>=<value>: ' + ' '.join(invalid_tokens))

    # parse replace-tokens (split only on the first '=' so values may contain '=')
    replace_tokens = dict(map(lambda t: t.split('=', 1), replace_token))

    _send_mail(
        email_cfg=email_cfg,
        recipients=recipients,
        mail_template=mail_template,
        subject=subject,
        cc_recipients=cc_recipients,
        replace_tokens=replace_tokens,
    )
def deploy_or_upgrade_concourse(
    config_set_name: CliHint(typehint=str, help=CONFIG_SET_HELP),
    deployment_name: CliHint(typehint=str, help="namespace and deployment name")='concourse',
    timeout_seconds: CliHint(typehint=int, help="how long to wait for concourse startup")=180,
):
    '''Deploys a new concourse-instance using the given deployment name and config-directory.'''
    # helm must be available before any chart can be installed
    ensure_helm_setup()
    cfg_set = ctx().cfg_factory().cfg_set(config_set_name)
    setup_concourse.deploy_concourse_landscape(
        config_set=cfg_set,
        deployment_name=deployment_name,
        timeout_seconds=timeout_seconds,
    )
def deploy_or_upgrade_tekton_dashboard_ingress(
    config_set_name: CliHint(typehint=str, help=CONFIG_SET_HELP),
    chart_dir: CliHints.existing_dir(
        help="directory of tekton-dashboard-ingress chart"),
    deployment_name: str = 'tekton-dashboard-ingress',
):
    '''Deploy the tekton-dashboard ingress (guarded by an oauth2-proxy) from the cfg-set.'''
    chart_dir = os.path.abspath(chart_dir)

    cfg_factory = ctx().cfg_factory()
    cfg_set = cfg_factory.cfg_set(config_set_name)

    # kubernetes cluster is taken from the concourse cfg of the same cfg-set
    kubernetes_cfg = cfg_factory.kubernetes(
        cfg_set.concourse().kubernetes_cluster_config()
    )
    oauth2_proxy_cfg = cfg_set.oauth2_proxy()
    dashboard_ingress_cfg = cfg_set.tekton_dashboard_ingress()

    # the oauth2-proxy in front of the dashboard is deployed alongside it
    setup_oauth2_proxy.deploy_oauth2_proxy(
        oauth2_proxy_config=oauth2_proxy_cfg,
        kubernetes_config=kubernetes_cfg,
        deployment_name=f'{deployment_name}-oauth2-proxy',
    )
    setup_tekton_dashboard.deploy_tekton_dashboard_ingress(
        tekton_dashboard_ingress_config=dashboard_ingress_cfg,
        kubernetes_config=kubernetes_cfg,
        chart_dir=chart_dir,
        deployment_name=deployment_name,
    )
def deploy_or_upgrade_tekton_dashboard_ingress(
    config_set_name: CliHint(typehint=str, help=CONFIG_SET_HELP),
    tekton_chart_dir: CliHints.existing_dir(help="directory of tekton-dashboard-ingress chart"),
    oauth_proxy_chart_dir: CliHints.existing_dir(help="directory of oauth2-proxy chart"),
    deployment_name: str='tekton-dashboard-ingress',
):
    '''Deploy the tekton-dashboard ingress and its guarding oauth2-proxy from local charts.'''
    oauth2_proxy_chart_dir = os.path.abspath(oauth_proxy_chart_dir)
    tekton_chart_dir = os.path.abspath(tekton_chart_dir)

    cfg_set = ctx().cfg_factory().cfg_set(config_set_name)
    oauth2_proxy_cfg = cfg_set.oauth2_proxy()
    dashboard_ingress_cfg = cfg_set.tekton_dashboard_ingress()

    # the oauth2-proxy in front of the dashboard is deployed alongside it
    setup_oauth2_proxy.deploy_oauth2_proxy(
        oauth2_proxy_config=oauth2_proxy_cfg,
        chart_dir=oauth2_proxy_chart_dir,
        deployment_name=f'{deployment_name}-oauth2-proxy',
    )
    setup_tekton_dashboard_ingress.deploy_tekton_dashboard_ingress(
        tekton_dashboard_ingress_config=dashboard_ingress_cfg,
        chart_dir=tekton_chart_dir,
        deployment_name=deployment_name,
    )
def set_teams(
    config_name: CliHint(typehint=str, help='the cfg_set name to use'),
):
    '''Apply the team configuration from the given cfg-set to its concourse instance.'''
    concourse_cfg = ctx().cfg_factory().cfg_set(cfg_name=config_name).concourse()
    setup_concourse.set_teams(config=concourse_cfg)
def upload_grouped_product_images(
    protecode_cfg_name: str,
    product_cfg_file: CliHints.existing_file(),
    processing_mode: CliHint(
        choices=list(ProcessingMode),
        type=ProcessingMode,
    )=ProcessingMode.RESCAN,
    protecode_group_id: int=5,
    parallel_jobs: int=4,
    cve_threshold: int=7,
    ignore_if_triaged: bool=True,
    reference_group_ids: [int]=[],
):
    '''Upload the images of a component descriptor to protecode, grouped per component.'''
    protecode_cfg = ctx().cfg_factory().protecode(protecode_cfg_name)
    component_descriptor = ComponentDescriptor.from_dict(
        raw_dict=parse_yaml_file(product_cfg_file)
    )

    # NOTE(review): the returned upload results / license report are currently discarded
    upload_results, license_report = upload_grouped_images(
        protecode_cfg=protecode_cfg,
        component_descriptor=component_descriptor,
        protecode_group_id=protecode_group_id,
        parallel_jobs=parallel_jobs,
        cve_threshold=cve_threshold,
        ignore_if_triaged=ignore_if_triaged,
        processing_mode=processing_mode,
        reference_group_ids=reference_group_ids,
    )
def deploy_secrets_server(
    config_set_name: CliHint(typehint=str, help=CONFIG_SET_HELP),
):
    '''Deploy the secrets-server configured in the given cfg-set.'''
    secrets_server_cfg = ctx().cfg_factory().cfg_set(config_set_name).secrets_server()
    setup_secrets_server.deploy_secrets_server(
        secrets_server_config=secrets_server_cfg,
    )
def deploy_or_upgrade_monitoring(
    config_set_name: CliHint(typehint=str, help=CONFIG_SET_HELP),
):
    '''Deploy (or upgrade) the monitoring landscape of the given cfg-set.'''
    factory = ctx().cfg_factory()
    setup_monitoring.deploy_monitoring_landscape(
        cfg_set=factory.cfg_set(config_set_name),
        cfg_factory=factory,
    )
def destroy_concourse(
    config_set_name: CliHint(typehint=str, help=CONFIG_SET_HELP),
    release_name: CliHint(
        typehint=str, help="namespace and deployment name") = 'concourse',
    dry_run: bool = True,
):
    '''Destroys a concourse-instance using the given helm release name'''
    # always announce what would be / is about to be removed
    _display_info(
        dry_run=dry_run,
        operation="DESTROYED",
        deployment_name=release_name,
    )

    # dry-run stops after the announcement
    if dry_run:
        return

    setup_concourse.destroy_concourse_landscape(
        config_name=config_set_name,
        release_name=release_name,
    )
def deploy_tekton(
    config_set_name: CliHint(typehint=str, help=CONFIG_SET_HELP),
):
    '''Deploy tekton as configured in the given cfg-set.'''
    tekton_cfg = ctx().cfg_factory().cfg_set(config_set_name).tekton()
    setup_tekton.deploy_tekton(tekton_config=tekton_cfg)
def add_dependencies(
    descriptor_src_file: CliHints.existing_file(),
    component_name: str,
    component_version: str,
    descriptor_out_file: str = None,
    component_dependencies: CliHint(typehint=_parse_component_deps, action='append') = [],
    container_image_dependencies: CliHint(typehint=_parse_container_image_deps, action='append') = [],
    web_dependencies: CliHint(typehint=_parse_web_deps, action='append') = [],
    generic_dependencies: CliHint(typehint=_parse_generic_deps, action='append') = [],
):
    '''Add dependencies to one component of an existing component-descriptor file.

    Writes the updated descriptor to descriptor_out_file if given, otherwise to stdout.
    Fails if the named component is not present in the source file.
    '''
    product = ComponentDescriptor.from_dict(parse_yaml_file(descriptor_src_file))
    component = product.component(
        ComponentReference.create(name=component_name, version=component_version)
    )
    if not component:
        fail('component {c}:{v} was not found in {f}'.format(
            c=component_name, v=component_version, f=descriptor_src_file))

    deps = component.dependencies()
    # register every passed dependency with its kind-specific adder
    for dep in component_dependencies:
        deps.add_component_dependency(dep)
    for dep in container_image_dependencies:
        deps.add_container_image_dependency(dep)
    for dep in web_dependencies:
        deps.add_web_dependency(dep)
    for dep in generic_dependencies:
        deps.add_generic_dependency(dep)

    # round-trip through json to obtain plain dicts for serialisation
    product_dict = json.loads(json.dumps({'components': [component.raw]}))
    if descriptor_out_file:
        with open(descriptor_out_file, 'w') as f:
            yaml.dump(product_dict, f, indent=2)
    else:
        print(yaml.dump(product_dict, indent=2))
def deploy_or_upgrade_clamav(
    config_set_name: CliHint(typehint=str, help=CONFIG_SET_HELP),
):
    '''Deploy ClamAV for the concourse of the given cfg-set; no-op if not configured.'''
    cfg_set = ctx().cfg_factory().cfg_set(config_set_name)
    concourse_cfg = cfg_set.concourse()
    kubernetes_cfg_name = concourse_cfg.kubernetes_cluster_config()
    clamav_cfg_name = concourse_cfg.clamav_config()

    # guard clause: ClamAV deployment is optional per concourse cfg
    if clamav_cfg_name is None:
        info(
            f"No ClamAV configured for the Concourse in config set '{config_set_name}'. Will "
            "not deploy ClamAV."
        )
        return

    setup_clamav.deploy_clam_av(
        clamav_cfg_name=clamav_cfg_name,
        kubernetes_cfg_name=kubernetes_cfg_name,
    )
def deploy_or_upgrade_gardenlinux_cache(
    config_set_name: CliHint(typehint=str, help=CONFIG_SET_HELP),
    chart_dir: CliHints.existing_dir(help="directory of gardenlinux-cache chart"),
    deployment_name: str='gardenlinux-cache',
):
    '''Deploy (or upgrade) the gardenlinux-cache from the given chart and cfg-set.'''
    chart_dir = os.path.abspath(chart_dir)
    cache_cfg = ctx().cfg_factory().cfg_set(config_set_name).gardenlinux_cache()

    setup_gardenlinux_cache.deploy_gardenlinux_cache(
        gardenlinux_cache_config=cache_cfg,
        chart_dir=chart_dir,
        deployment_name=deployment_name,
    )
def start_worker_resurrector(
    config_name: CliHint(typehint=str, help='the config set name to use'),
    concourse_namespace='concourse',
):
    '''Run the worker resurrector against the concourse of the given cfg-set.

    NOTE(review): presumably restarts broken worker pods (see resurrect_pods) — confirm.
    '''
    cfg_set = ctx().cfg_factory().cfg_set(cfg_name=config_name)

    # point the kubernetes client at the cluster from the cfg-set
    kube_client = kube.ctx.Ctx()
    kube_client.set_kubecfg(cfg_set.kubernetes().kubeconfig())

    concourse_client = client.from_cfg(
        concourse_cfg=cfg_set.concourse(),
        team_name='main',
    )
    resurrect_pods(
        namespace=concourse_namespace,
        concourse_client=concourse_client,
        kubernetes_client=kube_client,
    )
def deploy_or_upgrade_webhook_dispatcher(
    config_set_name: CliHint(typehint=str, help=CONFIG_SET_HELP),
    chart_dir: CliHints.existing_dir(help="directory of webhook dispatcher chart"),
    deployment_name: str='webhook-dispatcher',
):
    '''Deploy (or upgrade) the webhook-dispatcher from the given chart and cfg-set.'''
    chart_dir = os.path.abspath(chart_dir)
    cfg_set = ctx().cfg_factory().cfg_set(config_set_name)

    setup_whd.deploy_webhook_dispatcher_landscape(
        cfg_set=cfg_set,
        webhook_dispatcher_deployment_cfg=cfg_set.webhook_dispatcher_deployment(),
        chart_dir=chart_dir,
        deployment_name=deployment_name,
    )
def update_certificate(
    tls_config_name: CliHint(typehint=str, help="TLS config element name to update"),
    certificate_file: CliHints.existing_file(help="certificate file path"),
    key_file: CliHints.existing_file(help="private key file path"),
    output_path: CliHints.existing_dir(help="TLS config file output path"),
):
    '''Replace certificate and key of a tls-config element and write the updated cfg file.'''
    class LiteralStr(str):
        """Used to create yaml block style indicator | """

    def literal_str_representer(dumper, data):
        """Used to create yaml block style indicator"""
        return dumper.represent_scalar('tag:yaml.org,2002:str', data, style='|')

    # read new certificate data
    with open(os.path.abspath(certificate_file)) as f:
        certificate = f.read()
    with open(os.path.abspath(key_file)) as f:
        private_key = f.read()

    # set new certificate data to specified argument 'tls_config_name'
    cfg_factory = ctx().cfg_factory()
    tls_config_element = cfg_factory.tls_config(tls_config_name)
    tls_config_element.set_private_key(private_key)
    tls_config_element.set_certificate(certificate)

    # patch tls config dict so that yaml.dump outputs literal strings using '|'
    yaml.add_representer(LiteralStr, literal_str_representer)
    configs = cfg_factory._configs('tls_config')  # NOTE(review): private API of cfg_factory
    for cfg_name, cfg_body in configs.items():
        for attr_name in cfg_body:
            configs[cfg_name][attr_name] = LiteralStr(configs[cfg_name][attr_name])

    # dump updated tls config to given output path
    tls_config_type = cfg_factory._cfg_types()['tls_config']
    tls_config_file = list(tls_config_type.sources())[0].file()
    with open(os.path.join(output_path, tls_config_file), 'w') as f:
        yaml.dump(configs, f, indent=2, default_flow_style=False)
def assign_github_team_to_repo(
    github_cfg_name: str,
    github_org_name: str,
    auth_token: CliHint(help="Token from an org admin user. Token must have 'admin:org' scope"),
    team_name: str='ci',
):
    '''
    Assign team 'team_name' to all repositories in organization 'github_org_name' and
    give the team admin rights on those repositories. The team will be created if it does not
    exist and the technical github user (from github_cfg_name) will be assigned to the team.
    The token of the technical github user must have the privilege to create webhooks
    (scope admin:repo_hook)
    'auth_token' must grant 'admin:org' privileges.
    '''
    cfg_factory = ctx().cfg_factory()
    github_cfg = cfg_factory.github(github_cfg_name)
    technical_user_name = github_cfg.credentials().username()

    # replace the configured token with the passed org-admin token
    github_cfg.credentials().set_auth_token(auth_token=auth_token)
    github = ccc.github.github_api(
        github_cfg=github_cfg,
    )

    _create_team(
        github=github,
        organization_name=github_org_name,
        team_name=team_name,
    )
    _add_user_to_team(
        github=github,
        organization_name=github_org_name,
        team_name=team_name,
        user_name=technical_user_name,
    )
    _add_all_repos_to_team(
        github=github,
        organization_name=github_org_name,
        team_name=team_name,
    )
def deploy_or_upgrade_whitesource_api_extension(
    config_set_name: CliHint(typehint=str, help=CONFIG_SET_HELP),
    chart_dir: str = False,
    deployment_name: str = False,
    whitesource_cfg_name: str = None,
):
    '''Deploy (or upgrade) the whitesource api extension from the given cfg-set.

    chart_dir / deployment_name use False as "not given" sentinel; only explicitly
    passed values are forwarded so the callee's own defaults apply otherwise.
    '''
    cfg_set = ctx().cfg_factory().cfg_set(config_set_name)

    # forward only the optional arguments that were explicitly given
    kwargs = {}
    if deployment_name is not False:
        kwargs['deployment_name'] = deployment_name
    if chart_dir is not False:
        kwargs['chart_dir'] = existing_dir(chart_dir)

    whitesource_cfg = cfg_set.whitesource(cfg_name=whitesource_cfg_name)

    wss.deploy_whitesource_api_extension(
        kubernetes_cfg=cfg_set.kubernetes(),
        whitesource_cfg=whitesource_cfg,
        **kwargs,
    )
def deploy_or_upgrade_landscape(
    config_set_name: CliHint(typehint=str, help=CONFIG_SET_HELP),
    components: CliHint(
        type=LandscapeComponent,
        typehint=[LandscapeComponent],
        choices=[component for component in LandscapeComponent],
        help="list of components to deploy. By default, ALL components will be deployed."
    ) = None,
    webhook_dispatcher_chart_dir: CliHint(
        typehint=str,
        help="directory of webhook dispatcher chart",
    ) = None,
    concourse_deployment_name: CliHint(
        typehint=str, help="namespace and deployment name for Concourse") = 'concourse',
    timeout_seconds: CliHint(
        typehint=int, help="how long to wait for concourse startup") = 180,
    webhook_dispatcher_deployment_name: str = 'webhook-dispatcher',
    dry_run: bool = True,
):
    '''Deploys the given components of the Concourse landscape.

    Components are deployed in a fixed order (secrets-server, concourse, webhook
    dispatcher, monitoring, ClamAV); a dry-run only prints what would be deployed.
    '''
    # handle default (all known components)
    if not components:
        components = [component for component in LandscapeComponent]

    # Validate: the WHD chart dir is only required (and only checked) if WHD is deployed
    if LandscapeComponent.WHD in components:
        if not webhook_dispatcher_chart_dir:
            raise ValueError(
                f"--webhook-dispatcher-chart-dir must be given if component "
                f"'{LandscapeComponent.WHD.value}' is to be deployed.")
        else:
            webhook_dispatcher_chart_dir = existing_dir(webhook_dispatcher_chart_dir)

    _display_info(
        dry_run=dry_run,
        operation="DEPLOYED",
        deployment_name=concourse_deployment_name,
        components=components,
    )

    # dry-run stops after the announcement
    if dry_run:
        return

    cfg_factory = ctx().cfg_factory()
    config_set = cfg_factory.cfg_set(config_set_name)
    concourse_cfg = config_set.concourse()

    # Set the global kubernetes cluster context to the cluster specified in the ConcourseConfig
    kubernetes_config_name = concourse_cfg.kubernetes_cluster_config()
    kubernetes_cfg = cfg_factory.kubernetes(kubernetes_config_name)
    kube_ctx.set_kubecfg(kubernetes_cfg.kubeconfig())
    ensure_cluster_version(kubernetes_cfg)

    # deploy the requested components in fixed order
    if LandscapeComponent.SECRETS_SERVER in components:
        info('Deploying Secrets Server')
        deploy_secrets_server(
            config_set_name=config_set_name,
        )

    if LandscapeComponent.CONCOURSE in components:
        info('Deploying Concourse')
        deploy_or_upgrade_concourse(
            config_set_name=config_set_name,
            deployment_name=concourse_deployment_name,
            timeout_seconds=timeout_seconds,
        )

    if LandscapeComponent.WHD in components:
        info('Deploying Webhook Dispatcher')
        deploy_or_upgrade_webhook_dispatcher(
            config_set_name=config_set_name,
            chart_dir=webhook_dispatcher_chart_dir,
            deployment_name=webhook_dispatcher_deployment_name,
        )

    if LandscapeComponent.MONITORING in components:
        info('Deploying Monitoring stack')
        deploy_or_upgrade_monitoring(
            config_set_name=config_set_name,
        )

    if LandscapeComponent.CLAMAV in components:
        info('Deploying ClamAV')
        deploy_or_upgrade_clamav(
            config_set_name=config_set_name,
        )
def deploy_or_upgrade_landscape(
    config_set_name: CliHint(typehint=str, help=CONFIG_SET_HELP),
    components: CliHint(
        type=LandscapeComponent,
        typehint=[LandscapeComponent],
        choices=[component for component in LandscapeComponent],
        help="list of components to deploy. By default, ALL components will be deployed."
    )=None,
    webhook_dispatcher_chart_dir: CliHint(
        typehint=str,
        help="directory of webhook dispatcher chart",
    )=None,
    gardenlinux_cache_chart_dir: CliHint(
        typehint=str,
        help="directory of gardenlinux-cache chart",
    )=None,
    concourse_deployment_name: CliHint(
        typehint=str, help="namespace and deployment name for Concourse"
    )='concourse',
    whitesource_backend_chart_dir: CliHint(
        typehint=str,
        help="directory of Whitesource Backend chart",
    )=None,
    whitesource_backend_deployment_name: CliHint(
        typehint=str, help="namespace and deployment name for Whitesource"
    )='whitesource-backend',
    whitesource_cfg_name: CliHint(
        typehint=str,
        help='Whitesource Config',
    )='gardener',
    timeout_seconds: CliHint(typehint=int, help="how long to wait for concourse startup")=180,
    webhook_dispatcher_deployment_name: str='webhook-dispatcher',
    gardenlinux_cache_deployment_name: str='gardenlinux-cache',
    dry_run: bool=True,
):
    '''Deploys the given components of the Concourse landscape.

    Components are deployed in a fixed order (secrets-server, concourse, webhook
    dispatcher, ClamAV, gardenlinux-cache, whitesource backend); a dry-run only
    prints what would be deployed.
    '''
    # handle default (all known components)
    if not components:
        components = [component for component in LandscapeComponent]

    # Validate: chart dirs are only required (and only checked) for requested components
    if LandscapeComponent.WHD in components:
        if not webhook_dispatcher_chart_dir:
            raise ValueError(
                f"--webhook-dispatcher-chart-dir must be given if component "
                f"'{LandscapeComponent.WHD.value}' is to be deployed."
            )
        else:
            webhook_dispatcher_chart_dir = existing_dir(webhook_dispatcher_chart_dir)
    if LandscapeComponent.GARDENLINUX_CACHE in components:
        if not gardenlinux_cache_chart_dir:
            raise ValueError(
                f"--gardenlinux-cache-chart-dir must be given if component "
                f"'{LandscapeComponent.GARDENLINUX_CACHE.value}' is to be deployed."
            )
        else:
            gardenlinux_cache_chart_dir = existing_dir(gardenlinux_cache_chart_dir)

    _display_info(
        dry_run=dry_run,
        operation="DEPLOYED",
        deployment_name=concourse_deployment_name,
        components=components,
    )

    # dry-run stops after the announcement
    if dry_run:
        return

    cfg_factory = ctx().cfg_factory()
    config_set = cfg_factory.cfg_set(config_set_name)
    concourse_cfg = config_set.concourse()

    # Set the global kubernetes cluster context to the cluster specified in the ConcourseConfig
    kubernetes_config_name = concourse_cfg.kubernetes_cluster_config()
    kubernetes_cfg = cfg_factory.kubernetes(kubernetes_config_name)
    kube_ctx.set_kubecfg(kubernetes_cfg.kubeconfig())

    # deploy the requested components in fixed order
    if LandscapeComponent.SECRETS_SERVER in components:
        info('Deploying Secrets Server')
        deploy_secrets_server(
            config_set_name=config_set_name,
        )

    if LandscapeComponent.CONCOURSE in components:
        info('Deploying Concourse')
        deploy_or_upgrade_concourse(
            config_set_name=config_set_name,
            deployment_name=concourse_deployment_name,
            timeout_seconds=timeout_seconds,
        )

    if LandscapeComponent.WHD in components:
        info('Deploying Webhook Dispatcher')
        deploy_or_upgrade_webhook_dispatcher(
            config_set_name=config_set_name,
            chart_dir=webhook_dispatcher_chart_dir,
            deployment_name=webhook_dispatcher_deployment_name,
        )

    if LandscapeComponent.CLAMAV in components:
        info('Deploying ClamAV')
        deploy_or_upgrade_clamav(
            config_set_name=config_set_name,
        )

    if LandscapeComponent.GARDENLINUX_CACHE in components:
        info('Deploying Gardenlinux Cache')
        deploy_or_upgrade_gardenlinux_cache(
            config_set_name=config_set_name,
            chart_dir=gardenlinux_cache_chart_dir,
            deployment_name=gardenlinux_cache_deployment_name,
        )

    if LandscapeComponent.WHITESOURCE_BACKEND in components:
        info('Deploying Whitesource Backend')
        # forward only the optional arguments that were actually given
        extra_args = {}
        if whitesource_backend_deployment_name:
            extra_args['deployment_name'] = whitesource_backend_deployment_name
        if whitesource_cfg_name:
            extra_args['whitesource_cfg_name'] = whitesource_cfg_name
        if whitesource_backend_chart_dir:
            extra_args['chart_dir'] = whitesource_backend_chart_dir
        deploy_or_upgrade_whitesource_api_extension(
            config_set_name=config_set_name,
            **extra_args,
        )