def generate_delaying_proxy_deployment(concourse_cfg: ConcourseConfig):
    """Build a single-replica Deployment for the GitHub-Enterprise delaying proxy.

    The proxy container listens on port 8080 and is pointed at the Concourse
    external URL via the CONCOURSE_URL environment variable.
    """
    ensure_not_none(concourse_cfg)
    concourse_url = concourse_cfg.external_url()
    selector_labels = {'app': 'delaying-proxy'}

    # The one container this deployment runs, with a TCP liveness probe on
    # the same port it serves on.
    proxy_container = V1Container(
        image='eu.gcr.io/gardener-project/cc/github-enterprise-proxy:0.1.0',
        image_pull_policy='IfNotPresent',
        name='delaying-proxy',
        ports=[V1ContainerPort(container_port=8080)],
        liveness_probe=V1Probe(
            tcp_socket=V1TCPSocketAction(port=8080),
            initial_delay_seconds=10,
            period_seconds=10,
        ),
        env=[V1EnvVar(name='CONCOURSE_URL', value=concourse_url)],
    )

    pod_template = V1PodTemplateSpec(
        metadata=V1ObjectMeta(labels=selector_labels),
        spec=V1PodSpec(containers=[proxy_container]),
    )

    return V1Deployment(
        kind='Deployment',
        metadata=V1ObjectMeta(name='delaying-proxy'),
        spec=V1DeploymentSpec(
            replicas=1,
            selector=V1LabelSelector(match_labels=selector_labels),
            template=pod_template,
        ),
    )
def get_streaming_app_deployment(
    name: str = "test-app",
    input_topics: Optional[str] = "input-topic",
    output_topic: Optional[str] = "output-topic",
    error_topic: Optional[str] = "error-topic",
    multiple_inputs: Optional[str] = None,
    multiple_outputs: Optional[str] = None,
    extra: Optional[Dict[str, str]] = None,
    env_prefix: str = "APP_",
    pipeline: Optional[str] = None,
    consumer_group: Optional[str] = None,
    config_type: ConfigType = ConfigType.ENV,
) -> V1Deployment:
    """Build a V1Deployment test fixture for a streaming app.

    :param name: deployment name.
    :param input_topics: single input topic (or None).
    :param output_topic: output topic (or None).
    :param error_topic: error/DLQ topic (or None).
    :param multiple_inputs: encoded multi-input topic spec (or None).
    :param multiple_outputs: encoded multi-output topic spec (or None).
    :param extra: additional config entries; defaults to an empty dict.
    :param env_prefix: prefix for environment-variable config keys.
    :param pipeline: pipeline label passed to the metadata helper.
    :param consumer_group: consumer-group override (or None).
    :param config_type: how the app config is injected (env vars, etc.).
    :return: a deployment wrapping the generated pod template.
    """
    # BUG FIX: the previous signature used a mutable default (`extra={}`),
    # which is shared across calls — any mutation would leak into later
    # invocations. Use the None sentinel instead; callers are unaffected.
    template = get_template(
        input_topics,
        output_topic,
        error_topic,
        multiple_inputs=multiple_inputs,
        multiple_outputs=multiple_outputs,
        extra={} if extra is None else extra,
        env_prefix=env_prefix,
        consumer_group=consumer_group,
        config_type=config_type,
    )
    spec = V1DeploymentSpec(template=template, selector=V1LabelSelector())
    metadata = get_metadata(name, pipeline=pipeline)
    return V1Deployment(metadata=metadata, spec=spec)
def get_reference_object(self) -> V1Deployment: """Get deployment object for outpost""" # Generate V1ContainerPort objects container_ports = [] for port in self.controller.deployment_ports: container_ports.append( V1ContainerPort( container_port=port.port, name=port.name, protocol=port.protocol.upper(), )) meta = self.get_object_meta(name=self.name) secret_name = f"authentik-outpost-{self.controller.outpost.uuid.hex}-api" image_prefix = CONFIG.y("outposts.docker_image_base") return V1Deployment( metadata=meta, spec=V1DeploymentSpec( replicas=self.outpost.config.kubernetes_replicas, selector=V1LabelSelector(match_labels=self.get_pod_meta()), template=V1PodTemplateSpec( metadata=V1ObjectMeta(labels=self.get_pod_meta()), spec=V1PodSpec(containers=[ V1Container( name=str(self.outpost.type), image= f"{image_prefix}-{self.outpost.type}:{__version__}", ports=container_ports, env=[ V1EnvVar( name="AUTHENTIK_HOST", value_from=V1EnvVarSource( secret_key_ref=V1SecretKeySelector( name=secret_name, key="authentik_host", )), ), V1EnvVar( name="AUTHENTIK_TOKEN", value_from=V1EnvVarSource( secret_key_ref=V1SecretKeySelector( name=secret_name, key="token", )), ), V1EnvVar( name="AUTHENTIK_INSECURE", value_from=V1EnvVarSource( secret_key_ref=V1SecretKeySelector( name=secret_name, key="authentik_host_insecure", )), ), ], ) ]), ), ), )
def __dict_to_deployment_spec(spec):
    '''Convert a plain dict into a kubernetes V1DeploymentSpec instance.

    ``selector`` and ``template`` are required constructor arguments, so they
    are passed explicitly (``None`` if absent); every other key present in
    both the dict and the model is copied over via ``setattr``.
    '''
    spec_obj = V1DeploymentSpec(selector=spec.get('selector'),
                                template=spec.get('template'))
    # MODERNIZED: was `iteritems(spec)` (six / Python-2 idiom); dict.items()
    # is equivalent on Python 3.
    for key, value in spec.items():
        # Only copy keys the model actually declares; unknown keys are
        # silently ignored (matching the original behavior).
        if hasattr(spec_obj, key):
            setattr(spec_obj, key, value)
    return spec_obj
def __create_app_deployment(self, labels):
    """Create a single-replica Deployment for the app container.

    Environment is sourced from the infra DB ConfigMap; the deployment is
    created in the todo-app namespace.
    """
    app_container = V1Container(
        name=self.container_name,
        image=self.image_name,
        image_pull_policy='IfNotPresent',
        ports=[V1ContainerPort(container_port=self.container_port)],
        env_from=[
            V1EnvFromSource(
                config_map_ref=V1ConfigMapEnvSource(name=INFRA_DB_CONFIG))
        ],
    )
    pod_template = V1PodTemplateSpec(
        metadata=V1ObjectMeta(name=self.container_name, labels=labels),
        spec=V1PodSpec(containers=[app_container]),
    )
    body = V1Deployment(
        metadata=V1ObjectMeta(name=self.container_name),
        spec=V1DeploymentSpec(
            replicas=1,
            # Selector must match the pod template labels above.
            selector=V1LabelSelector(match_labels=labels),
            template=pod_template,
        ),
    )
    self.appsApi.create_namespaced_deployment(namespace=TODO_APP_NAMESPACE,
                                              body=body)
def ensure_single_container_deployment(api_apps_v1, container, name, namespace,
                                       replicas=1):
    """Idempotently apply a Deployment running exactly one container.

    Metadata, selector, and pod labels all use ``{'app': name}`` so the
    selector is guaranteed to match the pods it creates.
    """
    app_labels = {'app': name}
    pod_template = V1PodTemplateSpec(
        metadata=V1ObjectMeta(name=name, labels=app_labels),
        spec=V1PodSpec(containers=[container]),
    )
    body = V1Deployment(
        api_version="apps/v1",
        metadata=V1ObjectMeta(name=name, labels=app_labels),
        spec=V1DeploymentSpec(
            replicas=replicas,
            selector=V1LabelSelector(match_labels=app_labels),
            template=pod_template,
        ),
    )
    ensure_deployment(
        api=api_apps_v1,
        deployment=body,
        name=name,
        namespace=namespace,
    )
def test_sanitize_config_hash(self):
    """replicas must be stripped before hashing, so scaling alone never
    changes the config hash."""
    deployment_config = V1Deployment(
        metadata=V1ObjectMeta(name='qwe', labels={'mc': 'grindah'}),
        spec=V1DeploymentSpec(
            replicas=2,
            selector=V1LabelSelector(match_labels={'freq': '108.9'}),
            template=V1PodTemplateSpec(),
        ),
    )
    sanitized = self.deployment.sanitize_for_config_hash(deployment_config)
    assert 'replicas' not in sanitized['spec'].keys()
def __init__(self) -> None:
    """Assemble a single-replica redpanda Deployment manifest.

    The manifest is stored on ``self.deployment``; nothing is applied here.
    """
    # rpk flags pin redpanda to a tiny single-node footprint (1 core, 1G)
    # with transactions/idempotence enabled and topic auto-creation off.
    redpanda_container = V1Container(
        name="redpanda",
        image="vectorized/redpanda:v21.11.13",
        command=[
            "/usr/bin/rpk",
            "redpanda",
            "start",
            "--overprovisioned",
            "--smp",
            "1",
            "--memory",
            "1G",
            "--reserve-memory",
            "0M",
            "--node-id",
            "0",
            "--check=false",
            "--set",
            "redpanda.enable_transactions=true",
            "--set",
            "redpanda.enable_idempotence=true",
            "--set",
            "redpanda.auto_create_topics_enabled=false",
            "--advertise-kafka-addr",
            "redpanda:9092",
        ],
    )
    app_labels = {"app": "redpanda"}
    pod_template = V1PodTemplateSpec(
        metadata=V1ObjectMeta(labels=app_labels),
        spec=V1PodSpec(containers=[redpanda_container]),
    )
    deployment_spec = V1DeploymentSpec(
        replicas=1,
        template=pod_template,
        selector=V1LabelSelector(match_labels=app_labels),
    )
    self.deployment = V1Deployment(
        api_version="apps/v1",
        kind="Deployment",
        metadata=V1ObjectMeta(name="redpanda"),
        spec=deployment_spec,
    )
def create_app(self, namespace, name, labels, app):
    """Create App.

    :param str namespace: target Kubernetes namespace.
    :param str name: deployment name; also used as the default selector value.
    :param Dict labels: base labels; NOTE: this dict is mutated in place
        (the default selector is merged into it) — callers share the change.
    :param cloudshell.cp.kubernetes.models.deployment_requests.
    AppDeploymentRequest app:
    :rtype: AppsV1beta1Deployment
    """
    # Selector label that ties the deployment to its pods.
    app_selector = {TagsService.get_default_selector(name): name}
    labels.update(app_selector)
    annotations = {}
    meta = V1ObjectMeta(name=name, labels=labels)
    template_meta = V1ObjectMeta(labels=labels, annotations=annotations)
    container = self._prepare_app_container(
        name=app.name,
        image=app.image,
        start_command=app.start_command,
        environment_variables=app.environment_variables,
        compute_spec=app.compute_spec,
        internal_ports=app.internal_ports,
        external_ports=app.external_ports,
    )
    pod_spec = V1PodSpec(containers=[container])
    app_template = V1PodTemplateSpec(metadata=template_meta, spec=pod_spec)
    app_spec = V1DeploymentSpec(
        replicas=app.replicas,
        template=app_template,
        # NOTE(review): selector is passed as a raw dict rather than a
        # V1LabelSelector, unlike sibling code — presumably relies on the
        # client serializing dicts as-is; confirm before changing.
        selector={"matchLabels": app_selector},
    )
    deployment = V1Deployment(metadata=meta, spec=app_spec)
    self._logger.info("Creating namespaced deployment for app {}".format(name))
    self._logger.debug("Creating namespaced deployment with the following specs:")
    self._logger.debug(deployment.to_str())
    return self._clients.apps_api.create_namespaced_deployment(
        namespace=namespace, body=deployment, pretty="true"
    )
def get_streaming_app_deployment(
    name,
    input_topics,
    output_topic,
    error_topic,
    multiple_inputs=None,
    multiple_outputs=None,
    env_prefix="APP_",
    pipeline=None,
    consumer_group=None,
) -> V1Deployment:
    """Build a V1Deployment test fixture wrapping the generated pod template.

    ``name`` and ``pipeline`` feed the metadata helper; the topic arguments
    are forwarded to ``get_template`` unchanged.
    """
    template = get_template(
        input_topics,
        output_topic,
        error_topic,
        multiple_inputs=multiple_inputs,
        multiple_outputs=multiple_outputs,
        env_prefix=env_prefix,
        consumer_group=consumer_group,
    )
    # NOTE(review): selector is a label-selector *string*, not a
    # V1LabelSelector object as the model declares — presumably fine for
    # these fixtures since the object is never submitted to a cluster;
    # verify before reusing this helper against a real API server.
    spec = V1DeploymentSpec(template=template,
                            selector="app=test-app,release=test-release")
    metadata = get_metadata(name, pipeline=pipeline)
    return V1Deployment(metadata=metadata, spec=spec)
def format_kubernetes_app(self) -> Union[V1Deployment, V1StatefulSet]:
    """Create the configuration that will be passed to the Kubernetes REST API.

    Instances with persistent volumes become StatefulSets (so volume claim
    templates can be attached); everything else becomes a Deployment. After
    construction, a config hash is computed over the sanitized object and
    stamped onto both the top-level and pod-template labels so bounces can
    be detected by comparing config_sha.

    Raises InvalidKubernetesConfig if anything in the build fails.
    """
    try:
        system_paasta_config = load_system_paasta_config()
        docker_url = self.get_docker_url()
        code_sha = get_code_sha_from_dockerurl(docker_url)
        if self.get_persistent_volumes():
            # Persistent storage requires a StatefulSet with volume claims.
            complete_config = V1StatefulSet(
                api_version='apps/v1',
                kind='StatefulSet',
                metadata=self.get_kubernetes_metadata(code_sha),
                spec=V1StatefulSetSpec(
                    service_name="{service}-{instance}".format(
                        service=self.get_sanitised_service_name(),
                        instance=self.get_sanitised_instance_name(),
                    ),
                    volume_claim_templates=self.get_volume_claim_templates(
                    ),
                    replicas=self.get_desired_instances(),
                    selector=V1LabelSelector(match_labels={
                        "service": self.get_service(),
                        "instance": self.get_instance(),
                    }, ),
                    template=self.get_pod_template_spec(
                        code_sha=code_sha,
                        system_paasta_config=system_paasta_config,
                    ),
                ),
            )
        else:
            complete_config = V1Deployment(
                api_version='apps/v1',
                kind='Deployment',
                metadata=self.get_kubernetes_metadata(code_sha),
                spec=V1DeploymentSpec(
                    replicas=self.get_desired_instances(),
                    selector=V1LabelSelector(match_labels={
                        "service": self.get_service(),
                        "instance": self.get_instance(),
                    }, ),
                    template=self.get_pod_template_spec(
                        code_sha=code_sha,
                        system_paasta_config=system_paasta_config,
                    ),
                    # Only Deployments carry a rollout strategy.
                    strategy=self.get_deployment_strategy_config(),
                ),
            )
        # Hash the sanitized config (replicas etc. stripped) so that pure
        # scaling changes do not force a bounce.
        config_hash = get_config_hash(
            self.sanitize_for_config_hash(complete_config),
            force_bounce=self.get_force_bounce(),
        )
        complete_config.metadata.labels['config_sha'] = config_hash
        complete_config.spec.template.metadata.labels[
            'config_sha'] = config_hash
    except Exception as e:
        raise InvalidKubernetesConfig(e, self.get_service(),
                                      self.get_instance())
    log.debug("Complete configuration for instance is: %s", complete_config)
    return complete_config
def test_reconcile_kubernetes_deployment():
    """Walk reconcile_kubernetes_deployment through its decision table:
    create when missing or wrong instance; no-op when everything matches;
    update on git_sha / config_sha / replica drift; and the error paths
    (NoDeploymentsAvailable -> (0, None), config/image errors -> (1, None)).
    """
    with mock.patch(
        'paasta_tools.setup_kubernetes_job.load_kubernetes_service_config_no_cache',
        autospec=True,
    ) as mock_load_kubernetes_service_config_no_cache, mock.patch(
        'paasta_tools.setup_kubernetes_job.load_system_paasta_config',
        autospec=True,
    ), mock.patch(
        'paasta_tools.setup_kubernetes_job.create_deployment',
        autospec=True,
    ) as mock_create_deployment, mock.patch(
        'paasta_tools.setup_kubernetes_job.update_deployment',
        autospec=True,
    ) as mock_update_deployment:
        mock_kube_client = mock.Mock()
        mock_deployments: Sequence[KubeDeployment] = []
        # no deployments so should create
        ret = reconcile_kubernetes_deployment(
            kube_client=mock_kube_client,
            service='kurupt',
            instance='fm',
            kube_deployments=mock_deployments,
            soa_dir='/nail/blah',
        )
        assert ret == (0, None)
        mock_deploy = mock_load_kubernetes_service_config_no_cache.return_value.format_kubernetes_app(
        )
        mock_create_deployment.assert_called_with(
            kube_client=mock_kube_client,
            formatted_deployment=mock_deploy,
        )
        # different instance so should create
        mock_deployments = [
            KubeDeployment(
                service='kurupt',
                instance='garage',
                git_sha='a12345',
                config_sha='b12345',
                replicas=3,
            )
        ]
        ret = reconcile_kubernetes_deployment(
            kube_client=mock_kube_client,
            service='kurupt',
            instance='fm',
            kube_deployments=mock_deployments,
            soa_dir='/nail/blah',
        )
        assert ret == (0, None)
        mock_create_deployment.assert_called_with(
            kube_client=mock_kube_client,
            formatted_deployment=mock_deploy,
        )
        # instance correct so do nothing
        mock_create_deployment.reset_mock()
        mock_load_kubernetes_service_config_no_cache.return_value = mock.Mock(
            format_kubernetes_app=mock.Mock(return_value=V1Deployment(
                metadata=V1ObjectMeta(labels={
                    'git_sha': 'a12345',
                    'config_sha': 'b12345',
                }, ),
                spec=V1DeploymentSpec(
                    selector=V1LabelSelector(),
                    template=V1PodTemplateSpec(),
                    replicas=3,
                ),
            ), ))
        mock_deployments = [
            KubeDeployment(
                service='kurupt',
                instance='fm',
                git_sha='a12345',
                config_sha='b12345',
                replicas=3,
            )
        ]
        ret = reconcile_kubernetes_deployment(
            kube_client=mock_kube_client,
            service='kurupt',
            instance='fm',
            kube_deployments=mock_deployments,
            soa_dir='/nail/blah',
        )
        assert ret == (0, None)
        assert not mock_create_deployment.called
        assert not mock_update_deployment.called
        # changed gitsha so update
        mock_create_deployment.reset_mock()
        mock_load_kubernetes_service_config_no_cache.return_value = mock.Mock(
            format_kubernetes_app=mock.Mock(return_value=V1Deployment(
                metadata=V1ObjectMeta(labels={
                    'git_sha': 'new_image',
                    'config_sha': 'b12345',
                }, ),
                spec=V1DeploymentSpec(
                    selector=V1LabelSelector(),
                    template=V1PodTemplateSpec(),
                    replicas=3,
                ),
            ), ))
        mock_deployments = [
            KubeDeployment(
                service='kurupt',
                instance='fm',
                git_sha='a12345',
                config_sha='b12345',
                replicas=3,
            )
        ]
        ret = reconcile_kubernetes_deployment(
            kube_client=mock_kube_client,
            service='kurupt',
            instance='fm',
            kube_deployments=mock_deployments,
            soa_dir='/nail/blah',
        )
        assert ret == (0, None)
        assert not mock_create_deployment.called
        mock_deploy = mock_load_kubernetes_service_config_no_cache.return_value.format_kubernetes_app(
        )
        mock_update_deployment.assert_called_with(
            kube_client=mock_kube_client,
            formatted_deployment=mock_deploy,
        )
        # changed configsha so update
        mock_create_deployment.reset_mock()
        mock_update_deployment.reset_mock()
        mock_load_kubernetes_service_config_no_cache.return_value = mock.Mock(
            format_kubernetes_app=mock.Mock(return_value=V1Deployment(
                metadata=V1ObjectMeta(labels={
                    'git_sha': 'a12345',
                    'config_sha': 'newconfig',
                }, ),
                spec=V1DeploymentSpec(
                    selector=V1LabelSelector(),
                    template=V1PodTemplateSpec(),
                    replicas=3,
                ),
            ), ))
        mock_deployments = [
            KubeDeployment(
                service='kurupt',
                instance='fm',
                git_sha='a12345',
                config_sha='b12345',
                replicas=3,
            )
        ]
        ret = reconcile_kubernetes_deployment(
            kube_client=mock_kube_client,
            service='kurupt',
            instance='fm',
            kube_deployments=mock_deployments,
            soa_dir='/nail/blah',
        )
        assert ret == (0, None)
        assert not mock_create_deployment.called
        mock_deploy = mock_load_kubernetes_service_config_no_cache.return_value.format_kubernetes_app(
        )
        mock_update_deployment.assert_called_with(
            kube_client=mock_kube_client,
            formatted_deployment=mock_deploy,
        )
        # changed number of replicas so update
        mock_create_deployment.reset_mock()
        mock_update_deployment.reset_mock()
        mock_load_kubernetes_service_config_no_cache.return_value = mock.Mock(
            format_kubernetes_app=mock.Mock(return_value=V1Deployment(
                metadata=V1ObjectMeta(labels={
                    'git_sha': 'a12345',
                    'config_sha': 'b12345',
                }, ),
                spec=V1DeploymentSpec(
                    selector=V1LabelSelector(),
                    template=V1PodTemplateSpec(),
                    replicas=2,
                ),
            ), ))
        mock_deployments = [
            KubeDeployment(
                service='kurupt',
                instance='fm',
                git_sha='a12345',
                config_sha='b12345',
                replicas=3,
            )
        ]
        ret = reconcile_kubernetes_deployment(
            kube_client=mock_kube_client,
            service='kurupt',
            instance='fm',
            kube_deployments=mock_deployments,
            soa_dir='/nail/blah',
        )
        assert ret == (0, None)
        assert not mock_create_deployment.called
        mock_deploy = mock_load_kubernetes_service_config_no_cache.return_value.format_kubernetes_app(
        )
        mock_update_deployment.assert_called_with(
            kube_client=mock_kube_client,
            formatted_deployment=mock_deploy,
        )
        # error cases...
        mock_create_deployment.reset_mock()
        mock_update_deployment.reset_mock()
        # no deployments for the service is treated as success (nothing to do)
        mock_load_kubernetes_service_config_no_cache.side_effect = NoDeploymentsAvailable
        ret = reconcile_kubernetes_deployment(
            kube_client=mock_kube_client,
            service='kurupt',
            instance='fm',
            kube_deployments=mock_deployments,
            soa_dir='/nail/blah',
        )
        assert ret == (0, None)
        assert not mock_create_deployment.called
        assert not mock_update_deployment.called
        # missing configuration is a hard failure
        mock_load_kubernetes_service_config_no_cache.side_effect = NoConfigurationForServiceError
        ret = reconcile_kubernetes_deployment(
            kube_client=mock_kube_client,
            service='kurupt',
            instance='fm',
            kube_deployments=mock_deployments,
            soa_dir='/nail/blah',
        )
        assert ret == (1, None)
        assert not mock_create_deployment.called
        assert not mock_update_deployment.called
        # formatting failure (no docker image) is also a hard failure
        mock_load_kubernetes_service_config_no_cache.side_effect = None
        mock_load_kubernetes_service_config_no_cache.return_value = mock.Mock(
            format_kubernetes_app=mock.Mock(side_effect=NoDockerImageError),
        )
        ret = reconcile_kubernetes_deployment(
            kube_client=mock_kube_client,
            service='kurupt',
            instance='fm',
            kube_deployments=mock_deployments,
            soa_dir='/nail/blah',
        )
        assert ret == (1, None)
        assert not mock_create_deployment.called
        assert not mock_update_deployment.called
def test_format_kubernetes_app_dict(self):
    """format_kubernetes_app with every collaborator mocked out: verify the
    helpers are consulted, the config hash is computed over the sanitized
    config, and the resulting V1Deployment matches field-for-field."""
    with mock.patch(
        'paasta_tools.kubernetes_tools.load_system_paasta_config',
        autospec=True,
    ) as mock_load_system_config, mock.patch(
        'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_docker_url',
        autospec=True,
    ) as mock_get_docker_url, mock.patch(
        'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_volumes',
        autospec=True,
    ) as mock_get_volumes, mock.patch(
        'paasta_tools.kubernetes_tools.get_code_sha_from_dockerurl',
        autospec=True,
    ) as mock_get_code_sha_from_dockerurl, mock.patch(
        'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_sanitised_service_name',
        autospec=True,
        return_value='kurupt',
    ), mock.patch(
        'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_sanitised_instance_name',
        autospec=True,
        return_value='fm',
    ), mock.patch(
        'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_service',
        autospec=True,
    ) as mock_get_service, mock.patch(
        'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_instance',
        autospec=True,
    ) as mock_get_instance, mock.patch(
        'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_instances',
        autospec=True,
    ) as mock_get_instances, mock.patch(
        'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_deployment_strategy_config',
        autospec=True,
    ) as mock_get_deployment_strategy_config, mock.patch(
        'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_kubernetes_containers',
        autospec=True,
    ) as mock_get_kubernetes_containers, mock.patch(
        'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_pod_volumes',
        autospec=True,
        return_value=[],
    ) as mock_get_pod_volumes, mock.patch(
        'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_sanitised_volume_name',
        autospec=True,
    ), mock.patch(
        'paasta_tools.kubernetes_tools.get_config_hash',
        autospec=True,
    ) as mock_get_config_hash, mock.patch(
        'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_force_bounce',
        autospec=True,
    ) as mock_get_force_bounce, mock.patch(
        'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.sanitize_for_config_hash',
        autospec=True,
    ) as mock_sanitize_for_config_hash:
        ret = self.deployment.format_kubernetes_app()
        assert mock_load_system_config.called
        assert mock_get_docker_url.called
        assert mock_get_volumes.called
        assert mock_get_pod_volumes.called
        # Hash must be taken over the sanitized config with the bounce flag.
        mock_get_config_hash.assert_called_with(
            mock_sanitize_for_config_hash.return_value,
            force_bounce=mock_get_force_bounce.return_value,
        )
        # Expected deployment built entirely from the mocks' return values;
        # name 'kurupt-fm' comes from the patched sanitised service/instance.
        expected = V1Deployment(
            metadata=V1ObjectMeta(
                labels={
                    'config_sha': mock_get_config_hash.return_value,
                    'git_sha': mock_get_code_sha_from_dockerurl.return_value,
                    'instance': mock_get_instance.return_value,
                    'service': mock_get_service.return_value,
                },
                name='kurupt-fm',
            ),
            spec=V1DeploymentSpec(
                replicas=mock_get_instances.return_value,
                selector=V1LabelSelector(match_labels={
                    'instance': mock_get_instance.return_value,
                    'service': mock_get_service.return_value,
                }, ),
                strategy=mock_get_deployment_strategy_config.return_value,
                template=V1PodTemplateSpec(
                    metadata=V1ObjectMeta(labels={
                        'config_sha': mock_get_config_hash.return_value,
                        'git_sha':
                        mock_get_code_sha_from_dockerurl.return_value,
                        'instance': mock_get_instance.return_value,
                        'service': mock_get_service.return_value,
                    }, ),
                    spec=V1PodSpec(
                        containers=mock_get_kubernetes_containers.
                        return_value,
                        restart_policy='Always',
                        volumes=[],
                    ),
                ),
            ),
        )
        assert ret == expected
def ensure_traefik(api_core_v1, api_ext_v1_beta1, api_apps_v1, api_custom,
                   api_rbac_auth_v1_b1, admin_email, domain, static_ip,
                   oauth_client_id, oauth_client_secret, oauth_domain,
                   oauth_secret):
    """Idempotently install a Traefik v2 ingress stack in the default namespace.

    Installs: the Traefik CRDs, RBAC for the ingress controller, a
    LoadBalancer service on the given static IP (HTTPS only), the Traefik
    deployment itself (with Let's Encrypt TLS challenge using admin_email),
    a traefik-forward-auth deployment wired to Google OAuth, plus the
    IngressRoute/Middleware custom objects and ClusterIP service that expose
    it on auth.<domain>. Finishes by ensuring the whoami test app.
    """
    # --- Traefik custom resource definitions --------------------------------
    ensure_crd(api=api_ext_v1_beta1,
               name='ingressroutes.traefik.containo.us',
               group='traefik.containo.us',
               kind='IngressRoute',
               plural='ingressroutes',
               singular='ingressroute',
               scope='Namespaced')
    ensure_crd(api=api_ext_v1_beta1,
               name='ingressroutetcps.traefik.containo.us',
               group='traefik.containo.us',
               kind='IngressRouteTCP',
               plural='ingressroutetcps',
               singular='ingressroutetcp',
               scope='Namespaced')
    ensure_crd(api=api_ext_v1_beta1,
               name='middlewares.traefik.containo.us',
               group='traefik.containo.us',
               kind='Middleware',
               plural='middlewares',
               singular='middleware',
               scope='Namespaced')
    ensure_crd(api=api_ext_v1_beta1,
               name='tlsoptions.traefik.containo.us',
               group='traefik.containo.us',
               kind='TLSOption',
               plural='tlsoptions',
               singular='tlsoption',
               scope='Namespaced')
    # --- RBAC: cluster role + binding for the ingress controller -----------
    ensure_role(api=api_rbac_auth_v1_b1,
                role=V1ClusterRole(
                    api_version='rbac.authorization.k8s.io/v1beta1',
                    kind='ClusterRole',
                    metadata=V1ObjectMeta(name='traefik-ingress-controller'),
                    rules=[
                        V1PolicyRule(
                            api_groups=[''],
                            resources=['services', 'endpoints', 'secrets'],
                            verbs=['get', 'list', 'watch']),
                        V1PolicyRule(api_groups=['extensions'],
                                     resources=['ingresses'],
                                     verbs=['get', 'list', 'watch']),
                        V1PolicyRule(api_groups=['extensions'],
                                     resources=['ingresses/status'],
                                     verbs=['update']),
                        V1PolicyRule(api_groups=['traefik.containo.us'],
                                     resources=['middlewares'],
                                     verbs=['get', 'list', 'watch']),
                        V1PolicyRule(api_groups=['traefik.containo.us'],
                                     resources=['ingressroutes'],
                                     verbs=['get', 'list', 'watch']),
                        V1PolicyRule(api_groups=['traefik.containo.us'],
                                     resources=['ingressroutetcps'],
                                     verbs=['get', 'list', 'watch']),
                        V1PolicyRule(api_groups=['traefik.containo.us'],
                                     resources=['tlsoptions'],
                                     verbs=['get', 'list', 'watch'])
                    ]),
                name='traefik-ingress-controller')
    ensure_role_binding(
        api=api_rbac_auth_v1_b1,
        role_binding=V1ClusterRoleBinding(
            api_version='rbac.authorization.k8s.io/v1beta1',
            kind='ClusterRoleBinding',
            metadata=V1ObjectMeta(name='traefik-ingress-controller'),
            role_ref=V1RoleRef(api_group='rbac.authorization.k8s.io',
                               kind='ClusterRole',
                               name='traefik-ingress-controller'),
            subjects=[
                V1Subject(kind='ServiceAccount',
                          name='traefik-ingress-controller',
                          namespace='default')
            ]),
        name='traefik-ingress-controller')
    # --- Public entrypoint: LoadBalancer on the static IP, HTTPS only ------
    ensure_service(
        api=api_core_v1,
        service=V1Service(
            api_version="v1",
            metadata=V1ObjectMeta(name='traefik'),
            spec=V1ServiceSpec(
                type='LoadBalancer',
                load_balancer_ip=static_ip,
                ports=[
                    # Plain HTTP intentionally not exposed; uncomment to add.
                    # V1ServicePort(
                    #     protocol='TCP',
                    #     port=80,
                    #     name='web'
                    # ),
                    V1ServicePort(protocol='TCP', port=443,
                                  name='websecure'),
                ],
                selector={'app': 'traefik'})),
        name='traefik',
        namespace='default')
    ensure_service_account(
        api=api_core_v1,
        account=V1ServiceAccount(
            api_version="v1",
            metadata=V1ObjectMeta(name='traefik-ingress-controller'),
        ),
        name='traefik-ingress-controller',
        namespace='default')
    # --- Traefik deployment -------------------------------------------------
    ensure_deployment(
        api=api_apps_v1,
        deployment=V1Deployment(
            api_version="apps/v1",
            metadata=V1ObjectMeta(name='traefik', labels={'app': 'traefik'}),
            spec=V1DeploymentSpec(
                replicas=1,
                selector=V1LabelSelector(match_labels={'app': 'traefik'}),
                template=V1PodTemplateSpec(
                    metadata=V1ObjectMeta(name='traefik',
                                          labels={'app': 'traefik'}),
                    spec=V1PodSpec(
                        service_account_name='traefik-ingress-controller',
                        containers=[
                            V1Container(
                                name='traefik',
                                image='traefik:v2.0',
                                # ACME TLS-ALPN challenge against production
                                # Let's Encrypt; storage is pod-local.
                                args=[
                                    '--api.insecure',
                                    '--accesslog',
                                    '--entrypoints.web.Address=:80',
                                    '--entrypoints.websecure.Address=:443',
                                    '--providers.kubernetescrd',
                                    '--certificatesresolvers.default.acme.tlschallenge',
                                    f'--certificatesresolvers.default.acme.email={admin_email}',
                                    '--certificatesresolvers.default.acme.storage=acme.json',
                                    # Staging CA, for testing rate limits:
                                    # '--certificatesresolvers.default.acme.caserver=https://acme-staging-v02.api.letsencrypt.org/directory',
                                ],
                                ports=[
                                    V1ContainerPort(name='web',
                                                    container_port=8000),
                                    V1ContainerPort(name='websecure',
                                                    container_port=4443),
                                    V1ContainerPort(name='admin',
                                                    container_port=8080),
                                ])
                        ])))),
        name='traefik',
        namespace='default')
    # --- traefik-forward-auth deployment (Google OAuth front door) ----------
    ensure_deployment(
        api=api_apps_v1,
        deployment=V1Deployment(
            api_version="apps/v1",
            metadata=V1ObjectMeta(name='traefik-forward-auth',
                                  labels={'app': 'traefik-forward-auth'}),
            spec=V1DeploymentSpec(
                replicas=1,
                selector=V1LabelSelector(
                    match_labels={'app': 'traefik-forward-auth'}),
                template=V1PodTemplateSpec(
                    metadata=V1ObjectMeta(
                        name='traefik-forward-auth',
                        labels={'app': 'traefik-forward-auth'}),
                    spec=V1PodSpec(containers=[
                        V1Container(
                            name='traefik-forward-auth',
                            image='thomseddon/traefik-forward-auth:2',
                            ports=[
                                V1ContainerPort(name='auth',
                                                container_port=4181),
                            ],
                            env=[
                                V1EnvVar(name='PROVIDERS_GOOGLE_CLIENT_ID',
                                         value=oauth_client_id),
                                # V1EnvVar(name='LOG_LEVEL', value='trace'),
                                V1EnvVar(
                                    name='PROVIDERS_GOOGLE_CLIENT_SECRET',
                                    value=oauth_client_secret),
                                V1EnvVar(name='SECRET', value=oauth_secret),
                                V1EnvVar(name='DOMAIN', value=oauth_domain),
                                V1EnvVar(name='COOKIE_DOMAIN', value=domain),
                                V1EnvVar(name='AUTH_HOST',
                                         value=f'auth.{domain}'),
                            ])
                    ])))),
        name='traefik-forward-auth',
        namespace='default')
    # --- Routing + middleware that sends traffic through forward-auth ------
    ensure_custom_object(api=api_custom,
                         custom_object={
                             'apiVersion': 'traefik.containo.us/v1alpha1',
                             'kind': 'IngressRoute',
                             'metadata': {
                                 'name': 'traefik-forward-auth',
                             },
                             'spec': {
                                 'entryPoints': ['websecure'],
                                 'routes': [{
                                     'match': f'Host(`auth.{domain}`)',
                                     'kind': 'Rule',
                                     'services': [{
                                         'name': 'traefik-forward-auth',
                                         'port': 4181
                                     }],
                                     'middlewares': [{
                                         'name': 'traefik-forward-auth'
                                     }]
                                 }],
                                 'tls': {
                                     'certResolver': 'default'
                                 }
                             }
                         },
                         group='traefik.containo.us',
                         plural='ingressroutes',
                         version='v1alpha1',
                         name='traefik-forward-auth',
                         namespace='default')
    ensure_custom_object(api=api_custom,
                         custom_object={
                             'apiVersion': 'traefik.containo.us/v1alpha1',
                             'kind': 'Middleware',
                             'metadata': {
                                 'name': 'traefik-forward-auth',
                             },
                             'spec': {
                                 'forwardAuth': {
                                     'address':
                                     'http://traefik-forward-auth:4181',
                                     'authResponseHeaders':
                                     ['X-Forwarded-User'],
                                 }
                             }
                         },
                         group='traefik.containo.us',
                         plural='middlewares',
                         version='v1alpha1',
                         name='traefik-forward-auth',
                         namespace='default')
    ensure_service(api=api_core_v1,
                   service=V1Service(
                       api_version="v1",
                       metadata=V1ObjectMeta(name='traefik-forward-auth'),
                       spec=V1ServiceSpec(
                           type='ClusterIP',
                           ports=[
                               V1ServicePort(protocol='TCP',
                                             port=4181,
                                             name='auth'),
                           ],
                           selector={'app': 'traefik-forward-auth'})),
                   name='traefik-forward-auth',
                   namespace='default')
    # Smoke-test app behind the new ingress.
    ensure_whoami(api_apps_v1, api_core_v1, api_custom, domain)
def _create_deployment(self, service_name: str, deployment_name: str,
                       docker_config: DockerConfig, shutdown_seconds: int,
                       scale: int, labels: dict[str, str] = None,
                       volumes: list[V1Volume] = None,
                       mounts: list[V1VolumeMount] = None,
                       core_mounts: bool = False, change_key: str = ''):
    """Create or update the Deployment for a service container.

    A change key summarizing all inputs is stored as an annotation; if an
    existing deployment already carries the same key, only the replica
    count is adjusted (via set_target) and nothing is redeployed. Also
    manages the registry pull secret (created/patched when credentials are
    configured, deleted otherwise).
    """
    # Build a cache key to check for changes, just trying to only patch what changed
    # will still potentially result in a lot of restarts due to different kubernetes
    # systems returning differently formatted data
    lbls = sorted((labels or {}).items())
    svc_env = sorted(self._service_limited_env[service_name].items())
    change_key = (
        f"n={deployment_name}{change_key}dc={docker_config}ss={shutdown_seconds}"
        f"l={lbls}v={volumes}m={mounts}cm={core_mounts}senv={svc_env}")

    # Check if a deployment already exists, and if it does check if it has the same change key set
    replace = None
    try:
        replace = self.apps_api.read_namespaced_deployment(
            deployment_name, namespace=self.namespace, _request_timeout=API_TIMEOUT)
        if replace.metadata.annotations.get(CHANGE_KEY_NAME) == change_key:
            # Same config: at most fix the replica count, then bail out.
            if replace.spec.replicas != scale:
                self.set_target(service_name, scale)
            return
    except ApiException as error:
        # 404 just means "doesn't exist yet"; anything else is a real error.
        if error.status != 404:
            raise

    # If we have been given a username or password for the registry, we have to
    # update it, if we haven't been, make sure its been cleaned up in the system
    # so we don't leave passwords lying around
    pull_secret_name = f'{deployment_name}-container-pull-secret'
    use_pull_secret = False

    try:
        current_pull_secret = self.api.read_namespaced_secret(
            pull_secret_name, self.namespace, _request_timeout=API_TIMEOUT)
    except ApiException as error:
        if error.status != 404:
            raise
        current_pull_secret = None

    if docker_config.registry_username or docker_config.registry_password:
        use_pull_secret = True
        # Build the secret we want to make
        new_pull_secret = V1Secret(
            metadata=V1ObjectMeta(name=pull_secret_name, namespace=self.namespace),
            type='kubernetes.io/dockerconfigjson',
            string_data={
                '.dockerconfigjson': create_docker_auth_config(
                    image=docker_config.image,
                    username=docker_config.registry_username,
                    password=docker_config.registry_password,
                )
            }
        )

        # Send it to the server
        if current_pull_secret:
            self.api.patch_namespaced_secret(pull_secret_name, namespace=self.namespace,
                                             body=new_pull_secret, _request_timeout=API_TIMEOUT)
        else:
            self.api.create_namespaced_secret(namespace=self.namespace, body=new_pull_secret,
                                              _request_timeout=API_TIMEOUT)
    elif current_pull_secret:
        # Credentials removed from config: clean up the stale secret.
        self.api.delete_namespaced_secret(pull_secret_name, self.namespace,
                                          _request_timeout=API_TIMEOUT)

    # Base labels + component, with caller-supplied labels taking precedence.
    all_labels = dict(self._labels)
    all_labels['component'] = service_name
    if core_mounts:
        all_labels['privilege'] = 'core'
    all_labels.update(labels or {})

    # Build set of volumes, first the global mounts, then the core specific ones,
    # then the ones specific to this container only
    all_volumes: list[V1Volume] = []
    all_mounts: list[V1VolumeMount] = []
    all_volumes.extend(self.config_volumes.values())
    all_mounts.extend(self.config_mounts.values())
    if core_mounts:
        all_volumes.extend(self.core_config_volumes.values())
        all_mounts.extend(self.core_config_mounts.values())
    all_volumes.extend(volumes or [])
    all_mounts.extend(mounts or [])

    # Build metadata; the change-key annotation enables the no-op check above.
    metadata = V1ObjectMeta(name=deployment_name, labels=all_labels,
                            annotations={CHANGE_KEY_NAME: change_key})

    pod = V1PodSpec(
        volumes=all_volumes,
        containers=self._create_containers(service_name, deployment_name,
                                           docker_config, all_mounts,
                                           core_container=core_mounts),
        priority_class_name=self.priority,
        termination_grace_period_seconds=shutdown_seconds,
        security_context=V1PodSecurityContext(fs_group=1000)
    )

    if use_pull_secret:
        pod.image_pull_secrets = [V1LocalObjectReference(name=pull_secret_name)]

    template = V1PodTemplateSpec(
        metadata=metadata,
        spec=pod,
    )

    spec = V1DeploymentSpec(
        replicas=int(scale),
        revision_history_limit=0,
        selector=V1LabelSelector(match_labels=all_labels),
        template=template,
    )

    deployment = V1Deployment(
        kind="Deployment",
        metadata=metadata,
        spec=spec,
    )

    if replace:
        self.logger.info("Requesting kubernetes replace deployment info for: " + metadata.name)
        try:
            self.apps_api.replace_namespaced_deployment(namespace=self.namespace, body=deployment,
                                                        name=metadata.name,
                                                        _request_timeout=API_TIMEOUT)
            return
        except ApiException as error:
            if error.status == 422:
                # Replacement of an immutable field (ie. labels); Delete and re-create
                self.stop_containers(labels=dict(component=service_name))
            # NOTE(review): a non-422 ApiException is swallowed here and we
            # fall through to create (which may then conflict) — confirm
            # whether it should be re-raised instead.
    else:
        self.logger.info("Requesting kubernetes create deployment info for: " + metadata.name)

    self.apps_api.create_namespaced_deployment(namespace=self.namespace, body=deployment,
                                               _request_timeout=API_TIMEOUT)
def generate_secrets_server_deployment(
    secrets_server_config: SecretsServerConfig,
):
    """Build a single-replica Deployment serving the Concourse secrets over HTTP.

    The pod mounts the Concourse secret read-only, copies its own service
    account credentials next to it, and serves the whole directory on port
    8080 via Python's built-in http.server. Scheduling is pinned to the
    cc-control worker group.
    """
    service_name = secrets_server_config.service_name()
    secret_name = secrets_server_config.secrets().concourse_secret_name()
    # We need to ensure that the labels and selectors match for both the deployment and the service,
    # therefore we base them on the configured service name.
    labels = {'app': service_name}

    return V1Deployment(
        kind='Deployment',
        metadata=V1ObjectMeta(name=service_name, labels=labels),
        spec=V1DeploymentSpec(
            replicas=1,
            selector=V1LabelSelector(match_labels=labels),
            template=V1PodTemplateSpec(
                metadata=V1ObjectMeta(labels=labels),
                spec=V1PodSpec(containers=[
                    V1Container(
                        image='eu.gcr.io/gardener-project/cc/job-image:latest',
                        image_pull_policy='IfNotPresent',
                        name='secrets-server',
                        resources=V1ResourceRequirements(
                            requests={
                                'cpu': '50m',
                                'memory': '50Mi'
                            },
                            limits={
                                'cpu': '50m',
                                'memory': '50Mi'
                            },
                        ),
                        command=['bash'],
                        args=[
                            '-c',
                            '''
# chdir to secrets dir; create if absent
mkdir -p /secrets && cd /secrets
# make Kubernetes serviceaccount secrets available by default
cp -r /var/run/secrets/kubernetes.io/serviceaccount serviceaccount
# store Kubernetes service endpoint env as file for consumer
env | grep KUBERNETES_SERVICE > serviceaccount/env
# launch secrets server serving secrets dir contents on all IFs
python3 -m http.server 8080
'''
                        ],
                        ports=[
                            V1ContainerPort(container_port=8080),
                        ],
                        liveness_probe=V1Probe(
                            tcp_socket=V1TCPSocketAction(port=8080),
                            initial_delay_seconds=10,
                            period_seconds=10,
                        ),
                        volume_mounts=[
                            V1VolumeMount(
                                name=secret_name,
                                mount_path='/secrets/concourse-secrets',
                                read_only=True,
                            ),
                        ],
                    ),
                ],
                    # Pin the pod onto the cc-control worker group.
                    node_selector={
                        "worker.garden.sapcloud.io/group": "cc-control"
                    },
                    volumes=[
                        V1Volume(name=secret_name,
                                 secret=V1SecretVolumeSource(
                                     secret_name=secret_name,
                                 ))
                    ]))))
def format_kubernetes_app(self) -> V1Deployment:
    """Create the configuration that will be passed to the Kubernetes REST API.

    Builds a V1Deployment from this instance's config (service/instance names,
    replica count, strategy, containers and volumes), then computes a config
    hash over the sanitized deployment and stamps it into both the deployment
    labels and the pod-template labels so that config changes trigger a bounce.

    Returns:
        V1Deployment: fully labelled deployment, including ``config_sha``.
    """
    system_paasta_config = load_system_paasta_config()
    docker_url = self.get_docker_url()
    # service_namespace_config = load_service_namespace_config(
    #     service=self.service,
    #     namespace=self.get_nerve_namespace(),
    # )
    # combine system-wide volumes with the instance's own volume config
    docker_volumes = self.get_volumes(system_volumes=system_paasta_config.get_volumes())
    code_sha = get_code_sha_from_dockerurl(docker_url)
    complete_config = V1Deployment(
        metadata=V1ObjectMeta(
            name="{service}-{instance}".format(
                service=self.get_sanitised_service_name(),
                instance=self.get_sanitised_instance_name(),
            ),
            labels={
                "service": self.get_service(),
                "instance": self.get_instance(),
                "git_sha": code_sha,
            },
        ),
        spec=V1DeploymentSpec(
            replicas=self.get_instances(),
            # selector must match the pod-template labels below
            selector=V1LabelSelector(
                match_labels={
                    "service": self.get_service(),
                    "instance": self.get_instance(),
                },
            ),
            strategy=self.get_deployment_strategy_config(),
            template=V1PodTemplateSpec(
                metadata=V1ObjectMeta(
                    labels={
                        "service": self.get_service(),
                        "instance": self.get_instance(),
                        "git_sha": code_sha,
                    },
                ),
                spec=V1PodSpec(
                    containers=self.get_kubernetes_containers(
                        volumes=docker_volumes,
                        system_paasta_config=system_paasta_config,
                    ),
                    restart_policy="Always",
                    volumes=self.get_pod_volumes(docker_volumes),
                ),
            ),
        ),
    )
    # Hash the sanitized config so that any config change (or a forced
    # bounce) yields a new config_sha label and therefore a new rollout.
    config_hash = get_config_hash(
        self.sanitize_for_config_hash(complete_config),
        force_bounce=self.get_force_bounce(),
    )
    # NOTE: the hash is computed BEFORE these labels are added, so adding
    # them does not feed back into the hash itself.
    complete_config.metadata.labels['config_sha'] = config_hash
    complete_config.spec.template.metadata.labels['config_sha'] = config_hash
    log.debug("Complete configuration for instance is: %s", complete_config)
    return complete_config
def manage_deployment(self, logger):
    """Reconcile the bookbag Deployment for this workshop session.

    If the deployment already exists, compare its AUTH_USERNAME,
    AUTH_PASSWORD and WORKSHOP_VARS env vars against the desired values
    and replace the deployment when any of them drifted. If it does not
    exist (404), create it from scratch, optionally wiring up an
    OpenShift image-stream trigger annotation.

    Args:
        logger: logger used to report creation of a new deployment.

    Returns:
        The Deployment object as read, replaced, or created via the API.

    Raises:
        ApiException: any API error other than 404 on the initial read.
    """
    # Canonical serialization so that value comparison is stable.
    serialized_vars = json.dumps(self.vars, sort_keys=True, separators=(',', ':'))
    create_deployment = False
    try:
        deployment = apps_v1_api.read_namespaced_deployment(
            self.deployment_name, self.deployment_namespace)
        update_required = False
        for envvar in deployment.spec.template.spec.containers[0].env:
            if envvar.name == 'AUTH_USERNAME':
                if envvar.value != self.auth_username:
                    envvar.value = self.auth_username
                    # BUG FIX: was `updated_required` (typo), so drift was
                    # detected but the replace below never triggered.
                    update_required = True
            elif envvar.name == 'AUTH_PASSWORD':
                if envvar.value != self.auth_password:
                    envvar.value = self.auth_password
                    update_required = True
            elif envvar.name == 'WORKSHOP_VARS':
                if envvar.value != serialized_vars:
                    envvar.value = serialized_vars
                    update_required = True
        if update_required:
            apps_v1_api.replace_namespaced_deployment(
                self.deployment_name, self.deployment_namespace, deployment)
    except ApiException as e:
        if e.status == 404:
            create_deployment = True
        else:
            raise
    if create_deployment:
        logger.info(
            f"Creating Deployment {self.deployment_name} in {self.deployment_namespace}"
        )
        deployment = V1Deployment(
            metadata=V1ObjectMeta(
                annotations={
                    owner_annotation: self.make_owner_annotation(),
                },
                labels={owner_uid_label: self.uid},
                name=self.deployment_name,
            ),
            spec=V1DeploymentSpec(
                replicas=1,
                selector=V1LabelSelector(
                    match_labels={"name": self.deployment_name}),
                # Recreate: never run old and new bookbag pods concurrently.
                strategy=V1DeploymentStrategy(type="Recreate"),
                template=V1PodTemplateSpec(
                    metadata=V1ObjectMeta(
                        labels={"name": self.deployment_name}),
                    spec=V1PodSpec(
                        containers=[
                            V1Container(
                                name="bookbag",
                                env=[
                                    V1EnvVar(
                                        name="APPLICATION_NAME",
                                        value=self.deployment_name,
                                    ),
                                    V1EnvVar(
                                        name="AUTH_USERNAME",
                                        value=self.auth_username,
                                    ),
                                    V1EnvVar(
                                        name="AUTH_PASSWORD",
                                        value=self.auth_password,
                                    ),
                                    # Placeholders (no value) the image may
                                    # populate or default internally.
                                    V1EnvVar(name="CLUSTER_SUBDOMAIN"),
                                    V1EnvVar(
                                        name="OAUTH_SERVICE_ACCOUNT",
                                        value=self.deployment_name,
                                    ),
                                    V1EnvVar(
                                        name="WORKSHOP_VARS",
                                        value=serialized_vars,
                                    ),
                                    V1EnvVar(name="DOWNLOAD_URL"),
                                    V1EnvVar(name="WORKSHOP_FILE"),
                                    V1EnvVar(name="OC_VERSION"),
                                    V1EnvVar(name="ODO_VERSION"),
                                    V1EnvVar(name="KUBECTL_VERSION"),
                                ],
                                image=self.get_image(),
                                image_pull_policy="Always",
                                ports=[
                                    V1ContainerPort(container_port=10080)
                                ],
                            )
                        ],
                        service_account_name=self.deployment_name,
                    )),
            ),
        )
        if self.image_stream_name:
            # Let OpenShift retarget the container image whenever the
            # image stream's :latest tag moves.
            deployment.metadata.annotations[
                'image.openshift.io/triggers'] = json.dumps([{
                    "fieldPath":
                    'spec.template.spec.containers[?(@.name=="bookbag")].image',
                    "from": {
                        "kind": "ImageStreamTag",
                        "name": f"{self.image_stream_name}:latest",
                        "namespace": self.image_stream_namespace,
                    },
                }])
        deployment = apps_v1_api.create_namespaced_deployment(
            self.deployment_namespace, deployment)
    return deployment
def get_reference_object(self) -> V1Deployment:
    """Get deployment object for outpost"""
    # Map each configured deployment port onto a V1ContainerPort,
    # preferring the inner (container-side) port when one is set.
    container_ports = [
        V1ContainerPort(
            container_port=port.inner_port or port.port,
            name=port.name,
            protocol=port.protocol.upper(),
        )
        for port in self.controller.deployment_ports
    ]
    meta = self.get_object_meta(name=self.name)
    image_name = self.controller.get_container_image()
    image_pull_secrets = self.outpost.config.kubernetes_image_pull_secrets
    version = get_full_version()
    # All credentials come from the outpost secret named after this
    # deployment; each env var maps onto one key of that secret.
    env_vars = [
        V1EnvVar(
            name=env_name,
            value_from=V1EnvVarSource(
                secret_key_ref=V1SecretKeySelector(
                    name=self.name,
                    key=secret_key,
                )
            ),
        )
        for env_name, secret_key in (
            ("AUTHENTIK_HOST", "authentik_host"),
            ("AUTHENTIK_HOST_BROWSER", "authentik_host_browser"),
            ("AUTHENTIK_TOKEN", "token"),
            ("AUTHENTIK_INSECURE", "authentik_host_insecure"),
        )
    ]
    pod_labels = self.get_pod_meta(
        **{
            # Support istio-specific labels, but also use the standard k8s
            # recommendations
            "app.kubernetes.io/version": version,
            "app": "authentik-outpost",
            "version": version,
        }
    )
    pull_secret_refs = [V1ObjectReference(name=secret) for secret in image_pull_secrets]
    pod_spec = V1PodSpec(
        image_pull_secrets=pull_secret_refs,
        containers=[
            V1Container(
                name=str(self.outpost.type),
                image=image_name,
                ports=container_ports,
                env=env_vars,
            )
        ],
    )
    return V1Deployment(
        metadata=meta,
        spec=V1DeploymentSpec(
            replicas=self.outpost.config.kubernetes_replicas,
            selector=V1LabelSelector(match_labels=self.get_pod_meta()),
            template=V1PodTemplateSpec(
                metadata=V1ObjectMeta(labels=pod_labels),
                spec=pod_spec,
            ),
        ),
    )