def test_sdk_e2e():
    """E2E: submit a distributed PyTorchJob, wait for readiness, then clean up."""
    mnist_container = V1Container(
        name="pytorch",
        image="gcr.io/kubeflow-ci/pytorch-dist-mnist-test:v1.0",
        args=["--backend", "gloo"],
    )

    def single_replica():
        # Master and Worker use an identical single-replica spec.
        return V1ReplicaSpec(
            replicas=1,
            restart_policy="OnFailure",
            template=V1PodTemplateSpec(
                spec=V1PodSpec(containers=[mnist_container])),
        )

    pytorchjob = V1PyTorchJob(
        api_version="kubeflow.org/v1",
        kind="PyTorchJob",
        metadata=V1ObjectMeta(name="pytorchjob-mnist-ci-test",
                              namespace='default'),
        spec=V1PyTorchJobSpec(
            clean_pod_policy="None",
            pytorch_replica_specs={
                "Master": single_replica(),
                "Worker": single_replica(),
            },
        ),
    )

    PYTORCH_CLIENT.create(pytorchjob)
    wait_for_pytorchjob_ready("pytorchjob-mnist-ci-test")
    PYTORCH_CLIENT.delete('pytorchjob-mnist-ci-test', namespace='default')
def setUp(self):
    """Load the example cluster config and build the expected reference StatefulSet."""
    super().setUp()
    # Canonical example cluster definition, both as raw dict and model object.
    self.cluster_dict = getExampleClusterDefinition()
    self.cluster_object = V1MongoClusterConfiguration(**self.cluster_dict)
    self.name = self.cluster_object.metadata.name
    self.namespace = self.cluster_object.metadata.namespace
    # Expected StatefulSet: 3-member Mongo replica set, each member with a
    # fixed 100m/64Mi resource budget and a 30Gi volume claim.
    self.stateful_set = V1beta1StatefulSet(
        metadata=self._createMeta(self.name),
        spec=V1beta1StatefulSetSpec(
            replicas=3,
            service_name=self.name,
            template=V1PodTemplateSpec(
                metadata=V1ObjectMeta(labels=KubernetesResources.
                                      createDefaultLabels(self.name)),
                spec=V1PodSpec(containers=[
                    V1Container(
                        name="mongodb",
                        env=[
                            # Pod IP injected via the downward API.
                            V1EnvVar(name="POD_IP",
                                     value_from=V1EnvVarSource(
                                         field_ref=V1ObjectFieldSelector(
                                             api_version="v1",
                                             field_path="status.podIP")))
                        ],
                        command=[
                            "mongod", "--replSet", self.name, "--bind_ip",
                            "0.0.0.0", "--smallfiles", "--noprealloc"
                        ],
                        image="mongo:3.6.4",
                        ports=[
                            V1ContainerPort(name="mongodb",
                                            container_port=27017,
                                            protocol="TCP")
                        ],
                        volume_mounts=[
                            V1VolumeMount(name="mongo-storage",
                                          read_only=False,
                                          mount_path="/data/db")
                        ],
                        resources=V1ResourceRequirements(limits={
                            "cpu": "100m",
                            "memory": "64Mi"
                        }, requests={
                            "cpu": "100m",
                            "memory": "64Mi"
                        }))
                ])),
            volume_claim_templates=[
                V1PersistentVolumeClaim(
                    metadata=V1ObjectMeta(name="mongo-storage"),
                    spec=V1PersistentVolumeClaimSpec(
                        access_modes=["ReadWriteOnce"],
                        resources=V1ResourceRequirements(
                            requests={"storage": "30Gi"})))
            ],
        ),
    )
def get_streaming_app_cronjob(
    name: str = "test-cronjob",
    input_topics: Optional[str] = None,
    output_topic: Optional[str] = "output-topic",
    error_topic: Optional[str] = "error-topic",
    env_prefix: str = "APP_",
    pipeline: Optional[str] = None,
) -> V1beta1CronJob:
    """Build a v1beta1 CronJob wrapping a single streaming-app container."""
    # Topic configuration is handed to the container purely via env vars.
    container_env = get_env(
        input_topics,
        output_topic,
        error_topic,
        env_prefix=env_prefix,
    )
    pod_template = V1PodTemplateSpec(
        spec=V1PodSpec(
            containers=[V1Container(name="test-container", env=container_env)]
        )
    )
    cron_spec = V1beta1CronJobSpec(
        job_template=V1beta1JobTemplateSpec(
            spec=V1JobSpec(template=pod_template, selector=None)
        ),
        schedule="* * * * *",  # every minute
    )
    return V1beta1CronJob(
        metadata=get_metadata(name, pipeline=pipeline), spec=cron_spec
    )
def test_sdk_e2e():
    """E2E: submit a single-worker TFJob, wait for success, then clean up."""
    mnist_container = V1Container(
        name="tensorflow",
        image="gcr.io/kubeflow-ci/tf-mnist-with-summaries:1.0",
        command=[
            "python",
            "/var/tf_mnist/mnist_with_summaries.py",
            "--log_dir=/train/logs",
            "--learning_rate=0.01",
            "--batch_size=150",
        ],
    )
    worker_spec = V1ReplicaSpec(
        replicas=1,
        restart_policy="Never",
        template=V1PodTemplateSpec(
            spec=V1PodSpec(containers=[mnist_container])),
    )
    tfjob = V1TFJob(
        api_version="kubeflow.org/v1",
        kind="TFJob",
        metadata=V1ObjectMeta(name="mnist-ci-test",
                              namespace=SDK_TEST_NAMESPACE),
        spec=V1TFJobSpec(clean_pod_policy="None",
                         tf_replica_specs={"Worker": worker_spec}),
    )

    TFJOB_CLIENT.create(tfjob, namespace=SDK_TEST_NAMESPACE)
    TFJOB_CLIENT.wait_for_job("mnist-ci-test", namespace=SDK_TEST_NAMESPACE)
    if not TFJOB_CLIENT.if_job_succeeded("mnist-ci-test",
                                         namespace=SDK_TEST_NAMESPACE):
        raise RuntimeError("The TFJob is not succeeded.")
    TFJOB_CLIENT.delete("mnist-ci-test", namespace=SDK_TEST_NAMESPACE)
def generate_delaying_proxy_deployment(concourse_cfg: ConcourseConfig):
    """Render a single-replica Deployment for the delaying GitHub-enterprise proxy."""
    ensure_not_none(concourse_cfg)
    external_url = concourse_cfg.external_url()
    labels = {'app': 'delaying-proxy'}

    proxy_container = V1Container(
        image='eu.gcr.io/gardener-project/cc/github-enterprise-proxy:0.1.0',
        image_pull_policy='IfNotPresent',
        name='delaying-proxy',
        ports=[V1ContainerPort(container_port=8080)],
        # Probe the listening TCP port directly rather than an HTTP endpoint.
        liveness_probe=V1Probe(
            tcp_socket=V1TCPSocketAction(port=8080),
            initial_delay_seconds=10,
            period_seconds=10,
        ),
        env=[V1EnvVar(name='CONCOURSE_URL', value=external_url)],
    )

    return V1Deployment(
        kind='Deployment',
        metadata=V1ObjectMeta(name='delaying-proxy'),
        spec=V1DeploymentSpec(
            replicas=1,
            selector=V1LabelSelector(match_labels=labels),
            template=V1PodTemplateSpec(
                metadata=V1ObjectMeta(labels=labels),
                spec=V1PodSpec(containers=[proxy_container]),
            ),
        ),
    )
def get_pod_template_spec(
    self,
    code_sha: str,
    system_paasta_config: SystemPaastaConfig,
) -> V1PodTemplateSpec:
    """Build the pod template for this instance: identity labels plus containers/volumes."""
    service_namespace_config = load_service_namespace_config(
        service=self.service,
        namespace=self.get_nerve_namespace(),
    )
    docker_volumes = self.get_volumes(
        system_volumes=system_paasta_config.get_volumes())

    # Labels identify the owning service/instance and the code revision being run.
    pod_metadata = V1ObjectMeta(labels={
        "service": self.get_service(),
        "instance": self.get_instance(),
        "git_sha": code_sha,
    }, )
    pod_spec = V1PodSpec(
        containers=self.get_kubernetes_containers(
            docker_volumes=docker_volumes,
            aws_ebs_volumes=self.get_aws_ebs_volumes(),
            system_paasta_config=system_paasta_config,
            service_namespace_config=service_namespace_config,
        ),
        restart_policy="Always",
        volumes=self.get_pod_volumes(
            docker_volumes=docker_volumes,
            aws_ebs_volumes=self.get_aws_ebs_volumes(),
        ),
    )
    return V1PodTemplateSpec(metadata=pod_metadata, spec=pod_spec)
def get_template(
    input_topics,
    output_topic,
    error_topic,
    multiple_inputs=None,
    multiple_outputs=None,
    env_prefix="APP_",
    consumer_group=None,
) -> V1PodTemplateSpec:
    """Build a pod template whose container is configured via topic env vars.

    Fix: the return annotation previously claimed ``List[V1EnvVar]`` but the
    function has always returned a ``V1PodTemplateSpec``.

    :param input_topics: value for ``<prefix>INPUT_TOPICS``; skipped when falsy.
    :param output_topic: value for ``<prefix>OUTPUT_TOPIC``.
    :param error_topic: value for ``<prefix>ERROR_TOPIC``.
    :param multiple_inputs: value for ``<prefix>EXTRA_INPUT_TOPICS``; skipped when falsy.
    :param multiple_outputs: value for ``<prefix>EXTRA_OUTPUT_TOPICS``; skipped when falsy.
    :param env_prefix: prefix applied to every topic variable name.
    :param consumer_group: when given, recorded as a ``consumerGroup`` pod annotation.
    :return: pod template spec wrapping a single "test-container".
    """
    env = [
        V1EnvVar(name="ENV_PREFIX", value=env_prefix),
        V1EnvVar(name=env_prefix + "OUTPUT_TOPIC", value=output_topic),
        V1EnvVar(name=env_prefix + "ERROR_TOPIC", value=error_topic),
    ]
    # Optional topic settings are only materialized when provided.
    if input_topics:
        env.append(
            V1EnvVar(name=env_prefix + "INPUT_TOPICS", value=input_topics))
    if multiple_inputs:
        env.append(
            V1EnvVar(name=env_prefix + "EXTRA_INPUT_TOPICS",
                     value=multiple_inputs))
    if multiple_outputs:
        env.append(
            V1EnvVar(name=env_prefix + "EXTRA_OUTPUT_TOPICS",
                     value=multiple_outputs))
    container = V1Container(name="test-container", env=env)
    pod_spec = V1PodSpec(containers=[container])
    spec_metadata = None
    if consumer_group is not None:
        spec_metadata = V1ObjectMeta(
            annotations={"consumerGroup": consumer_group}, )
    return V1PodTemplateSpec(spec=pod_spec, metadata=spec_metadata)
def test_sdk_e2e():
    """E2E: run a distributed XGBoost training job and verify it succeeds."""
    trainer = V1Container(
        name="xgboost",
        image="docker.io/merlintang/xgboost-dist-iris:1.1",
        args=[
            "--job_type=Train",
            "--xgboost_parameter=objective:multi:softprob,num_class:3",
            "--n_estimators=10",
            "--learning_rate=0.1",
            "--model_path=/tmp/xgboost-model",
            "--model_storage_type=local",
        ],
    )

    def single_replica():
        # Master and Worker share an identical single-replica spec.
        return V1ReplicaSpec(
            replicas=1,
            restart_policy="Never",
            template=V1PodTemplateSpec(spec=V1PodSpec(containers=[trainer])),
        )

    xgboostjob = KubeflowOrgV1XGBoostJob(
        api_version="kubeflow.org/v1",
        kind="XGBoostJob",
        metadata=V1ObjectMeta(name="xgboostjob-iris-ci-test",
                              namespace=SDK_TEST_NAMESPACE),
        spec=KubeflowOrgV1XGBoostJobSpec(
            run_policy=V1RunPolicy(clean_pod_policy="None", ),
            xgb_replica_specs={
                "Master": single_replica(),
                "Worker": single_replica(),
            },
        ),
    )

    XGBOOST_CLIENT.create(xgboostjob)
    XGBOOST_CLIENT.wait_for_job("xgboostjob-iris-ci-test",
                                namespace=SDK_TEST_NAMESPACE)
    if not XGBOOST_CLIENT.is_job_succeeded("xgboostjob-iris-ci-test",
                                           namespace=SDK_TEST_NAMESPACE):
        raise RuntimeError("The XGBoostJob is not succeeded.")
    XGBOOST_CLIENT.get_logs("xgboostjob-iris-ci-test",
                            namespace=SDK_TEST_NAMESPACE)
    XGBOOST_CLIENT.delete("xgboostjob-iris-ci-test",
                          namespace=SDK_TEST_NAMESPACE)
def get_reference_object(self) -> V1Deployment: """Get deployment object for outpost""" # Generate V1ContainerPort objects container_ports = [] for port in self.controller.deployment_ports: container_ports.append( V1ContainerPort( container_port=port.port, name=port.name, protocol=port.protocol.upper(), )) meta = self.get_object_meta(name=self.name) secret_name = f"authentik-outpost-{self.controller.outpost.uuid.hex}-api" image_prefix = CONFIG.y("outposts.docker_image_base") return V1Deployment( metadata=meta, spec=V1DeploymentSpec( replicas=self.outpost.config.kubernetes_replicas, selector=V1LabelSelector(match_labels=self.get_pod_meta()), template=V1PodTemplateSpec( metadata=V1ObjectMeta(labels=self.get_pod_meta()), spec=V1PodSpec(containers=[ V1Container( name=str(self.outpost.type), image= f"{image_prefix}-{self.outpost.type}:{__version__}", ports=container_ports, env=[ V1EnvVar( name="AUTHENTIK_HOST", value_from=V1EnvVarSource( secret_key_ref=V1SecretKeySelector( name=secret_name, key="authentik_host", )), ), V1EnvVar( name="AUTHENTIK_TOKEN", value_from=V1EnvVarSource( secret_key_ref=V1SecretKeySelector( name=secret_name, key="token", )), ), V1EnvVar( name="AUTHENTIK_INSECURE", value_from=V1EnvVarSource( secret_key_ref=V1SecretKeySelector( name=secret_name, key="authentik_host_insecure", )), ), ], ) ]), ), ), )
def _build_rs(self):
    """Compose a v1beta1 ReplicaSet from the builder's configured pieces."""
    pod_template = V1PodTemplateSpec(
        metadata=V1ObjectMeta(
            labels=self.target_labels,
            # An empty annotation dict is normalized to None.
            annotations=self.annotations or None,
        ),
        spec=V1PodSpec(containers=self.containers, volumes=self.volumes),
    )
    return V1beta1ReplicaSet(
        metadata=self.meta,
        spec=V1beta1ReplicaSetSpec(
            replicas=self.replicas,
            selector=self.selector,
            template=pod_template,
        ),
    )
def test_sdk_e2e():
    """E2E: run a distributed PyTorchJob via the KubeflowOrg SDK and verify success."""
    mnist_container = V1Container(
        name="pytorch",
        image="gcr.io/kubeflow-ci/pytorch-dist-mnist-test:v1.0",
        args=["--backend", "gloo"],
    )

    def single_replica():
        # Master and Worker share an identical single-replica spec.
        return V1ReplicaSpec(
            replicas=1,
            restart_policy="OnFailure",
            template=V1PodTemplateSpec(
                spec=V1PodSpec(containers=[mnist_container])),
        )

    pytorchjob = KubeflowOrgV1PyTorchJob(
        api_version="kubeflow.org/v1",
        kind="PyTorchJob",
        metadata=V1ObjectMeta(name="pytorchjob-mnist-ci-test",
                              namespace=SDK_TEST_NAMESPACE),
        spec=KubeflowOrgV1PyTorchJobSpec(
            run_policy=V1RunPolicy(clean_pod_policy="None", ),
            pytorch_replica_specs={
                "Master": single_replica(),
                "Worker": single_replica(),
            },
        ),
    )

    PYTORCH_CLIENT.create(pytorchjob)
    PYTORCH_CLIENT.wait_for_job("pytorchjob-mnist-ci-test",
                                namespace=SDK_TEST_NAMESPACE)
    if not PYTORCH_CLIENT.is_job_succeeded("pytorchjob-mnist-ci-test",
                                           namespace=SDK_TEST_NAMESPACE):
        raise RuntimeError("The PyTorchJob is not succeeded.")
    PYTORCH_CLIENT.get_logs("pytorchjob-mnist-ci-test",
                            namespace=SDK_TEST_NAMESPACE)
    PYTORCH_CLIENT.delete("pytorchjob-mnist-ci-test",
                          namespace=SDK_TEST_NAMESPACE)
def __create_app_deployment(self, labels):
    """Create a single-replica Deployment for the app container in the todo-app namespace."""
    app_container = V1Container(
        name=self.container_name,
        image=self.image_name,
        image_pull_policy='IfNotPresent',
        ports=[V1ContainerPort(container_port=self.container_port)],
        # DB connection settings come from the shared infra config map.
        env_from=[
            V1EnvFromSource(
                config_map_ref=V1ConfigMapEnvSource(name=INFRA_DB_CONFIG))
        ],
    )
    pod_template = V1PodTemplateSpec(
        metadata=V1ObjectMeta(name=self.container_name, labels=labels),
        spec=V1PodSpec(containers=[app_container]),
    )
    deployment = V1Deployment(
        metadata=V1ObjectMeta(name=self.container_name),
        spec=V1DeploymentSpec(
            replicas=1,
            selector=V1LabelSelector(match_labels=labels),
            template=pod_template,
        ),
    )
    self.appsApi.create_namespaced_deployment(namespace=TODO_APP_NAMESPACE,
                                              body=deployment)
def _create_flush_job(
    batch_api: BatchV1Api,
    command: List[str],
    env: List[V1EnvVar],
    image: str,
    name: str,
    namespace: str,
    service_account_name: str,
) -> V1Job:
    """Create a Job that flushes the "queue" PVC named ``name``; reuse it if it already exists.

    :param batch_api: batch API client used for create/read calls.
    :param command: command run by the flush container.
    :param env: environment passed to the flush container.
    :param image: container image for the flush job.
    :param name: name shared by the job and the PVC it mounts.
    :param namespace: namespace for the job.
    :param service_account_name: service account the pod runs as.
    :return: the created (or pre-existing) V1Job.
    """
    logger.info(f"creating job: {name}")
    try:
        return batch_api.create_namespaced_job(
            namespace=namespace,
            body=V1Job(
                api_version="batch/v1",
                kind="Job",
                metadata=V1ObjectMeta(name=name, namespace=namespace),
                spec=V1JobSpec(
                    template=V1PodTemplateSpec(
                        spec=V1PodSpec(
                            containers=[
                                V1Container(
                                    image=image,
                                    command=command,
                                    name="flush",
                                    volume_mounts=[
                                        V1VolumeMount(mount_path="/data", name="queue")
                                    ],
                                    env=env,
                                )
                            ],
                            restart_policy="OnFailure",
                            volumes=[
                                V1Volume(
                                    name="queue",
                                    # The PVC deliberately shares the job's name.
                                    persistent_volume_claim=(
                                        V1PersistentVolumeClaimVolumeSource(
                                            claim_name=name
                                        )
                                    ),
                                )
                            ],
                            service_account_name=service_account_name,
                        )
                    )
                ),
            ),
        )
    except ApiException as e:
        # Conflict + AlreadyExists: another actor created the job first, so
        # fetch and return the existing one instead of failing.
        if e.reason == CONFLICT and json.loads(e.body)["reason"] == ALREADY_EXISTS:
            logger.info(f"using existing job: {name}")
            return batch_api.read_namespaced_job(name, namespace)
        raise
def _create_pytorchjob(pod_spec: dict, job_name='pytorch-operation-job',
                       namespace='kubeflow', worker_num=0):
    """Build (without submitting) a V1PyTorchJob with one master and optional workers.

    Fix: renamed the misspelled local ``disbale_istio_injection`` to
    ``disable_istio_injection`` (local variable only; no interface change).

    :param pod_spec: pod spec dict shared by the master and worker replicas.
    :param job_name: metadata name for the PyTorchJob.
    :param namespace: namespace used for the job and its pod templates.
    :param worker_num: number of worker replicas; 0 means master-only.
    :return: the assembled V1PyTorchJob object.
    """
    # Annotation that turns off Istio sidecar injection for the training pods.
    disable_istio_injection = {
        'sidecar.istio.io/inject': "false"
    }

    def _replica(count):
        # Master and workers differ only in their replica count.
        return V1ReplicaSpec(
            replicas=count,
            restart_policy="OnFailure",
            template=V1PodTemplateSpec(
                metadata=V1ObjectMeta(annotations=disable_istio_injection,
                                      namespace=namespace),
                spec=pod_spec
            )
        )

    replica_spec = {"Master": _replica(1)}
    if worker_num > 0:
        replica_spec['Worker'] = _replica(worker_num)

    return V1PyTorchJob(
        api_version="kubeflow.org/v1",
        kind="PyTorchJob",
        metadata=V1ObjectMeta(name=job_name, namespace=namespace),
        spec=V1PyTorchJobSpec(
            clean_pod_policy="None",
            pytorch_replica_specs=replica_spec
        )
    )
def __init__(self) -> None:
    """Define a single-replica Postgres StatefulSet on self.stateful_set (not applied here)."""
    metadata = V1ObjectMeta(name="postgres", labels={"app": "postgres"})
    label_selector = V1LabelSelector(match_labels={"app": "postgres"})
    # Trust auth keeps the test database password-free.
    env = [V1EnvVar(name="POSTGRES_HOST_AUTH_METHOD", value="trust")]
    ports = [V1ContainerPort(container_port=5432, name="sql")]
    volume_mounts = [
        V1VolumeMount(name="data", mount_path="/data"),
        # Init scripts from the "postgres-init" config map run at first boot.
        V1VolumeMount(
            name="postgres-init", mount_path="/docker-entrypoint-initdb.d"
        ),
    ]
    volume_config = V1ConfigMapVolumeSource(
        name="postgres-init",
    )
    volumes = [V1Volume(name="postgres-init", config_map=volume_config)]
    container = V1Container(
        name="postgres",
        image="postgres:14.3",
        env=env,
        ports=ports,
        volume_mounts=volume_mounts,
    )
    pod_spec = V1PodSpec(containers=[container], volumes=volumes)
    template_spec = V1PodTemplateSpec(metadata=metadata, spec=pod_spec)
    # Each replica gets its own 1Gi "data" persistent volume claim.
    claim_templates = [
        V1PersistentVolumeClaim(
            metadata=V1ObjectMeta(name="data"),
            spec=V1PersistentVolumeClaimSpec(
                access_modes=["ReadWriteOnce"],
                resources=V1ResourceRequirements(requests={"storage": "1Gi"}),
            ),
        )
    ]
    self.stateful_set = V1StatefulSet(
        api_version="apps/v1",
        kind="StatefulSet",
        metadata=metadata,
        spec=V1StatefulSetSpec(
            service_name="postgres",
            replicas=1,
            selector=label_selector,
            template=template_spec,
            volume_claim_templates=claim_templates,
        ),
    )
def _createStatefulSet(self) -> V1beta1StatefulSet:
    """Build the 3-member Mongo replica-set StatefulSet for this cluster."""
    return V1beta1StatefulSet(
        metadata=self._createMeta(self.name),
        spec=V1beta1StatefulSetSpec(
            replicas=3,
            service_name=self.name,
            template=V1PodTemplateSpec(
                metadata=V1ObjectMeta(labels=KubernetesResources.
                                      createDefaultLabels(self.name)),
                spec=V1PodSpec(containers=[
                    V1Container(
                        name="mongodb",
                        env=[
                            # Pod IP injected via the downward API.
                            V1EnvVar(name="POD_IP",
                                     value_from=V1EnvVarSource(
                                         field_ref=V1ObjectFieldSelector(
                                             api_version="v1",
                                             field_path="status.podIP")))
                        ],
                        command=[
                            "mongod", "--wiredTigerCacheSizeGB", "0.25",
                            "--replSet", self.name, "--bind_ip", "0.0.0.0",
                            "--smallfiles", "--noprealloc"
                        ],
                        image="mongo:3.6.4",
                        ports=[
                            V1ContainerPort(name="mongodb",
                                            container_port=27017,
                                            protocol="TCP")
                        ],
                        volume_mounts=[
                            V1VolumeMount(name="mongo-storage",
                                          read_only=False,
                                          mount_path="/data/db")
                        ],
                        # Resource limits are delegated to a sibling helper.
                        resources=self._createResourceLimits())
                ])),
            volume_claim_templates=[
                V1PersistentVolumeClaim(
                    metadata=V1ObjectMeta(name="mongo-storage"),
                    spec=V1PersistentVolumeClaimSpec(
                        access_modes=["ReadWriteOnce"],
                        resources=V1ResourceRequirements(
                            requests={"storage": "30Gi"})))
            ],
        ),
    )
def ensure_single_container_deployment(api_apps_v1, container, name, namespace, replicas=1):
    """Create or update a Deployment that runs exactly one container."""
    pod_template = V1PodTemplateSpec(
        metadata=V1ObjectMeta(name=name, labels={'app': name}),
        spec=V1PodSpec(containers=[container]),
    )
    deployment = V1Deployment(
        api_version="apps/v1",
        metadata=V1ObjectMeta(name=name, labels={'app': name}),
        spec=V1DeploymentSpec(
            replicas=replicas,
            # Pods are matched to the deployment purely via the 'app' label.
            selector=V1LabelSelector(match_labels={'app': name}),
            template=pod_template,
        ),
    )
    ensure_deployment(
        api=api_apps_v1,
        deployment=deployment,
        name=name,
        namespace=namespace,
    )
def test_sanitize_config_hash(self):
    """replicas must be stripped from the spec before the config is hashed."""
    deployment_config = V1Deployment(
        metadata=V1ObjectMeta(
            name='qwe',
            labels={'mc': 'grindah'},
        ),
        spec=V1DeploymentSpec(
            replicas=2,
            selector=V1LabelSelector(match_labels={'freq': '108.9'}, ),
            template=V1PodTemplateSpec(),
        ),
    )
    sanitized = self.deployment.sanitize_for_config_hash(deployment_config)
    assert 'replicas' not in sanitized['spec'].keys()
def _build_deployment(self):
    """Compose an apps/v1beta1 Deployment with a zero-surge rolling update."""
    pod_template = V1PodTemplateSpec(
        metadata=V1ObjectMeta(
            labels=self.target_labels,
            # An empty annotation dict is normalized to None.
            annotations=self.annotations or None,
        ),
        spec=V1PodSpec(
            containers=self.containers,
            volumes=self.volumes,
            node_name=self.node_name,
            hostname=self.host_name,
        ),
    )
    rollout_strategy = AppsV1beta1DeploymentStrategy(
        type="RollingUpdate",
        rolling_update=AppsV1beta1RollingUpdateDeployment(max_surge=0),
    )
    return AppsV1beta1Deployment(
        metadata=self.meta,
        spec=AppsV1beta1DeploymentSpec(
            replicas=self.replicas,
            selector=self.selector,
            template=pod_template,
            strategy=rollout_strategy,
        ),
    )
def create_app(self, namespace, name, labels, app):
    """Create a Deployment for the given app and submit it to the cluster.

    :param str namespace: target Kubernetes namespace.
    :param str name: deployment name; also used as the selector value.
    :param Dict labels: labels applied to the deployment and its pods
        (mutated in place to include the default selector).
    :param cloudshell.cp.kubernetes.models.deployment_requests.
        AppDeploymentRequest app:
    :rtype: V1Deployment
    """
    # The default selector ties the deployment's pods to this app name.
    app_selector = {TagsService.get_default_selector(name): name}
    labels.update(app_selector)
    annotations = {}
    meta = V1ObjectMeta(name=name, labels=labels)
    template_meta = V1ObjectMeta(labels=labels, annotations=annotations)
    container = self._prepare_app_container(
        name=app.name,
        image=app.image,
        start_command=app.start_command,
        environment_variables=app.environment_variables,
        compute_spec=app.compute_spec,
        internal_ports=app.internal_ports,
        external_ports=app.external_ports,
    )
    pod_spec = V1PodSpec(containers=[container])
    app_template = V1PodTemplateSpec(metadata=template_meta, spec=pod_spec)
    app_spec = V1DeploymentSpec(
        replicas=app.replicas,
        template=app_template,
        selector={"matchLabels": app_selector},
    )
    deployment = V1Deployment(metadata=meta, spec=app_spec)
    self._logger.info("Creating namespaced deployment for app {}".format(name))
    self._logger.debug("Creating namespaced deployment with the following specs:")
    self._logger.debug(deployment.to_str())
    return self._clients.apps_api.create_namespaced_deployment(
        namespace=namespace, body=deployment, pretty="true"
    )
def __init__(self) -> None:
    """Define a single-node redpanda Deployment on self.deployment (not applied here)."""
    redpanda_labels = {"app": "redpanda"}
    start_command = [
        "/usr/bin/rpk", "redpanda", "start",
        "--overprovisioned",
        "--smp", "1",
        "--memory", "1G",
        "--reserve-memory", "0M",
        "--node-id", "0",
        "--check=false",
        "--set", "redpanda.enable_transactions=true",
        "--set", "redpanda.enable_idempotence=true",
        "--set", "redpanda.auto_create_topics_enabled=false",
        "--advertise-kafka-addr", "redpanda:9092",
    ]
    broker = V1Container(
        name="redpanda",
        image="vectorized/redpanda:v21.11.13",
        command=start_command,
    )
    pod_template = V1PodTemplateSpec(
        metadata=V1ObjectMeta(labels=redpanda_labels),
        spec=V1PodSpec(containers=[broker]),
    )
    deployment_spec = V1DeploymentSpec(
        replicas=1,
        template=pod_template,
        selector=V1LabelSelector(match_labels=redpanda_labels),
    )
    self.deployment = V1Deployment(
        api_version="apps/v1",
        kind="Deployment",
        metadata=V1ObjectMeta(name="redpanda"),
        spec=deployment_spec,
    )
def cron_jobs(self):
    """Return a list holding a single v1beta1 CronJob fixture."""
    prefix = "APP_"
    container = V1Container(
        name="test-container",
        env=[
            V1EnvVar(name="ENV_PREFIX", value=prefix),
            V1EnvVar(name=prefix + "OUTPUT_TOPIC", value="output-topic"),
        ],
    )
    job_template = V1beta1JobTemplateSpec(
        spec=V1JobSpec(
            template=V1PodTemplateSpec(
                spec=V1PodSpec(containers=[container])),
            selector="",
        )
    )
    cron_spec = V1beta1CronJobSpec(job_template=job_template,
                                   schedule="* * * * *")
    return [
        V1beta1CronJob(metadata=V1ObjectMeta(name="test-cronjob"),
                       spec=cron_spec)
    ]
def _create_kube_job(self, op_inst, podspec, namespace=KubernetesConfig.K8S_NAMESPACE):
    """Wrap the given pod spec in a V1Job named after the operation instance."""
    # Both job and pod carry the op-guid label so the service can bind to the pod.
    job_metadata = client.V1ObjectMeta(
        name=op_inst.guid + "-job",
        namespace=namespace,
        labels={KubernetesConfig.K8S_LABELS_OPGUID: op_inst.guid},
    )
    pod_metadata = client.V1ObjectMeta(
        name=op_inst.guid + "-pod",
        namespace=namespace,
        labels={KubernetesConfig.K8S_LABELS_OPGUID: op_inst.guid},
    )
    job_spec = V1JobSpec(
        template=V1PodTemplateSpec(metadata=pod_metadata, spec=podspec))
    return V1Job(metadata=job_metadata, spec=job_spec)
def ensure_statefulset_with_containers(api_apps_v1,
                                       name,
                                       namespace,
                                       containers,
                                       volume_paths,
                                       replicas=1,
                                       init_containers=None,
                                       volumes=None):
    """Create or update a StatefulSet running the given containers.

    Each ``volume_paths`` entry produces one PVC template; this function reads
    index 0 (claim name), index 2 (requested storage size) and index 3
    (storage class). Index 1 is unused here — presumably the mount path
    consumed by the container definitions; TODO confirm against callers.
    """
    # None defaults avoid the shared-mutable-default-argument pitfall.
    if volumes is None:
        volumes = []
    if init_containers is None:
        init_containers = []
    volume_claim_templates = [
        V1PersistentVolumeClaim(metadata=V1ObjectMeta(name=path[0]),
                                spec=V1PersistentVolumeClaimSpec(
                                    access_modes=['ReadWriteOnce'],
                                    resources=V1ResourceRequirements(
                                        requests={'storage': path[2]}),
                                    storage_class_name=path[3]))
        for path in volume_paths
    ]
    ss = client.V1StatefulSet(
        api_version="apps/v1",
        kind="StatefulSet",
        metadata=client.V1ObjectMeta(name=name, labels={'app': name}),
        spec=client.V1StatefulSetSpec(
            replicas=replicas,
            service_name=name,
            template=V1PodTemplateSpec(
                metadata=V1ObjectMeta(labels={"app": name}),
                spec=V1PodSpec(containers=containers,
                               volumes=volumes,
                               init_containers=init_containers)),
            selector={'matchLabels': {
                'app': name
            }},
            volume_claim_templates=volume_claim_templates))
    ensure_statefulset(api_apps_v1,
                       stateful_set=ss,
                       namespace=namespace,
                       name=name)
def _get_pod_template_spec(self, container_command: str, pvc: str, operation: str,
                           metadata: V1ObjectMeta = None) -> V1PodTemplateSpec:
    """Return a PodTemplateSpec running ``container_command`` against ``pvc``.

    :param container_command: command to run in the container (forwarded as-is).
    :param pvc: name of the persistent volume claim used by the data movement.
    :param operation: operation name; used to derive default metadata labels.
        When explicit ``metadata`` is supplied the default 'created-by-operation'
        label is not added.
    :param metadata: optional metadata for the pod template spec.
    :return: the pod template spec definition for the provided command.
    """
    # Fall back to operation-derived labels only when no metadata was supplied.
    if metadata is None:
        metadata = V1ObjectMeta(labels=_get_labels(operation=operation))
    return V1PodTemplateSpec(
        metadata=metadata,
        spec=self._get_pod_spec(container_command=container_command, pvc=pvc),
    )
def get_template(
    input_topics: Optional[str],
    output_topic: Optional[str],
    error_topic: Optional[str],
    multiple_inputs: Optional[str],
    multiple_outputs: Optional[str],
    extra: Dict[str, str],
    env_prefix: str = "APP_",
    consumer_group: Optional[str] = None,
    config_type: ConfigType = ConfigType.ENV,
) -> V1PodTemplateSpec:
    """Build a pod template whose container is configured via env vars or CLI args."""
    env = None
    args = None
    # Exactly one configuration channel is populated, chosen by config_type.
    if config_type == ConfigType.ENV:
        env = get_env(
            input_topics,
            output_topic,
            error_topic,
            multiple_inputs,
            multiple_outputs,
            env_prefix=env_prefix,
            extra=extra,
        )
    elif config_type == ConfigType.ARGS:
        args = get_args(
            input_topics,
            output_topic,
            error_topic,
            multiple_inputs,
            multiple_outputs,
            extra,
        )
    pod_spec = V1PodSpec(
        containers=[V1Container(name="test-container", env=env, args=args)]
    )
    # Metadata is only attached when a consumer-group annotation is requested.
    spec_metadata = (
        V1ObjectMeta(annotations={"consumerGroup": consumer_group}, )
        if consumer_group is not None
        else None
    )
    return V1PodTemplateSpec(spec=pod_spec, metadata=spec_metadata)
def get_pod_template_spec(
    self,
    code_sha: str,
    system_paasta_config: SystemPaastaConfig,
) -> V1PodTemplateSpec:
    """Assemble the pod template: identity labels, smartstack annotations, containers, volumes."""
    service_namespace_config = load_service_namespace_config(
        service=self.service,
        namespace=self.get_nerve_namespace(),
    )
    docker_volumes = self.get_volumes(
        system_volumes=system_paasta_config.get_volumes())
    return V1PodTemplateSpec(
        metadata=V1ObjectMeta(
            labels={
                "yelp.com/paasta_service": self.get_service(),
                "yelp.com/paasta_instance": self.get_instance(),
                "yelp.com/paasta_git_sha": code_sha,
            },
            annotations={
                # Registrations serialized as JSON for sidecars to consume.
                "smartstack_registrations":
                json.dumps(self.get_registrations()),
            },
        ),
        spec=V1PodSpec(
            service_account_name=self.get_kubernetes_service_account_name(
            ),
            containers=self.get_kubernetes_containers(
                docker_volumes=docker_volumes,
                aws_ebs_volumes=self.get_aws_ebs_volumes(),
                system_paasta_config=system_paasta_config,
                service_namespace_config=service_namespace_config,
            ),
            restart_policy="Always",
            volumes=self.get_pod_volumes(
                docker_volumes=docker_volumes,
                aws_ebs_volumes=self.get_aws_ebs_volumes(),
            ),
            # "Default" inherits the node's DNS settings rather than cluster DNS.
            dns_policy="Default",
        ),
    )
def __init__(self, namespace: str = "default", job_spec_template: V1JobSpec = None,
             print_output: bool = False):
    """Initialize a DataMoverJob object.

    :param namespace: Namespace which applies to the job; None falls back to "default".
    :param job_spec_template: Optional Kubernetes job spec used as a template for
        the job's optional properties. Derived classes are expected to copy it and
        replace the required ``template`` field with the pod template spec for the
        operation being performed.
    :param print_output: When True, information is printed to the console.
    """
    self.namespace = "default" if namespace is None else namespace
    # Without a caller-supplied template, start from a minimal empty job spec.
    if job_spec_template is None:
        self.__job_spec = V1JobSpec(template=V1PodTemplateSpec())
    else:
        self.__job_spec = job_spec_template
    self.print_output = print_output
def _create_deployment(self, service_name: str, deployment_name: str,
                       docker_config: DockerConfig, shutdown_seconds: int, scale: int,
                       labels: dict[str, str] = None, volumes: list[V1Volume] = None,
                       mounts: list[V1VolumeMount] = None, core_mounts: bool = False,
                       change_key: str = ''):
    """Create or replace the Deployment (and registry pull secret) for a service."""
    # Build a cache key to check for changes, just trying to only patch what changed
    # will still potentially result in a lot of restarts due to different kubernetes
    # systems returning differently formatted data
    lbls = sorted((labels or {}).items())
    svc_env = sorted(self._service_limited_env[service_name].items())
    change_key = (f"n={deployment_name}{change_key}dc={docker_config}ss={shutdown_seconds}"
                  f"l={lbls}v={volumes}m={mounts}cm={core_mounts}senv={svc_env}")

    # Check if a deployment already exists, and if it does check if it has the same change key set
    replace = None
    try:
        replace = self.apps_api.read_namespaced_deployment(
            deployment_name, namespace=self.namespace, _request_timeout=API_TIMEOUT)
        if replace.metadata.annotations.get(CHANGE_KEY_NAME) == change_key:
            # Nothing changed except possibly the scale: adjust replicas and stop.
            if replace.spec.replicas != scale:
                self.set_target(service_name, scale)
            return
    except ApiException as error:
        # 404 simply means there is no existing deployment; anything else is fatal.
        if error.status != 404:
            raise

    # If we have been given a username or password for the registry, we have to
    # update it, if we haven't been, make sure its been cleaned up in the system
    # so we don't leave passwords lying around
    pull_secret_name = f'{deployment_name}-container-pull-secret'
    use_pull_secret = False
    try:
        current_pull_secret = self.api.read_namespaced_secret(pull_secret_name, self.namespace,
                                                              _request_timeout=API_TIMEOUT)
    except ApiException as error:
        if error.status != 404:
            raise
        current_pull_secret = None
    if docker_config.registry_username or docker_config.registry_password:
        use_pull_secret = True
        # Build the secret we want to make
        new_pull_secret = V1Secret(
            metadata=V1ObjectMeta(name=pull_secret_name, namespace=self.namespace),
            type='kubernetes.io/dockerconfigjson',
            string_data={
                '.dockerconfigjson': create_docker_auth_config(
                    image=docker_config.image,
                    username=docker_config.registry_username,
                    password=docker_config.registry_password,
                )
            }
        )
        # Send it to the server
        if current_pull_secret:
            self.api.patch_namespaced_secret(pull_secret_name, namespace=self.namespace,
                                             body=new_pull_secret, _request_timeout=API_TIMEOUT)
        else:
            self.api.create_namespaced_secret(namespace=self.namespace, body=new_pull_secret,
                                              _request_timeout=API_TIMEOUT)
    elif current_pull_secret:
        # No credentials provided: remove any stale secret.
        self.api.delete_namespaced_secret(pull_secret_name, self.namespace,
                                          _request_timeout=API_TIMEOUT)

    all_labels = dict(self._labels)
    all_labels['component'] = service_name
    if core_mounts:
        all_labels['privilege'] = 'core'
    all_labels.update(labels or {})

    # Build set of volumes, first the global mounts, then the core specific ones,
    # then the ones specific to this container only
    all_volumes: list[V1Volume] = []
    all_mounts: list[V1VolumeMount] = []
    all_volumes.extend(self.config_volumes.values())
    all_mounts.extend(self.config_mounts.values())
    if core_mounts:
        all_volumes.extend(self.core_config_volumes.values())
        all_mounts.extend(self.core_config_mounts.values())
    all_volumes.extend(volumes or [])
    all_mounts.extend(mounts or [])

    # Build metadata
    metadata = V1ObjectMeta(name=deployment_name, labels=all_labels,
                            annotations={CHANGE_KEY_NAME: change_key})
    pod = V1PodSpec(
        volumes=all_volumes,
        containers=self._create_containers(service_name, deployment_name,
                                           docker_config, all_mounts,
                                           core_container=core_mounts),
        priority_class_name=self.priority,
        termination_grace_period_seconds=shutdown_seconds,
        security_context=V1PodSecurityContext(fs_group=1000)
    )
    if use_pull_secret:
        pod.image_pull_secrets = [V1LocalObjectReference(name=pull_secret_name)]
    template = V1PodTemplateSpec(
        metadata=metadata,
        spec=pod,
    )
    spec = V1DeploymentSpec(
        replicas=int(scale),
        revision_history_limit=0,
        selector=V1LabelSelector(match_labels=all_labels),
        template=template,
    )
    deployment = V1Deployment(
        kind="Deployment",
        metadata=metadata,
        spec=spec,
    )
    if replace:
        self.logger.info("Requesting kubernetes replace deployment info for: " + metadata.name)
        try:
            self.apps_api.replace_namespaced_deployment(namespace=self.namespace,
                                                        body=deployment, name=metadata.name,
                                                        _request_timeout=API_TIMEOUT)
            return
        except ApiException as error:
            if error.status == 422:
                # Replacement of an immutable field (ie. labels); Delete and re-create
                self.stop_containers(labels=dict(component=service_name))
    else:
        self.logger.info("Requesting kubernetes create deployment info for: " + metadata.name)
    # NOTE(review): fall-through create — reached both on fresh deployments and
    # after the 422 delete-and-re-create path above; confirm indentation against
    # upstream, as the collapsed source is ambiguous here.
    self.apps_api.create_namespaced_deployment(namespace=self.namespace, body=deployment,
                                               _request_timeout=API_TIMEOUT)
def test_format_kubernetes_app_dict(self):
    """format_kubernetes_app should assemble a V1Deployment from the mocked config pieces."""
    with mock.patch(
        'paasta_tools.kubernetes_tools.load_system_paasta_config', autospec=True,
    ) as mock_load_system_config, mock.patch(
        'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_docker_url', autospec=True,
    ) as mock_get_docker_url, mock.patch(
        'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_volumes', autospec=True,
    ) as mock_get_volumes, mock.patch(
        'paasta_tools.kubernetes_tools.get_code_sha_from_dockerurl', autospec=True,
    ) as mock_get_code_sha_from_dockerurl, mock.patch(
        'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_sanitised_service_name',
        autospec=True,
        return_value='kurupt',
    ), mock.patch(
        'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_sanitised_instance_name',
        autospec=True,
        return_value='fm',
    ), mock.patch(
        'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_service', autospec=True,
    ) as mock_get_service, mock.patch(
        'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_instance', autospec=True,
    ) as mock_get_instance, mock.patch(
        'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_instances', autospec=True,
    ) as mock_get_instances, mock.patch(
        'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_deployment_strategy_config',
        autospec=True,
    ) as mock_get_deployment_strategy_config, mock.patch(
        'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_kubernetes_containers',
        autospec=True,
    ) as mock_get_kubernetes_containers, mock.patch(
        'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_pod_volumes',
        autospec=True,
        return_value=[],
    ) as mock_get_pod_volumes, mock.patch(
        'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_sanitised_volume_name',
        autospec=True,
    ), mock.patch(
        'paasta_tools.kubernetes_tools.get_config_hash', autospec=True,
    ) as mock_get_config_hash, mock.patch(
        'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.get_force_bounce',
        autospec=True,
    ) as mock_get_force_bounce, mock.patch(
        'paasta_tools.kubernetes_tools.KubernetesDeploymentConfig.sanitize_for_config_hash',
        autospec=True,
    ) as mock_sanitize_for_config_hash:
        ret = self.deployment.format_kubernetes_app()
        assert mock_load_system_config.called
        assert mock_get_docker_url.called
        assert mock_get_volumes.called
        assert mock_get_pod_volumes.called
        # The config hash must be computed from the sanitized config plus
        # the force-bounce value.
        mock_get_config_hash.assert_called_with(
            mock_sanitize_for_config_hash.return_value,
            force_bounce=mock_get_force_bounce.return_value,
        )
        # Expected deployment: name from the sanitised service/instance names,
        # identical label sets on the deployment and pod template.
        expected = V1Deployment(
            metadata=V1ObjectMeta(
                labels={
                    'config_sha': mock_get_config_hash.return_value,
                    'git_sha': mock_get_code_sha_from_dockerurl.return_value,
                    'instance': mock_get_instance.return_value,
                    'service': mock_get_service.return_value,
                },
                name='kurupt-fm',
            ),
            spec=V1DeploymentSpec(
                replicas=mock_get_instances.return_value,
                selector=V1LabelSelector(match_labels={
                    'instance': mock_get_instance.return_value,
                    'service': mock_get_service.return_value,
                }, ),
                strategy=mock_get_deployment_strategy_config.return_value,
                template=V1PodTemplateSpec(
                    metadata=V1ObjectMeta(labels={
                        'config_sha': mock_get_config_hash.return_value,
                        'git_sha': mock_get_code_sha_from_dockerurl.return_value,
                        'instance': mock_get_instance.return_value,
                        'service': mock_get_service.return_value,
                    }, ),
                    spec=V1PodSpec(
                        containers=mock_get_kubernetes_containers.
                        return_value,
                        restart_policy='Always',
                        volumes=[],
                    ),
                ),
            ),
        )
        assert ret == expected