def test_job_logs_multiple_pods(self, mock_core_client):
    """Logs from every pod/container of a job are collected into a nested dict."""
    namespace = "treesbecomelogs"
    manager = JobManager(
        namespace=namespace, signer=Mock(), register=StaticJobDefinitionsRegister()
    )
    job_name = "ahoymatey"
    first_pod, second_pod = "p1", "p2"
    container_name = "c1"

    def _make_pod(name):
        # Both pods expose a single container with the same name.
        return V1Pod(
            metadata=V1ObjectMeta(name=name),
            spec=V1PodSpec(containers=[V1Container(name=container_name)]),
        )

    mock_core_client.list_namespaced_pod.return_value.items = [
        _make_pod(first_pod),
        _make_pod(second_pod),
    ]
    log_msg = "this is a log"
    mock_core_client.read_namespaced_pod_log.return_value = log_msg

    logs = manager.job_logs(job_name)

    expected = {
        first_pod: {container_name: [log_msg]},
        second_pod: {container_name: [log_msg]},
    }
    assert logs == expected
def test_sdk_e2e():
    """End-to-end: create a PyTorchJob, wait until ready, then delete it."""
    job_name = "pytorchjob-mnist-ci-test"
    namespace = "default"
    container = V1Container(
        name="pytorch",
        image="gcr.io/kubeflow-ci/pytorch-dist-mnist-test:v1.0",
        args=["--backend", "gloo"],
    )

    def _replica():
        # Master and worker share the same single-container template.
        return V1ReplicaSpec(
            replicas=1,
            restart_policy="OnFailure",
            template=V1PodTemplateSpec(spec=V1PodSpec(containers=[container])),
        )

    pytorchjob = V1PyTorchJob(
        api_version="kubeflow.org/v1",
        kind="PyTorchJob",
        metadata=V1ObjectMeta(name=job_name, namespace=namespace),
        spec=V1PyTorchJobSpec(
            clean_pod_policy="None",
            pytorch_replica_specs={"Master": _replica(), "Worker": _replica()},
        ),
    )

    PYTORCH_CLIENT.create(pytorchjob)
    wait_for_pytorchjob_ready(job_name)
    PYTORCH_CLIENT.delete(job_name, namespace=namespace)
def exec_python(kube_ns, kube_client):
    """Return a callable to execute Python code in a pod in the test namespace

    This fixture creates a dedicated pod for executing commands
    """
    # note: this was created when there were only single-user pods running,
    # but now there's always a hub pod where we could be running,
    # and the ssl case *must* run from the hub pod for access to certs
    # Note: we could do without this feature if we always ran
    pod_name = "kubespawner-test-exec"
    pod_manifest = V1Pod(
        metadata={"name": pod_name},
        spec=V1PodSpec(
            containers=[
                {
                    "image": "python:3.8",
                    "name": "python",
                    # Keep the pod alive indefinitely so commands can be exec'd into it.
                    "args": ["/bin/sh", "-c", "while true; do sleep 5; done"],
                }
            ],
            # Tear the pod down immediately; nothing needs a graceful stop.
            termination_grace_period_seconds=0,
        ),
    )
    # Fix: the returned pod object was bound to an unused local; create the
    # resource purely for its side effect.
    create_resource(kube_client, kube_ns, "pod", pod_manifest)
    yield partial(_exec_python_in_pod, kube_client, kube_ns, pod_name)
def setUp(self):
    """Build the expected StatefulSet for the example cluster definition.

    Subsequent tests compare generated resources against ``self.stateful_set``.
    """
    super().setUp()
    self.cluster_dict = getExampleClusterDefinition()
    self.cluster_object = V1MongoClusterConfiguration(**self.cluster_dict)
    self.name = self.cluster_object.metadata.name
    self.namespace = self.cluster_object.metadata.namespace
    self.stateful_set = V1beta1StatefulSet(
        metadata=self._createMeta(self.name),
        spec=V1beta1StatefulSetSpec(
            replicas=3,
            service_name=self.name,
            template=V1PodTemplateSpec(
                metadata=V1ObjectMeta(
                    labels=KubernetesResources.createDefaultLabels(self.name)),
                spec=V1PodSpec(containers=[
                    V1Container(
                        name="mongodb",
                        # Expose the pod IP to mongod via the downward API.
                        env=[
                            V1EnvVar(
                                name="POD_IP",
                                value_from=V1EnvVarSource(
                                    field_ref=V1ObjectFieldSelector(
                                        api_version="v1",
                                        field_path="status.podIP")))
                        ],
                        # Replica-set name matches the cluster name.
                        command=[
                            "mongod", "--replSet", self.name, "--bind_ip",
                            "0.0.0.0", "--smallfiles", "--noprealloc"
                        ],
                        image="mongo:3.6.4",
                        ports=[
                            V1ContainerPort(name="mongodb",
                                            container_port=27017,
                                            protocol="TCP")
                        ],
                        volume_mounts=[
                            V1VolumeMount(name="mongo-storage",
                                          read_only=False,
                                          mount_path="/data/db")
                        ],
                        # Requests and limits are identical (guaranteed QoS).
                        resources=V1ResourceRequirements(
                            limits={"cpu": "100m", "memory": "64Mi"},
                            requests={"cpu": "100m", "memory": "64Mi"}))
                ])),
            # Each replica gets its own 30Gi data volume.
            volume_claim_templates=[
                V1PersistentVolumeClaim(
                    metadata=V1ObjectMeta(name="mongo-storage"),
                    spec=V1PersistentVolumeClaimSpec(
                        access_modes=["ReadWriteOnce"],
                        resources=V1ResourceRequirements(
                            requests={"storage": "30Gi"})))
            ],
        ),
    )
def get_streaming_app_cronjob(
    name: str = "test-cronjob",
    input_topics: Optional[str] = None,
    output_topic: Optional[str] = "output-topic",
    error_topic: Optional[str] = "error-topic",
    env_prefix: str = "APP_",
    pipeline: Optional[str] = None,
) -> V1beta1CronJob:
    """Build a minimal v1beta1 CronJob wrapping a single test container."""
    env = get_env(
        input_topics,
        output_topic,
        error_topic,
        env_prefix=env_prefix,
    )
    template = V1PodTemplateSpec(
        spec=V1PodSpec(containers=[V1Container(name="test-container", env=env)])
    )
    cron_spec = V1beta1CronJobSpec(
        job_template=V1beta1JobTemplateSpec(
            spec=V1JobSpec(template=template, selector=None)
        ),
        schedule="* * * * *",  # every minute
    )
    return V1beta1CronJob(
        metadata=get_metadata(name, pipeline=pipeline), spec=cron_spec
    )
def get_template(
    input_topics,
    output_topic,
    error_topic,
    multiple_inputs=None,
    multiple_outputs=None,
    env_prefix="APP_",
    consumer_group=None,
) -> V1PodTemplateSpec:
    """Build a pod template whose container carries topic-config env vars.

    Fix: the return annotation previously claimed ``List[V1EnvVar]`` although
    the function returns a ``V1PodTemplateSpec``.

    :param input_topics: value for <prefix>INPUT_TOPICS (omitted when falsy)
    :param output_topic: value for <prefix>OUTPUT_TOPIC
    :param error_topic: value for <prefix>ERROR_TOPIC
    :param multiple_inputs: value for <prefix>EXTRA_INPUT_TOPICS (optional)
    :param multiple_outputs: value for <prefix>EXTRA_OUTPUT_TOPICS (optional)
    :param env_prefix: prefix applied to all topic env var names
    :param consumer_group: if given, stored as a 'consumerGroup' annotation
    """
    env = [
        V1EnvVar(name="ENV_PREFIX", value=env_prefix),
        V1EnvVar(name=env_prefix + "OUTPUT_TOPIC", value=output_topic),
        V1EnvVar(name=env_prefix + "ERROR_TOPIC", value=error_topic),
    ]
    if input_topics:
        env.append(
            V1EnvVar(name=env_prefix + "INPUT_TOPICS", value=input_topics))
    if multiple_inputs:
        env.append(
            V1EnvVar(name=env_prefix + "EXTRA_INPUT_TOPICS",
                     value=multiple_inputs))
    if multiple_outputs:
        env.append(
            V1EnvVar(name=env_prefix + "EXTRA_OUTPUT_TOPICS",
                     value=multiple_outputs))
    container = V1Container(name="test-container", env=env)
    pod_spec = V1PodSpec(containers=[container])
    spec_metadata = None
    if consumer_group is not None:
        spec_metadata = V1ObjectMeta(
            annotations={"consumerGroup": consumer_group},
        )
    return V1PodTemplateSpec(spec=pod_spec, metadata=spec_metadata)
def test_job_logs_not_ready(self, mock_core_client):
    """A ContainerCreating ApiException is reported as a log line, not raised."""
    manager = JobManager(
        namespace="notready", signer=Mock(), register=StaticJobDefinitionsRegister()
    )
    pod_name, container_name = "p", "c"
    mock_core_client.list_namespaced_pod.return_value.items = [
        V1Pod(
            metadata=V1ObjectMeta(name=pod_name),
            spec=V1PodSpec(containers=[V1Container(name=container_name)]),
        )
    ]
    mock_core_client.read_namespaced_pod_log.side_effect = ApiException(
        http_resp=Mock(
            data={
                "message": f'container "{container_name}" in pod "{pod_name}" is waiting to start: ContainerCreating'
            }
        )
    )

    # No exception
    logs = manager.job_logs("whatever")

    assert logs == {pod_name: {container_name: ["ContainerCreating"]}}
def test_sdk_e2e():
    """End-to-end: run the TFJob mnist sample and assert it succeeds."""
    job_name = "mnist-ci-test"
    container = V1Container(
        name="tensorflow",
        image="gcr.io/kubeflow-ci/tf-mnist-with-summaries:1.0",
        command=[
            "python",
            "/var/tf_mnist/mnist_with_summaries.py",
            "--log_dir=/train/logs",
            "--learning_rate=0.01",
            "--batch_size=150",
        ],
    )
    worker = V1ReplicaSpec(
        replicas=1,
        restart_policy="Never",
        template=V1PodTemplateSpec(spec=V1PodSpec(containers=[container])),
    )
    tfjob = V1TFJob(
        api_version="kubeflow.org/v1",
        kind="TFJob",
        metadata=V1ObjectMeta(name=job_name, namespace=SDK_TEST_NAMESPACE),
        spec=V1TFJobSpec(
            clean_pod_policy="None", tf_replica_specs={"Worker": worker}
        ),
    )

    TFJOB_CLIENT.create(tfjob, namespace=SDK_TEST_NAMESPACE)
    TFJOB_CLIENT.wait_for_job(job_name, namespace=SDK_TEST_NAMESPACE)
    if not TFJOB_CLIENT.if_job_succeeded(job_name, namespace=SDK_TEST_NAMESPACE):
        raise RuntimeError("The TFJob is not succeeded.")
    TFJOB_CLIENT.delete(job_name, namespace=SDK_TEST_NAMESPACE)
def pod_with_preferred_affinity():
    """Unschedulable pending pod whose spec carries both a required and a
    preferred node-affinity term.
    """
    return V1Pod(
        status=V1PodStatus(
            phase='Pending',
            conditions=[
                # The scheduler has marked the pod unschedulable.
                V1PodCondition(status='False',
                               type='PodScheduled',
                               reason='Unschedulable')
            ]),
        spec=V1PodSpec(
            containers=[
                V1Container(
                    name='container',
                    resources=V1ResourceRequirements(requests={'cpu': '1.5'}))
            ],
            affinity=V1Affinity(node_affinity=V1NodeAffinity(
                # Hard requirement: node must carry the scheduler label.
                required_during_scheduling_ignored_during_execution=
                V1NodeSelector(node_selector_terms=[
                    V1NodeSelectorTerm(match_expressions=[
                        V1NodeSelectorRequirement(
                            key='clusterman.com/scheduler',
                            operator='Exists')
                    ])
                ]),
                # Soft preference (weight 10) for nodes in pool 'bar'.
                preferred_during_scheduling_ignored_during_execution=[
                    V1PreferredSchedulingTerm(
                        weight=10,
                        preference=V1NodeSelectorTerm(match_expressions=[
                            V1NodeSelectorRequirement(
                                key='clusterman.com/pool',
                                operator='In',
                                values=['bar'])
                        ]))
                ]))))
def get_pod_template_spec(
    self,
    code_sha: str,
    system_paasta_config: SystemPaastaConfig,
) -> V1PodTemplateSpec:
    """Assemble the pod template (labels, containers, volumes) for this instance."""
    service_namespace_config = load_service_namespace_config(
        service=self.service,
        namespace=self.get_nerve_namespace(),
    )
    docker_volumes = self.get_volumes(
        system_volumes=system_paasta_config.get_volumes()
    )
    labels = {
        "service": self.get_service(),
        "instance": self.get_instance(),
        "git_sha": code_sha,
    }
    pod_spec = V1PodSpec(
        containers=self.get_kubernetes_containers(
            docker_volumes=docker_volumes,
            aws_ebs_volumes=self.get_aws_ebs_volumes(),
            system_paasta_config=system_paasta_config,
            service_namespace_config=service_namespace_config,
        ),
        restart_policy="Always",
        volumes=self.get_pod_volumes(
            docker_volumes=docker_volumes,
            aws_ebs_volumes=self.get_aws_ebs_volumes(),
        ),
    )
    return V1PodTemplateSpec(
        metadata=V1ObjectMeta(labels=labels),
        spec=pod_spec,
    )
def mock_cluster_connector():
    """Fixture: a KubernetesClusterConnector preloaded with two nodes and one pod.

    node1 (10.10.10.1) has CPU+GPU and no pods; node2 (10.10.10.2) hosts one
    running pod requesting 1.5 CPUs.
    """
    with mock.patch('clusterman.kubernetes.kubernetes_cluster_connector.kubernetes'), \
            mock.patch('clusterman.kubernetes.kubernetes_cluster_connector.staticconf'):
        mock_cluster_connector = KubernetesClusterConnector(
            'kubernetes-test', 'bar')
        mock_cluster_connector._nodes_by_ip = {
            '10.10.10.1': KubernetesNode(
                metadata=V1ObjectMeta(name='node1'),
                # NOTE(review): allocatable gpu is an int (2) while capacity gpu
                # is the string '2' — presumably deliberate to exercise
                # mixed-type parsing; confirm before normalizing.
                status=V1NodeStatus(allocatable={
                    'cpu': '4',
                    'gpu': 2
                },
                                    capacity={
                                        'cpu': '4',
                                        'gpu': '2'
                                    })),
            '10.10.10.2': KubernetesNode(
                metadata=V1ObjectMeta(name='node2'),
                status=V1NodeStatus(allocatable={'cpu': '6.5'},
                                    capacity={'cpu': '8'}))
        }
        mock_cluster_connector._pods_by_ip = {
            '10.10.10.1': [],  # node1 intentionally has no pods
            '10.10.10.2': [
                V1Pod(metadata=V1ObjectMeta(name='pod1'),
                      status=V1PodStatus(phase='Running'),
                      spec=V1PodSpec(containers=[
                          V1Container(name='container1',
                                      resources=V1ResourceRequirements(
                                          requests={'cpu': '1.5'}))
                      ])),
            ]
        }
        return mock_cluster_connector
def create_work_pod(self):
    """Create this work unit's pod (docker:latest running ``self.command()``)."""
    docker_container = V1Container(
        name='docker',
        image='docker:latest',
        command=["/bin/sh"],
        args=["-c", self.command()],
        # Point the docker CLI at the sidecar/remote daemon.
        env=[V1EnvVar(name='DOCKER_HOST', value=self.daemon_host())],
    )
    pod_body = V1Pod(
        metadata=V1ObjectMeta(name=self.pod_name, labels=self.pod_labels()),
        spec=V1PodSpec(
            restart_policy='Never',
            containers=[docker_container],
        ),
    )
    broker.coreV1.create_namespaced_pod(namespace='nectar', body=pod_body)
def generate_delaying_proxy_deployment(concourse_cfg: ConcourseConfig):
    """Build the single-replica Deployment for the delaying GitHub proxy."""
    ensure_not_none(concourse_cfg)
    external_url = concourse_cfg.external_url()
    label = {'app': 'delaying-proxy'}
    proxy_container = V1Container(
        name='delaying-proxy',
        image='eu.gcr.io/gardener-project/cc/github-enterprise-proxy:0.1.0',
        image_pull_policy='IfNotPresent',
        ports=[V1ContainerPort(container_port=8080)],
        # Give the proxy 10s to come up, then TCP-probe every 10s.
        liveness_probe=V1Probe(
            tcp_socket=V1TCPSocketAction(port=8080),
            initial_delay_seconds=10,
            period_seconds=10,
        ),
        env=[V1EnvVar(name='CONCOURSE_URL', value=external_url)],
    )
    return V1Deployment(
        kind='Deployment',
        metadata=V1ObjectMeta(name='delaying-proxy'),
        spec=V1DeploymentSpec(
            replicas=1,
            selector=V1LabelSelector(match_labels=label),
            template=V1PodTemplateSpec(
                metadata=V1ObjectMeta(labels=label),
                spec=V1PodSpec(containers=[proxy_container]),
            ),
        ),
    )
def _get_pod_spec(self, container_command: str, pvc: str) -> V1PodSpec:
    """Build the pod spec: one container, the data PVC, and CA config-map volumes."""
    ca_volume_names = []
    ca_volumes = []
    for index, config_map in enumerate(self.ca_config_maps or []):
        volume_name = f"ca-config-map-{index}"
        ca_volume_names.append(volume_name)
        ca_volumes.append(
            V1Volume(
                name=volume_name,
                config_map=V1ConfigMapVolumeSource(
                    name=config_map,
                    # Project the "ca_cert" key to a path named after the map.
                    items=[V1KeyToPath(key="ca_cert", path=config_map)],
                ),
            )
        )
    data_volume = V1Volume(
        name=self.data_volume_name,
        persistent_volume_claim=V1PersistentVolumeClaimVolumeSource(
            claim_name=pvc
        ),
    )
    return V1PodSpec(
        containers=[
            self._get_container(
                command=container_command,
                config_map_volume_names=ca_volume_names,
            )
        ],
        volumes=[data_volume] + ca_volumes,
        restart_policy="Never",
    )
def pending_pods():
    """Fixture: (pending pod, expected PodUnschedulableReason) pairs."""
    return [
        (
            V1Pod(
                metadata=V1ObjectMeta(name='pod1'),
                status=V1PodStatus(
                    phase='Pending',
                    conditions=[
                        V1PodCondition(status='False', type='PodScheduled', reason='Unschedulable')
                    ],
                ),
                # NOTE(review): both containers share the name 'container1';
                # presumably only aggregate resource requests matter — confirm.
                spec=V1PodSpec(containers=[
                    V1Container(
                        name='container1',
                        resources=V1ResourceRequirements(requests={'cpu': '1.5', 'memory': '150MB'})
                    ),
                    V1Container(
                        name='container1',
                        resources=V1ResourceRequirements(requests={'cpu': '1.5', 'memory': '350MB'})
                    )
                ]),
            ),
            PodUnschedulableReason.InsufficientResources,
        ),
        (
            V1Pod(
                metadata=V1ObjectMeta(name='pod2'),
                status=V1PodStatus(
                    phase='Pending',
                    conditions=[
                        V1PodCondition(status='False', type='PodScheduled', reason='Unschedulable')
                    ],
                ),
                spec=V1PodSpec(containers=[
                    V1Container(
                        name='container1',
                        resources=V1ResourceRequirements(requests={'cpu': '1.5'})
                    ),
                    # NOTE(review): 'mem' (not 'memory') looks deliberate so the
                    # reason resolves to Unknown — confirm before "fixing".
                    V1Container(
                        name='container1',
                        resources=V1ResourceRequirements(requests={'cpu': '1.5', 'mem': '300MB'})
                    )
                ]),
            ),
            PodUnschedulableReason.Unknown,
        )
    ]
def test_sdk_e2e():
    """End-to-end: run the XGBoostJob iris sample and assert it succeeds."""
    job_name = "xgboostjob-iris-ci-test"
    container = V1Container(
        name="xgboost",
        image="docker.io/merlintang/xgboost-dist-iris:1.1",
        args=[
            "--job_type=Train",
            "--xgboost_parameter=objective:multi:softprob,num_class:3",
            "--n_estimators=10",
            "--learning_rate=0.1",
            "--model_path=/tmp/xgboost-model",
            "--model_storage_type=local",
        ],
    )

    def _replica():
        # Master and worker use the same single-container template.
        return V1ReplicaSpec(
            replicas=1,
            restart_policy="Never",
            template=V1PodTemplateSpec(spec=V1PodSpec(containers=[container])),
        )

    xgboostjob = KubeflowOrgV1XGBoostJob(
        api_version="kubeflow.org/v1",
        kind="XGBoostJob",
        metadata=V1ObjectMeta(name=job_name, namespace=SDK_TEST_NAMESPACE),
        spec=KubeflowOrgV1XGBoostJobSpec(
            run_policy=V1RunPolicy(clean_pod_policy="None", ),
            xgb_replica_specs={"Master": _replica(), "Worker": _replica()},
        ),
    )

    XGBOOST_CLIENT.create(xgboostjob)
    XGBOOST_CLIENT.wait_for_job(job_name, namespace=SDK_TEST_NAMESPACE)
    if not XGBOOST_CLIENT.is_job_succeeded(job_name, namespace=SDK_TEST_NAMESPACE):
        raise RuntimeError("The XGBoostJob is not succeeded.")
    XGBOOST_CLIENT.get_logs(job_name, namespace=SDK_TEST_NAMESPACE)
    XGBOOST_CLIENT.delete(job_name, namespace=SDK_TEST_NAMESPACE)
def get_reference_object(self) -> V1Deployment: """Get deployment object for outpost""" # Generate V1ContainerPort objects container_ports = [] for port in self.controller.deployment_ports: container_ports.append( V1ContainerPort( container_port=port.port, name=port.name, protocol=port.protocol.upper(), )) meta = self.get_object_meta(name=self.name) secret_name = f"authentik-outpost-{self.controller.outpost.uuid.hex}-api" image_prefix = CONFIG.y("outposts.docker_image_base") return V1Deployment( metadata=meta, spec=V1DeploymentSpec( replicas=self.outpost.config.kubernetes_replicas, selector=V1LabelSelector(match_labels=self.get_pod_meta()), template=V1PodTemplateSpec( metadata=V1ObjectMeta(labels=self.get_pod_meta()), spec=V1PodSpec(containers=[ V1Container( name=str(self.outpost.type), image= f"{image_prefix}-{self.outpost.type}:{__version__}", ports=container_ports, env=[ V1EnvVar( name="AUTHENTIK_HOST", value_from=V1EnvVarSource( secret_key_ref=V1SecretKeySelector( name=secret_name, key="authentik_host", )), ), V1EnvVar( name="AUTHENTIK_TOKEN", value_from=V1EnvVarSource( secret_key_ref=V1SecretKeySelector( name=secret_name, key="token", )), ), V1EnvVar( name="AUTHENTIK_INSECURE", value_from=V1EnvVarSource( secret_key_ref=V1SecretKeySelector( name=secret_name, key="authentik_host_insecure", )), ), ], ) ]), ), ), )
def running_pod_1():
    """Fixture: running pod on host 10.10.10.2 requesting 1.5 CPUs, pool 'bar'."""
    container = V1Container(
        name='container1',
        resources=V1ResourceRequirements(requests={'cpu': '1.5'}),
    )
    return V1Pod(
        metadata=V1ObjectMeta(name='running_pod_1'),
        status=V1PodStatus(phase='Running', host_ip='10.10.10.2'),
        spec=V1PodSpec(
            containers=[container],
            node_selector={'clusterman.com/pool': 'bar'},
        ),
    )
def from_file(cls, pod_dict):
    """Build an instance from a pod dict loaded from file.

    expects input in format:
    {
        "name": "bonita-webapp-0",
        "namespace": "default",
        "containers": {
            "limits": { "memory": "24Gi" },
            "requests": { "cpu": "3", "memory": "12Gi" }
        },
        "initContainers": null
    },

    Fix: init-container limits/requests were previously read from the
    ``containers`` section instead of ``initContainers``.
    """
    containers = pod_dict['containers']
    if containers is not None:
        c_limits = containers.get('limits')
        c_requests = containers.get('requests')
    else:
        c_limits, c_requests = {}, {}

    init_containers = pod_dict['initContainers']
    if init_containers is not None:
        # BUGFIX: these used to be read from `containers`, silently copying
        # the main-container resources onto the init container.
        ic_limits = init_containers.get('limits')
        ic_requests = init_containers.get('requests')
    else:
        ic_limits, ic_requests = {}, {}

    pod = V1Pod(
        metadata=V1ObjectMeta(
            name=pod_dict['name'],
            namespace=pod_dict['namespace']
        ),
        spec=V1PodSpec(
            containers=[
                V1Container(
                    name='1',
                    resources=V1ResourceRequirements(
                        limits=c_limits,
                        requests=c_requests
                    )
                )
            ],
            init_containers=[
                V1Container(
                    name='1',
                    resources=V1ResourceRequirements(
                        limits=ic_limits,
                        requests=ic_requests
                    ))
            ]
        )
    )
    return cls.from_k8s(pod)
def deploy(self, restart_policy="Never"):
    """Create this pod in its namespace and return the API response."""
    self.meta.labels = self.target_labels
    pod_spec = V1PodSpec(
        containers=self.containers,
        node_name=self.node_name,
        volumes=self.volumes,
        restart_policy=restart_policy,
    )
    body = V1Pod(spec=pod_spec, metadata=self.meta)
    return k8sclient.apiV1.create_namespaced_pod(self.meta.namespace, body=body)
def _build_rs(self):
    """Assemble the v1beta1 ReplicaSet object from this builder's fields."""
    template_meta = V1ObjectMeta(
        labels=self.target_labels,
        # Empty annotations collapse to None so the field is omitted.
        annotations=self.annotations or None,
    )
    template = V1PodTemplateSpec(
        metadata=template_meta,
        spec=V1PodSpec(containers=self.containers, volumes=self.volumes),
    )
    spec = V1beta1ReplicaSetSpec(
        replicas=self.replicas,
        selector=self.selector,
        template=template,
    )
    return V1beta1ReplicaSet(metadata=self.meta, spec=spec)
def test_sdk_e2e():
    """End-to-end: run the PyTorchJob mnist sample and assert it succeeds."""
    job_name = "pytorchjob-mnist-ci-test"
    container = V1Container(
        name="pytorch",
        image="gcr.io/kubeflow-ci/pytorch-dist-mnist-test:v1.0",
        args=["--backend", "gloo"],
    )

    def _replica():
        # Master and worker use the same single-container template.
        return V1ReplicaSpec(
            replicas=1,
            restart_policy="OnFailure",
            template=V1PodTemplateSpec(spec=V1PodSpec(containers=[container])),
        )

    pytorchjob = KubeflowOrgV1PyTorchJob(
        api_version="kubeflow.org/v1",
        kind="PyTorchJob",
        metadata=V1ObjectMeta(name=job_name, namespace=SDK_TEST_NAMESPACE),
        spec=KubeflowOrgV1PyTorchJobSpec(
            run_policy=V1RunPolicy(clean_pod_policy="None", ),
            pytorch_replica_specs={"Master": _replica(), "Worker": _replica()},
        ),
    )

    PYTORCH_CLIENT.create(pytorchjob)
    PYTORCH_CLIENT.wait_for_job(job_name, namespace=SDK_TEST_NAMESPACE)
    if not PYTORCH_CLIENT.is_job_succeeded(job_name, namespace=SDK_TEST_NAMESPACE):
        raise RuntimeError("The PyTorchJob is not succeeded.")
    PYTORCH_CLIENT.get_logs(job_name, namespace=SDK_TEST_NAMESPACE)
    PYTORCH_CLIENT.delete(job_name, namespace=SDK_TEST_NAMESPACE)
def __init__(self) -> None:
    """Define the long-lived testdrive pod (sleeps forever until exec'd into)."""
    container = V1Container(
        name="testdrive",
        image=self.image("testdrive"),
        command=["sleep", "infinity"],
    )
    self.pod = V1Pod(
        metadata=V1ObjectMeta(name="testdrive"),
        spec=V1PodSpec(containers=[container]),
    )
def __create_app_deployment(self, labels):
    """Create a one-replica Deployment for the app container."""
    container = V1Container(
        name=self.container_name,
        image=self.image_name,
        image_pull_policy='IfNotPresent',
        ports=[V1ContainerPort(container_port=self.container_port)],
        # Inject DB settings from the shared infra config map.
        env_from=[
            V1EnvFromSource(
                config_map_ref=V1ConfigMapEnvSource(name=INFRA_DB_CONFIG)
            )
        ],
    )
    pod_template = V1PodTemplateSpec(
        metadata=V1ObjectMeta(name=self.container_name, labels=labels),
        spec=V1PodSpec(containers=[container]),
    )
    deployment = V1Deployment(
        metadata=V1ObjectMeta(name=self.container_name),
        spec=V1DeploymentSpec(
            replicas=1,
            selector=V1LabelSelector(match_labels=labels),
            template=pod_template,
        ),
    )
    self.appsApi.create_namespaced_deployment(
        namespace=TODO_APP_NAMESPACE, body=deployment
    )
def unevictable_pod():
    """Fixture: running pod annotated safe_to_evict=false (must not be evicted)."""
    metadata = V1ObjectMeta(
        name='unevictable_pod',
        annotations={'clusterman.com/safe_to_evict': 'false'},
        owner_references=[],
    )
    container = V1Container(
        name='container1',
        resources=V1ResourceRequirements(requests={'cpu': '1.5'}),
    )
    return V1Pod(
        metadata=metadata,
        status=V1PodStatus(phase='Running', host_ip='10.10.10.2'),
        spec=V1PodSpec(containers=[container]),
    )
def create(self):
    """Create the pod (single always-pulled 'primary' container); return the API response."""
    primary = V1Container(
        name="primary",
        image=self.image(),
        image_pull_policy="Always",
    )
    body = V1Pod(
        api_version='v1',
        metadata=V1ObjectMeta(name=self.pod_name, labels=self.labels()),
        spec=V1PodSpec(containers=[primary]),
    )
    return broker.coreV1.create_namespaced_pod(body=body, namespace=self.namespace)
def _create_flush_job(
    batch_api: BatchV1Api,
    command: List[str],
    env: List[V1EnvVar],
    image: str,
    name: str,
    namespace: str,
    service_account_name: str,
) -> V1Job:
    """Create (or reuse) the job that flushes the queue PVC named ``name``.

    The job mounts the PersistentVolumeClaim named ``name`` at /data and runs
    ``command`` in ``image``. If a job with the same name already exists, the
    existing job is fetched and returned instead of raising.

    Raises:
        ApiException: for any API error other than "job already exists".
    """
    logger.info(f"creating job: {name}")
    try:
        return batch_api.create_namespaced_job(
            namespace=namespace,
            body=V1Job(
                api_version="batch/v1",
                kind="Job",
                metadata=V1ObjectMeta(name=name, namespace=namespace),
                spec=V1JobSpec(
                    template=V1PodTemplateSpec(
                        spec=V1PodSpec(
                            containers=[
                                V1Container(
                                    image=image,
                                    command=command,
                                    name="flush",
                                    volume_mounts=[
                                        V1VolumeMount(mount_path="/data", name="queue")
                                    ],
                                    env=env,
                                )
                            ],
                            # Retry in place until the flush succeeds.
                            restart_policy="OnFailure",
                            volumes=[
                                V1Volume(
                                    name="queue",
                                    # The PVC shares the job's name by convention.
                                    persistent_volume_claim=(
                                        V1PersistentVolumeClaimVolumeSource(
                                            claim_name=name
                                        )
                                    ),
                                )
                            ],
                            service_account_name=service_account_name,
                        )
                    )
                ),
            ),
        )
    except ApiException as e:
        # 409 CONFLICT with reason AlreadyExists means the job was created
        # previously (or concurrently); fetch and return it instead of failing.
        if e.reason == CONFLICT and json.loads(e.body)["reason"] == ALREADY_EXISTS:
            logger.info(f"using existing job: {name}")
            return batch_api.read_namespaced_job(name, namespace)
        raise
def create(subs):
    """Create a pod from the substitutions dict (keys: name, labels, image, ns)."""
    primary = V1Container(
        name="primary",
        image=subs.get('image', 'nginx'),  # default image when none given
        image_pull_policy="Always",
    )
    body = broker.client.V1Pod(
        api_version='v1',
        metadata=V1ObjectMeta(name=subs.get('name'), labels=subs.get('labels')),
        spec=V1PodSpec(containers=[primary]),
    )
    return broker.coreV1.create_namespaced_pod(body=body, namespace=subs.get('ns'))
def create_test_pod():
    """Create a throwaway alpine pod (named by module-level ``name``) in 'default'."""
    test_container = V1Container(
        name="test",
        image="alpine",
        tty=True,  # keep a terminal attached so the container stays running
    )
    core_v1.create_namespaced_pod(
        "default",
        V1Pod(
            metadata=V1ObjectMeta(name=name),
            spec=V1PodSpec(containers=[test_container]),
        ),
    )
def __init__(self) -> None:
    """Define a single-replica Postgres StatefulSet.

    Init SQL comes from the 'postgres-init' config map and data lives on a
    per-replica 1Gi volume claim mounted at /data.
    """
    metadata = V1ObjectMeta(name="postgres", labels={"app": "postgres"})
    label_selector = V1LabelSelector(match_labels={"app": "postgres"})
    # Trust all connections — acceptable only in a test environment.
    env = [V1EnvVar(name="POSTGRES_HOST_AUTH_METHOD", value="trust")]
    ports = [V1ContainerPort(container_port=5432, name="sql")]
    volume_mounts = [
        V1VolumeMount(name="data", mount_path="/data"),
        # Scripts in /docker-entrypoint-initdb.d run on first startup.
        V1VolumeMount(
            name="postgres-init", mount_path="/docker-entrypoint-initdb.d"
        ),
    ]
    volume_config = V1ConfigMapVolumeSource(
        name="postgres-init",
    )
    volumes = [V1Volume(name="postgres-init", config_map=volume_config)]
    container = V1Container(
        name="postgres",
        image="postgres:14.3",
        env=env,
        ports=ports,
        volume_mounts=volume_mounts,
    )
    pod_spec = V1PodSpec(containers=[container], volumes=volumes)
    template_spec = V1PodTemplateSpec(metadata=metadata, spec=pod_spec)
    # Backs the "data" mount with a per-replica 1Gi claim.
    claim_templates = [
        V1PersistentVolumeClaim(
            metadata=V1ObjectMeta(name="data"),
            spec=V1PersistentVolumeClaimSpec(
                access_modes=["ReadWriteOnce"],
                resources=V1ResourceRequirements(requests={"storage": "1Gi"}),
            ),
        )
    ]
    self.stateful_set = V1StatefulSet(
        api_version="apps/v1",
        kind="StatefulSet",
        metadata=metadata,
        spec=V1StatefulSetSpec(
            service_name="postgres",
            replicas=1,
            selector=label_selector,
            template=template_spec,
            volume_claim_templates=claim_templates,
        ),
    )