def create(self):
    """Create a pod that appends a UTC timestamp to a file under /data once per second.

    The pod mounts the PVC named by ``self.pvc`` at /data (HostToContainer
    propagation) and is registered in the module-level ``PODS`` list.
    """
    # Default output file name; self.out_put overrides it when non-empty.
    # (Previously the whole shell command template was duplicated.)
    out_file = self.out_put if self.out_put != "" else "out.txt"
    # NOTE(review): out_file is interpolated into a shell command with no
    # quoting/escaping -- ensure it only ever comes from trusted input.
    cmd = "while true; do echo $(date -u) >> /data/{}; sleep 1; done".format(out_file)
    container = client.V1Container(
        name="app",
        image="centos",
        command=["sh", "-c", cmd],
        volume_mounts=[client.V1VolumeMount(
            name="juicefs-pv",
            mount_path="/data",
            mount_propagation="HostToContainer",
        )],
    )
    pod = client.V1Pod(
        metadata=client.V1ObjectMeta(name=self.name, namespace=self.namespace),
        spec=client.V1PodSpec(
            containers=[container],
            volumes=[client.V1Volume(
                name="juicefs-pv",
                persistent_volume_claim=client.V1PersistentVolumeClaimVolumeSource(
                    claim_name=self.pvc),
            )],
        ),
    )
    client.CoreV1Api().create_namespaced_pod(namespace=self.namespace, body=pod)
    PODS.append(self)
def add_label_to_pod(self, name, label, value):
    """Patch pod ``name`` so its metadata carries the label ``{label: value}``."""
    pod_namespace = self._find_pod_namespace(name)
    patch_body = client.V1Pod(
        metadata=client.V1ObjectMeta(labels={label: value}))
    return self._v1.patch_namespaced_pod(name, pod_namespace, patch_body)
def to_pod(self):
    """Build and return a V1Pod for this task from its spec and metadata."""
    container_image = self._image_path() or 'daskdev/dask:latest'
    env_vars = self.spec.env
    pod_namespace = self.metadata.namespace or config.namespace
    # An optional extra pip install is injected as one more env entry.
    if self.spec.extra_pip:
        env_vars.append(self.spec.extra_pip)
    base_container = client.V1Container(
        name='base',
        image=container_image,
        env=env_vars,
        command=None,
        args=self.spec.args,
        image_pull_policy=self.spec.image_pull_policy,
        volume_mounts=self.spec.volume_mounts,
        resources=self.spec.resources)
    spec = client.V1PodSpec(
        containers=[base_container],
        restart_policy='Never',
        volumes=self.spec.volumes,
        service_account=self.spec.service_account)
    meta = client.V1ObjectMeta(
        namespace=pod_namespace,
        labels=self.metadata.labels,
        annotations=self.metadata.annotations)
    return client.V1Pod(metadata=meta, spec=spec)
def make_pod_spec(app_id):
    """Return a V1Pod that serves the bokeh app ``app_id`` from the s3 flex mount."""
    unique_pod_name = 'app-{app_id}-{uuid}'.format(app_id=app_id,
                                                   uuid=uuid().hex[:8])
    serve_args = [
        'bokeh', 'serve',
        '--port', str(BOKEH_PORT),
        '/s3/jubo-apps/{}'.format(app_id)
    ]
    # Read-only s3 bucket exposed through the fuse flex-volume driver.
    s3_volume = client.V1Volume(
        name='s3',
        flex_volume=client.V1FlexVolumeSource(
            driver="informaticslab/s3-fuse-flex-volume",
            options={'readonly': "true"}))
    app_container = client.V1Container(
        name=unique_pod_name,
        image='informaticslab/singleuser-notebook:latest',
        args=serve_args,
        ports=[client.V1ContainerPort(container_port=BOKEH_PORT,
                                      name='bokeh')],
        volume_mounts=[client.V1VolumeMount(name='s3', mount_path='/s3')])
    return client.V1Pod(
        metadata=client.V1ObjectMeta(
            name=unique_pod_name,
            labels={MARK_AS_REAPABLE_LABEL: MARK_AS_REAPABLE_LABEL_VALUE}),
        spec=client.V1PodSpec(restart_policy='Never',
                              containers=[app_container],
                              volumes=[s3_volume]))
def create_iperf(self, name='server'):
    """Create a one-shot iperf pod in the default namespace."""
    iperf_container = client.V1Container(
        name="iperf",
        tty=True,
        image="zhuangweikang/k8stc:latest",
        image_pull_policy="IfNotPresent",
        # NET_ADMIN capability is added for the container.
        security_context=client.V1SecurityContext(
            capabilities=client.V1Capabilities(add=["NET_ADMIN"])),
        resources=client.V1ResourceRequirements(
            limits={"cpu": "100m", "memory": "1Gi"},
            requests={"cpu": "100m", "memory": "1Gi"}))
    pod_body = client.V1Pod(
        api_version="v1",
        kind="Pod",
        metadata=client.V1ObjectMeta(name=name, namespace="default"),
        spec=client.V1PodSpec(containers=[iperf_container],
                              restart_policy="Never"))
    self.core_v1_api.create_namespaced_pod(namespace="default",
                                           body=pod_body,
                                           async_req=False)
def _run(self, runobj: RunObject, execution):
    """Build (if needed), submit and synchronously watch a pod running ``runobj``.

    Raises RunError when the image is not built/ready, pod creation fails,
    or the pod ends in a failed/error state. Returns None.
    """
    # 'pass' mode runs the command as-is, without the mlrun wrapper.
    with_mlrun = self.spec.mode != 'pass'
    command, args, extra_env = self._get_cmd_args(runobj, with_mlrun)
    extra_env = [{'name': k, 'value': v} for k, v in extra_env.items()]
    if not self._is_built:
        ready = self._build_image(True, with_mlrun, execution)
        if not ready:
            raise RunError("can't run task, image is not built/ready")
    k8s = self._get_k8s()
    execution.set_state('submit')
    new_meta = self._get_meta(runobj)
    pod_spec = func_to_pod(self._image_path(), self, extra_env, command, args)
    pod = client.V1Pod(metadata=new_meta, spec=pod_spec)
    try:
        pod_name, namespace = k8s.create_pod(pod)
    except client.rest.ApiException as e:
        raise RunError(str(e))
    status = 'unknown'
    if pod_name:
        # Block until the pod reaches a terminal phase.
        status = k8s.watch(pod_name, namespace)
    if self._db_conn and pod_name:
        project = runobj.metadata.project or ''
        # Persist the pod's logs under the run's uid.
        self._db_conn.store_log(new_meta.uid, project,
                                k8s.logs(pod_name, namespace))
    if status in ['failed', 'error']:
        raise RunError(f'pod exited with {status}, check logs')
    return None
def create_pod_object(name: str = "default",
                      imagename: str = None,
                      labels: dict = None,
                      state: str = "running",
                      namespace: str = "default",
                      node_name: str = 'node1') -> k8sClient.V1Pod:
    """Build a mock V1Pod for unit tests.

    The container state is "running" unless ``state == "terminated"``.
    (Fixed: ``labels`` was annotated with the dict *instance* ``{}``,
    which is not a valid type annotation; it is now ``dict``.)
    """
    container_state = k8sClient.V1ContainerState(running=MagicMock())
    if state == "terminated":
        container_state = k8sClient.V1ContainerState(terminated=MagicMock())
    image = k8sClient.V1ContainerImage(names=[imagename])
    container_status = k8sClient.V1ContainerStatus(state=container_state,
                                                   image=image,
                                                   image_id="fakeimage",
                                                   name="fakename",
                                                   ready="True",
                                                   restart_count=0)
    # NOTE(review): the condition's status is set to a list of container
    # statuses rather than a "True"/"False" string -- preserved as-is since
    # dependent tests may rely on it; confirm intent.
    condition = k8sClient.V1PodCondition(type="Ready",
                                         status=[container_status])
    status = k8sClient.V1PodStatus(conditions=[condition],
                                   container_statuses=[container_status])
    container = k8sClient.V1Container(image=image, name="fakename1")
    spec = k8sClient.V1PodSpec(containers=[container], node_name=node_name)
    metadata = k8sClient.V1ObjectMeta(name=name,
                                      labels=labels,
                                      namespace=namespace)
    node = k8sClient.V1Pod(status=status, spec=spec, metadata=metadata)
    return node
def _run(self, runobj: RunObject, execution):
    """Submit ``runobj`` as a pod; watch it under KFP, otherwise detach.

    Raises RunError when pod creation fails or a watched pod ends in a
    failed/error state. Returns None.
    """
    # Only 'pass' mode skips the mlrun wrapper (a falsy mode yields True
    # here, same as the mode != "pass" test alone).
    with_mlrun = (not self.spec.mode) or (self.spec.mode != "pass")
    command, args, extra_env = self._get_cmd_args(runobj, with_mlrun)
    extra_env = [{"name": k, "value": v} for k, v in extra_env.items()]
    if runobj.metadata.iteration:
        # Child runs of a hyper-param iteration are stored up front.
        self.store_run(runobj)
    k8s = self._get_k8s()
    new_meta = self._get_meta(runobj)
    # Inject Vault parameters when a Vault secret source is configured.
    if self._secrets and self._secrets.has_vault_source():
        self._add_vault_params_to_spec(runobj)
    pod_spec = func_to_pod(self.full_image_path(), self, extra_env, command,
                           args, self.spec.workdir)
    pod = client.V1Pod(metadata=new_meta, spec=pod_spec)
    try:
        pod_name, namespace = k8s.create_pod(pod)
    except ApiException as exc:
        raise RunError(str(exc))
    if pod_name and self.kfp:
        # Inside a KFP pipeline: stream logs and block until completion.
        writer = AsyncLogWriter(self._db_conn, runobj)
        status = k8s.watch(pod_name, namespace, writer=writer)
        if status in ["failed", "error"]:
            raise RunError(f"pod exited with {status}, check logs")
    else:
        txt = f"Job is running in the background, pod: {pod_name}"
        logger.info(txt)
        runobj.status.status_text = txt
    return None
def test_pod_status_evicted_pod() -> None:
    """An evicted pod (phase=Failed, reason=Evicted) maps to api.Phase.FAILED."""
    client_pod_status = client.V1PodStatus(
        conditions=None,
        container_statuses=None,
        ephemeral_container_statuses=None,
        host_ip=None,
        init_container_statuses=None,
        message="The node was low on resource: ephemeral-storage. Container "
        "grafana-sc-dashboard was using 4864Ki, which exceeds its request "
        "of 0. Container grafana was using 2112Ki, which exceeds its "
        "request of 0. Container grafana-sc-datasources was using 1280Ki, "
        "which exceeds its request of 0. ",
        nominated_node_name=None,
        phase="Failed",
        pod_i_ps=None,
        pod_ip=None,
        qos_class=None,
        reason="Evicted",
        start_time=datetime.datetime(2022, 5, 23, 5, 43, 57, tzinfo=tzutc()),
    )
    client_pod = client.V1Pod(status=client_pod_status)
    # start_time is converted to a Unix-epoch timestamp (UTC).
    assert pod_status(client_pod) == api.PodStatus(
        conditions=None,
        phase=api.Phase.FAILED,
        start_time=api.Timestamp(1653284637.0),
        host_ip=None,
        pod_ip=None,
        qos_class=None,
    )
def get_pod(self, task_type, task_idx, volume_mounts, volumes, env_vars=None,
            command=None, args=None, sidecar_args=None, resources=None,
            restart_policy=None):
    """Assemble and return the V1Pod for one task replica (not submitted)."""
    pod_name = self.get_job_name(task_type=task_type, task_idx=task_idx)
    pod_labels = self.get_labels(task_type=task_type, task_idx=task_idx)
    meta = client.V1ObjectMeta(name=pod_name,
                               labels=pod_labels,
                               namespace=self.namespace)
    task_spec = self.get_task_pod_spec(task_type=task_type,
                                       task_idx=task_idx,
                                       volume_mounts=volume_mounts,
                                       volumes=volumes,
                                       env_vars=env_vars,
                                       command=command,
                                       args=args,
                                       sidecar_args=sidecar_args,
                                       resources=resources,
                                       restart_policy=restart_policy)
    return client.V1Pod(api_version=k8s_constants.K8S_API_VERSION_V1,
                        kind=k8s_constants.K8S_POD_KIND,
                        metadata=meta,
                        spec=task_spec)
def testToSwaggerDict(self):
    """to_swagger_dict camelCases attribute names and drops empty/None fields."""
    pod = client.V1Pod(
        metadata=client.V1ObjectMeta(owner_references=[
            client.V1OwnerReference(
                api_version='argoproj.io/v1alpha1',
                kind='Workflow',
                name='wf-1',
                uid='wf-uid-1')
        ]),
        spec=client.V1PodSpec(containers=[], service_account='sa-1'))
    pod_dict = container_common.to_swagger_dict(pod)
    # The empty containers list and all None-valued fields are omitted,
    # and snake_case keys become camelCase (service_account -> serviceAccount).
    self.assertDictEqual(
        {
            'metadata': {
                'ownerReferences': [{
                    'apiVersion': 'argoproj.io/v1alpha1',
                    'kind': 'Workflow',
                    'name': 'wf-1',
                    'uid': 'wf-uid-1'
                }]
            },
            'spec': {
                'serviceAccount': 'sa-1'
            }
        }, pod_dict)
def create_pod(self, namespace='default', name=None, metadata=None, spec=None):
    '''Create a pod in ``namespace`` and return the V1Pod the API server returns.

    metadata : dict used to build the V1ObjectMeta, e.g.
        {'name': 'xyz', 'namespace': 'abc'}
    spec : dict used to build the V1PodSpec, e.g.
        {'containers': [{'image': 'busybox',
                         'command': ['sleep', '3600'],
                         'name': 'busybox_container',
                         'image_pull_policy': 'IfNotPresent'}],
         'restart_policy': 'Always'}
    namespace : namespace in which the pod is created
    name : overrides metadata['name'] when given
    '''
    meta_dict = {} if metadata is None else metadata
    spec_dict = {} if spec is None else spec
    metadata_obj = self._get_metadata(meta_dict)
    if name:
        metadata_obj.name = name
    spec_obj = self._get_pod_spec(metadata_obj.name, spec_dict)
    pod_body = client.V1Pod(metadata=metadata_obj, spec=spec_obj)
    self.logger.info('Creating Pod %s' % (metadata_obj.name))
    return self.v1_h.create_namespaced_pod(namespace, pod_body)
def build(self):
    """Build the image with a kaniko builder pod, stream its logs, then clean up.

    Writes the Dockerfile, packages the build context, launches a
    'fairing-builder-' pod with the generated spec, and deletes the pod
    (and the context source's resources) when done.
    """
    dockerfile_path = dockerfile.write_dockerfile(
        dockerfile_path=self.dockerfile_path,
        path_prefix=self.preprocessor.path_prefix,
        base_image=self.base_image)
    self.preprocessor.output_map[dockerfile_path] = 'Dockerfile'
    context_path, context_hash = self.preprocessor.context_tar_gz()
    # Image tag is derived from the context hash so identical contexts reuse tags.
    self.image_tag = self.full_image_name(context_hash)
    self.context_source.prepare(context_path)
    labels = {'fairing-builder': 'kaniko'}
    pod_spec = self.context_source.generate_pod_spec(self.image_tag, self.push)
    for fn in self.pod_spec_mutators:
        fn(self.manager, pod_spec, self.namespace)
    build_pod = client.V1Pod(api_version="v1",
                             kind="Pod",
                             metadata=client.V1ObjectMeta(
                                 generate_name="fairing-builder-",
                                 labels=labels,
                                 namespace=self.namespace,
                             ),
                             spec=pod_spec)
    # BUGFIX: the pod was previously created in the hard-coded "default"
    # namespace, contradicting the namespace set on the metadata above.
    created_pod = client. \
        CoreV1Api(). \
        create_namespaced_pod(self.namespace, build_pod)
    self.manager.log(name=created_pod.metadata.name,
                     namespace=created_pod.metadata.namespace,
                     selectors=labels)

    # clean up created pod and secret
    self.context_source.cleanup()
    client.CoreV1Api().delete_namespaced_pod(
        created_pod.metadata.name,
        created_pod.metadata.namespace,
        client.V1DeleteOptions())
def _run(self, runobj: RunObject, execution):
    """Submit ``runobj`` as a pod; watch it under KFP, otherwise detach.

    Raises RunError when pod creation fails or a watched pod ends in a
    failed/error state. Returns None.
    """
    # Only 'pass' mode skips the mlrun wrapper (a falsy mode yields True
    # here, same as the mode != 'pass' test alone).
    with_mlrun = (not self.spec.mode) or (self.spec.mode != 'pass')
    command, args, extra_env = self._get_cmd_args(runobj, with_mlrun)
    extra_env = [{'name': k, 'value': v} for k, v in extra_env.items()]
    if runobj.metadata.iteration:
        # Child runs of a hyper-param iteration are stored up front.
        self.store_run(runobj)
    k8s = self._get_k8s()
    new_meta = self._get_meta(runobj)
    pod_spec = func_to_pod(self.full_image_path(), self, extra_env, command,
                           args)
    pod = client.V1Pod(metadata=new_meta, spec=pod_spec)
    try:
        pod_name, namespace = k8s.create_pod(pod)
    except client.rest.ApiException as e:
        raise RunError(str(e))
    if pod_name and self.kfp:
        # Inside a KFP pipeline: stream logs and block until completion.
        writer = AsyncLogWriter(self._db_conn, runobj)
        status = k8s.watch(pod_name, namespace, writer=writer)
        if status in ['failed', 'error']:
            raise RunError(f'pod exited with {status}, check logs')
    else:
        txt = 'Job is running in the background, pod: {}'.format(pod_name)
        logger.info(txt)
        runobj.status.status_text = txt
    return None
def touch_member(self, data, ttl=None, permanent=False):
    """Advertise this member's state on its pod (role label + status annotation).

    Patches the pod only when the role label or the member data actually
    changed; otherwise returns the truthy comparison result.
    ttl and permanent are accepted but unused here -- presumably kept for
    interface compatibility with other DCS backends; confirm.
    """
    cluster = self.cluster
    # Derive the role value to put on the pod's role label.
    if cluster and cluster.leader and cluster.leader.name == self._name:
        role = 'promoted' if data['role'] in ('replica', 'promoted') else 'master'
    elif data['state'] == 'running' and data['role'] != 'master':
        role = data['role']
    else:
        role = None
    member = cluster and cluster.get_member(self._name, fallback_to_leader=False)
    # pod_labels is popped so deep_compare below sees only the member data.
    pod_labels = member and member.data.pop('pod_labels', None)
    ret = pod_labels is not None and pod_labels.get(
        self._role_label) == role and deep_compare(data, member.data)
    if not ret:
        metadata = {
            'namespace': self._namespace,
            'name': self._name,
            'labels': {self._role_label: role},
            # Compact JSON (no spaces) for the status annotation.
            'annotations': {'status': json.dumps(data, separators=(',', ':'))}
        }
        body = k8s_client.V1Pod(metadata=k8s_client.V1ObjectMeta(**metadata))
        ret = self._api.patch_namespaced_pod(self._name, self._namespace, body)
    if self.__subsets and self._should_create_config_service:
        self._create_config_service()
    return ret
def test_clusterinit_update_pod_with_init_container():
    """Each call to update_pod_with_init_container appends one entry to the
    pod.beta.kubernetes.io/init-containers annotation (a JSON-encoded list)."""
    # NOTE(review): V1PodSpec() without a containers argument may be rejected
    # by newer kubernetes client versions -- confirm the pinned client version.
    pod_passed = k8sclient.V1Pod(
        metadata=k8sclient.V1ObjectMeta(annotations={}),
        spec=k8sclient.V1PodSpec(),
        status=k8sclient.V1PodStatus()).to_dict()
    cmd = "cmd"
    cmk_img = "cmk_img"
    cmk_img_pol = "policy"
    args = "argument"
    clusterinit.update_pod_with_init_container(pod_passed, cmd, cmk_img,
                                               cmk_img_pol, args)
    pods = json.loads(pod_passed["metadata"]["annotations"]
                      ["pod.beta.kubernetes.io/init-containers"])
    assert len(pods) == 1
    # The init container's name is taken from the command argument.
    assert pods[0]["name"] == cmd
    assert pods[0]["image"] == cmk_img
    assert pods[0]["imagePullPolicy"] == cmk_img_pol
    assert args in pods[0]["args"]
    second_cmd = "cmd2"
    second_img = cmk_img
    second_img_pol = "Always"
    second_args = ["arg1", "arg2"]
    clusterinit.update_pod_with_init_container(pod_passed, second_cmd,
                                               second_img, second_img_pol,
                                               second_args)
    pods = json.loads(pod_passed["metadata"]["annotations"]
                      ["pod.beta.kubernetes.io/init-containers"])
    # A second call appends rather than replaces.
    assert len(pods) == 2
def _create_k8s_job(self, yaml_spec):
    """_create_k8s_job creates a kubernetes job based on the yaml spec.

    Only the first container -- and its first volume mount and first env
    var -- plus the first volume of the spec are used.
    Returns (pod_name, True) on success, ('', False) on API failure.
    """
    pod = k8s_client.V1Pod(metadata=k8s_client.V1ObjectMeta(
        generate_name=yaml_spec['metadata']['generateName']))
    container = k8s_client.V1Container(
        name=yaml_spec['spec']['containers'][0]['name'],
        image=yaml_spec['spec']['containers'][0]['image'],
        args=yaml_spec['spec']['containers'][0]['args'],
        volume_mounts=[k8s_client.V1VolumeMount(
            name=yaml_spec['spec']['containers'][0]['volumeMounts'][0]['name'],
            mount_path=yaml_spec['spec']['containers'][0]['volumeMounts'][0]['mountPath'],
        )],
        env=[k8s_client.V1EnvVar(
            name=yaml_spec['spec']['containers'][0]['env'][0]['name'],
            value=yaml_spec['spec']['containers'][0]['env'][0]['value'],
        )])
    pod.spec = k8s_client.V1PodSpec(
        restart_policy=yaml_spec['spec']['restartPolicy'],
        containers=[container],
        service_account_name=yaml_spec['spec']['serviceAccountName'],
        volumes=[k8s_client.V1Volume(
            name=yaml_spec['spec']['volumes'][0]['name'],
            # The volume is backed by a secret.
            secret=k8s_client.V1SecretVolumeSource(
                secret_name=yaml_spec['spec']['volumes'][0]['secret']['secretName'],
            )
        )])
    try:
        api_response = self._corev1.create_namespaced_pod(
            yaml_spec['metadata']['namespace'], pod)
        return api_response.metadata.name, True
    except k8s_client.rest.ApiException as e:
        logging.exception("Exception when calling CoreV1Api->create_namespaced_pod: {}\n".format(str(e)))
        return '', False
def _submit(k8s, pod_spec, metadata):
    """Create a pod from spec and metadata; raise RunError on API failure."""
    new_pod = client.V1Pod(metadata=metadata, spec=pod_spec)
    try:
        return k8s.create_pod(new_pod)
    except client.rest.ApiException as e:
        print(str(e))
        raise RunError(str(e))
def test_pod_containers_pod_without_node(self) -> None:
    """A pod with no container statuses yields an empty container map."""
    pod_without_node = client.V1Pod(
        status=client.V1PodStatus(container_statuses=None))
    self.assertEqual(pod_containers(pod_without_node), {})
def submit(self):
    """Submit a image spec to openshift's s2i and wait for completion """
    # Mount the host docker socket so the builder container can drive docker.
    volume_mounts = [
        client.V1VolumeMount(mount_path="/var/run/docker.sock",
                             name="docker-socket")
    ]
    volumes = [
        client.V1Volume(name="docker-socket",
                        host_path=client.V1HostPathVolumeSource(
                            path="/var/run/docker.sock"))
    ]
    if self.push_secret:
        # Registry credentials for pushing the built image.
        volume_mounts.append(
            client.V1VolumeMount(mount_path="/root/.docker",
                                 name='docker-push-secret'))
        volumes.append(
            client.V1Volume(name='docker-push-secret',
                            secret=client.V1SecretVolumeSource(
                                secret_name=self.push_secret)))
    self.pod = client.V1Pod(
        metadata=client.V1ObjectMeta(name=self.name,
                                     labels={"name": self.name}),
        spec=client.V1PodSpec(containers=[
            client.V1Container(
                image=self.builder_image,
                name="builder",
                args=self.get_cmd(),
                image_pull_policy='Always',
                volume_mounts=volume_mounts,
            )
        ],
            volumes=volumes,
            restart_policy="Never"))
    try:
        # NOTE(review): the returned pod object is unused.
        ret = self.api.create_namespaced_pod(self.namespace, self.pod)
    except client.rest.ApiException as e:
        if e.status == 409:
            # Someone else created it!
            pass
        else:
            raise
    # Follow the pod's phase changes until it is deleted.
    w = watch.Watch()
    try:
        for f in w.stream(self.api.list_namespaced_pod,
                          self.namespace,
                          label_selector="name={}".format(self.name)):
            if f['type'] == 'DELETED':
                self.progress('pod.phasechange', 'Deleted')
                return
            self.pod = f['object']
            self.progress('pod.phasechange', self.pod.status.phase)
            if self.pod.status.phase == 'Succeeded':
                # NOTE(review): the loop does not stop here -- presumably
                # cleanup() deletes the pod, producing the DELETED event
                # that returns above; confirm.
                self.cleanup()
            elif self.pod.status.phase == 'Failed':
                self.cleanup()
    finally:
        w.stop()
def create_pod_object():
    """Build and return a V1Pod running IMAGE_NAME and exposing PORT_NO.

    (Cleanup: removed an unused V1PodTemplateSpec local and a block of
    dead commented-out code; neither contributed to the returned pod.)
    """
    # Configure the pod's single container.
    container = client.V1Container(
        name=DEPLOYMENT_NAME,
        image=IMAGE_NAME,
        ports=[client.V1ContainerPort(container_port=PORT_NO)])
    # Create the pod specification.
    spec = client.V1PodSpec(containers=[container])
    # Instantiate the pod object.
    pod_object = client.V1Pod(
        api_version=API_VERSION,
        kind=KIND,
        metadata=client.V1ObjectMeta(name=DEPLOYMENT_NAME),
        spec=spec)
    return pod_object
def testLaunch_loadInClusterSucceed(self, mock_core_api_cls,
                                    mock_incluster_config, mock_publisher,
                                    mock_is_inside_kfp, mock_kfp_namespace):
    """A launcher running inside KFP creates the executor pod and polls it
    through Pending -> Running -> Succeeded."""
    mock_publisher.return_value.publish_execution.return_value = {}
    core_api = mock_core_api_cls.return_value
    # Sequence of read_namespaced_pod results the launcher will observe.
    core_api.read_namespaced_pod.side_effect = [
        self._mock_launcher_pod(),
        client.rest.ApiException(
            status=404),  # Mock no existing pod state.
        self._mock_executor_pod(
            'Pending'),  # Mock pending state after creation.
        self._mock_executor_pod(
            'Running'),  # Mock running state after pending.
        self._mock_executor_pod('Succeeded'),  # Mock Succeeded state.
    ]
    # Mock successful pod creation.
    core_api.create_namespaced_pod.return_value = client.V1Pod()
    core_api.read_namespaced_pod_log.return_value.stream.return_value = [
        b'log-1'
    ]
    context = self._create_launcher_context()
    context['launcher'].launch()
    self.assertEqual(5, core_api.read_namespaced_pod.call_count)
    core_api.create_namespaced_pod.assert_called_once()
    core_api.read_namespaced_pod_log.assert_called_once()
    _, mock_kwargs = core_api.create_namespaced_pod.call_args
    self.assertEqual(_KFP_NAMESPACE, mock_kwargs['namespace'])
    pod_manifest = mock_kwargs['body']
    # The manifest carries over the launcher pod's owner references and
    # service account (see _mock_launcher_pod).
    self.assertDictEqual(
        {
            'apiVersion': 'v1',
            'kind': 'Pod',
            'metadata': {
                'name': 'test-123-fakecomponent-fakecomponent-123',
                'ownerReferences': [{
                    'apiVersion': 'argoproj.io/v1alpha1',
                    'kind': 'Workflow',
                    'name': 'wf-1',
                    'uid': 'wf-uid-1'
                }]
            },
            'spec': {
                'restartPolicy': 'Never',
                'containers': [{
                    'name': 'main',
                    'image': 'gcr://test',
                    'command': None,
                    'args': [context['input_artifact'].uri],
                }],
                'serviceAccount': 'sa-1',
                'serviceAccountName': None
            }
        }, pod_manifest)
def get_pod(self, volume_mounts, volumes, env_vars=None, command=None,
            args=None, resources=None, node_selector=None,
            restart_policy=None):
    """Assemble and return the V1Pod for this job (not submitted)."""
    meta = client.V1ObjectMeta(name=self.k8s_job_name,
                               labels=self.labels,
                               namespace=self.namespace)
    task_spec = self.get_task_pod_spec(volume_mounts=volume_mounts,
                                       volumes=volumes,
                                       env_vars=env_vars,
                                       command=command,
                                       args=args,
                                       resources=resources,
                                       node_selector=node_selector,
                                       restart_policy=restart_policy)
    return client.V1Pod(api_version=k8s_constants.K8S_API_VERSION_V1,
                        kind=k8s_constants.K8S_POD_KIND,
                        metadata=meta,
                        spec=task_spec)
def _submit(k8s, runtime, metadata, extra_env):
    """Build a pod from ``runtime`` and submit it; raise RunError on failure."""
    spec = func_to_pod(image_path(runtime.image), runtime, extra_env)
    new_pod = client.V1Pod(metadata=metadata, spec=spec)
    try:
        return k8s.create_pod(new_pod)
    except client.rest.ApiException as e:
        print(str(e))
        raise RunError(str(e))
def test_filterIpedWorkers(self):
    """A pod whose name does not match an IPED worker must not be listed."""
    pod_status = client.V1PodStatus(host_ip="1.2.3.4", pod_ip='6.7.8.9')
    pod_meta = client.V1ObjectMeta(name='notipedworker-myname')
    pod_spec = client.V1PodSpec(node_name='sardcloudXX', containers=[])
    pod_list = client.V1PodList(
        items=[client.V1Pod(status=pod_status, metadata=pod_meta,
                            spec=pod_spec)])
    workers: List[IPEDWorker] = _listWorkers(pod_list)
    self.assertEqual(len(workers), 0)
def _create_pod(self, **kargs):
    """Build (without submitting) a V1Pod for an ElasticDL job task.

    Expected kargs: pod_name, image_name, command, container_args,
    image_pull_policy, env, resource_requests, resource_limits,
    restart_policy, pod_priority, volume, job_name, owner_pod.
    """
    # Container
    pod_resource_requests = kargs["resource_requests"]
    pod_resource_limits = kargs["resource_limits"]
    # Fall back to the requests when no explicit limits were given.
    pod_resource_limits = (pod_resource_limits
                           if pod_resource_limits else pod_resource_requests)
    container = client.V1Container(
        name=kargs["pod_name"],
        image=kargs["image_name"],
        command=kargs["command"],
        resources=client.V1ResourceRequirements(
            requests=parse_resource(pod_resource_requests),
            limits=parse_resource(pod_resource_limits),
        ),
        args=kargs["container_args"],
        image_pull_policy=kargs["image_pull_policy"],
        env=kargs["env"],
    )
    # Pod
    spec = client.V1PodSpec(
        containers=[container],
        restart_policy=kargs["restart_policy"],
        priority_class_name=kargs["pod_priority"],
    )
    # Mount data path
    if kargs["volume"]:
        volume_dict = parse_volume(kargs["volume"])
        volume_name = kargs["pod_name"] + "-volume"
        volume = client.V1Volume(
            name=volume_name,
            persistent_volume_claim=V1PersistentVolumeClaimVolumeSource(
                claim_name=volume_dict["claim_name"], read_only=False),
        )
        # Attach the volume to both the pod spec and the container mount.
        spec.volumes = [volume]
        container.volume_mounts = [
            client.V1VolumeMount(name=volume_name,
                                 mount_path=volume_dict["mount_path"])
        ]
    pod = client.V1Pod(
        spec=spec,
        metadata=client.V1ObjectMeta(
            name=kargs["pod_name"],
            labels={
                "app": ELASTICDL_APP_NAME,
                ELASTICDL_JOB_KEY: kargs["job_name"],
            },
            # Owned by the master pod so it is garbage-collected with it.
            owner_references=self.create_owner_reference(
                kargs["owner_pod"]),
            namespace=self.namespace,
        ),
    )
    # Let the cluster plugin (if any) decorate the pod further.
    if self.cluster:
        pod = self.cluster.with_pod(pod)
    return pod
def test_vnf(self):
    """Test vIMS as proposed by clearwater-live-test

    It leverages an unofficial Clearwater docker to allow testing from
    the Kubernetes cluster.
    See https://github.com/Metaswitch/clearwater-live-test for more details
    """
    # Give the vIMS deployment time to settle before running the live tests.
    time.sleep(120)
    assert self.namespace
    assert self.zone
    container = client.V1Container(
        name=self.test_container_name,
        image=self.test_image_name,
        command=[
            "rake", "test[{}]".format(self.zone),
            "PROXY=bono.{}".format(self.zone),
            "ELLIS=ellis.{}".format(self.zone),
            "SIGNUP_CODE=secret", "--trace"
        ])
    spec = client.V1PodSpec(containers=[container], restart_policy="Never")
    metadata = client.V1ObjectMeta(name=self.test_container_name)
    body = client.V1Pod(metadata=metadata, spec=spec)
    api_response = self.corev1.create_namespaced_pod(self.namespace, body)
    # Watch pod events until the test pod reaches a terminal phase.
    watch_deployment = watch.Watch()
    for event in watch_deployment.stream(
            func=self.corev1.list_namespaced_pod,
            namespace=self.namespace,
            timeout_seconds=self.watch_timeout):
        self.__logger.debug(event)
        if event["object"].metadata.name == self.test_container_name:
            if (event["object"].status.phase == 'Succeeded'
                    or event["object"].status.phase == 'Failed'):
                watch_deployment.stop()
    # Collect the pod's full log output for result parsing.
    api_response = self.corev1.read_namespaced_pod_log(
        name=self.test_container_name, namespace=self.namespace)
    self.__logger.info(api_response)
    vims_test_result = {}
    try:
        # Parse the rake summary, e.g.
        # "2 failures out of 10 tests run ... 3 tests skipped".
        grp = re.search(
            r'^(\d+) failures out of (\d+) tests run.*\n'
            r'(\d+) tests skipped$', api_response,
            re.MULTILINE | re.DOTALL)
        assert grp
        vims_test_result["failures"] = int(grp.group(1))
        vims_test_result["total"] = int(grp.group(2))
        vims_test_result["skipped"] = int(grp.group(3))
        vims_test_result['passed'] = (int(grp.group(2)) - int(grp.group(3))
                                      - int(grp.group(1)))
        # Pass rate is computed over the tests actually run (not skipped).
        if vims_test_result['total'] - vims_test_result['skipped'] > 0:
            vnf_test_rate = vims_test_result['passed'] / (
                vims_test_result['total'] - vims_test_result['skipped'])
        else:
            vnf_test_rate = 0
        # The live-test pass rate contributes half of the overall score.
        self.result += 1 / 2 * 100 * vnf_test_rate
    except Exception:  # pylint: disable=broad-except
        self.__logger.exception("Cannot parse live tests results")
def _mock_launcher_pod(self):
    """Return a fake launcher pod owned by an Argo workflow."""
    workflow_owner = client.V1OwnerReference(
        api_version='argoproj.io/v1alpha1',
        kind='Workflow',
        name='wf-1',
        uid='wf-uid-1')
    return client.V1Pod(
        metadata=client.V1ObjectMeta(owner_references=[workflow_owner]),
        spec=client.V1PodSpec(containers=[], service_account='sa-1'))
def deploy_rtt_deployment(pod_IPs, pod_node_mapping):
    """Create a pod for every entry in ``pod_IPs`` that has no IP assigned yet.

    pod_IPs: mapping of pod name -> assigned IP (None when not created yet).
    pod_node_mapping: mapping of pod name -> target node.
    """
    for pod, pod_ip in pod_IPs.items():
        # A missing IP means the pod has not been created yet.
        # (Fixed: was `pod_ip == None`; identity comparison is correct here.)
        if pod_ip is None:
            template = create_pod_template(pod, pod_node_mapping[pod])
            api_instance = client.CoreV1Api()
            namespace = 'default'
            body = client.V1Pod(metadata=template.metadata,
                                spec=template.spec)
            api_instance.create_namespaced_pod(namespace, body)
def create_pod(v1, image):
    """Create a pod running ``image`` and return its generated name."""
    pod_body = client.V1Pod(
        metadata=client.V1ObjectMeta(generate_name=POD_PREFIX),
        spec=client.V1PodSpec(
            containers=[client.V1Container(name=CONTAINER_NAME,
                                           image=image)]))
    created = v1.create_namespaced_pod(NAMESPACE, pod_body)
    return created.metadata.name