def clusterrolebindings_from_marker(item, namespace):
    """Create ClusterRoleBindings for the test case if the test case is marked
    with the `pytest.mark.clusterrolebinding` marker.

    Args:
        item (pytest.Item): The pytest test item.
        namespace (str): The namespace of the test case.

    Return:
        list[objects.ClusterRoleBinding]: The ClusterRoleBindings that were
        generated from the test case markers.
    """
    clusterrolebindings = []
    for mark in item.iter_markers(name='clusterrolebinding'):
        name = mark.args[0]
        subj_kind = mark.kwargs.get('subject_kind')
        subj_name = mark.kwargs.get('subject_name')

        subj = get_custom_rbac_subject(namespace, subj_kind, subj_name)
        if not subj:
            # No custom subject on the marker: fall back to the defaults
            # for this namespace.
            subj = get_default_rbac_subjects(namespace)

        clusterrolebindings.append(
            ClusterRoleBinding(
                client.V1ClusterRoleBinding(
                    metadata=client.V1ObjectMeta(
                        # f-string for consistency with the typed variant
                        # of this helper elsewhere in the project.
                        name=f'kubetest:{item.name}',
                    ),
                    role_ref=client.V1RoleRef(
                        api_group='rbac.authorization.k8s.io',
                        kind='ClusterRole',
                        name=name,
                    ),
                    subjects=subj,
                )))

    return clusterrolebindings
def test_xgboost_runtime_kserve():
    """End-to-end test: deploy an XGBoost InferenceService, predict, clean up.

    The InferenceService is deleted in a ``finally`` block so a failed
    readiness wait or prediction does not leak the service in the test
    namespace (the original code skipped cleanup on failure).
    """
    service_name = "isvc-xgboost-runtime"
    predictor = V1beta1PredictorSpec(
        min_replicas=1,
        model=V1beta1ModelSpec(
            model_format=V1beta1ModelFormat(name="xgboost"),
            storage_uri="gs://kfserving-examples/models/xgboost/1.5/model",
            resources=V1ResourceRequirements(
                requests={"cpu": "100m", "memory": "256Mi"},
                limits={"cpu": "100m", "memory": "256Mi"},
            ),
        ),
    )

    isvc = V1beta1InferenceService(
        api_version=constants.KSERVE_V1BETA1,
        kind=constants.KSERVE_KIND,
        metadata=client.V1ObjectMeta(name=service_name,
                                     namespace=KSERVE_TEST_NAMESPACE),
        spec=V1beta1InferenceServiceSpec(predictor=predictor),
    )

    kserve_client = KServeClient(
        config_file=os.environ.get("KUBECONFIG", "~/.kube/config"))
    kserve_client.create(isvc)
    try:
        kserve_client.wait_isvc_ready(service_name,
                                      namespace=KSERVE_TEST_NAMESPACE)
        res = predict(service_name, "./data/iris_input.json")
        assert res["predictions"] == [1, 1]
    finally:
        # Always remove the service, even when the assertions above fail.
        kserve_client.delete(service_name, KSERVE_TEST_NAMESPACE)
def _create_service(self, **kargs):
    """Create a namespaced Service for one replica of this job.

    Required kwargs: ``name``, ``replica_type``, ``replica_index``,
    ``port``, ``target_port``. Optional: ``owner`` (owner reference
    source) and ``service_type``.
    """
    labels = self._get_common_labels()

    # Pre-compute the owner references only when an owner was supplied.
    owner_refs = (self.create_owner_reference(kargs["owner"])
                  if "owner" in kargs else None)
    metadata = client.V1ObjectMeta(
        name=kargs["name"],
        labels=labels,
        # Note: We have to add at least one annotation here.
        # Otherwise annotation is `None` and cannot be modified
        # using `with_service()` for cluster specific information.
        annotations=labels,
        owner_references=owner_refs,
        namespace=self.namespace,
    )

    selector = {
        "app": ELASTICDL_APP_NAME,
        ELASTICDL_JOB_KEY: self.job_name,
        ELASTICDL_REPLICA_TYPE_KEY: kargs["replica_type"],
    }
    replica_index = kargs["replica_index"]
    if replica_index is not None:
        selector[ELASTICDL_REPLICA_INDEX_KEY] = str(replica_index)

    spec = client.V1ServiceSpec(
        ports=[
            client.V1ServicePort(port=kargs["port"],
                                 target_port=kargs["target_port"])
        ],
        selector=selector,
        type=kargs.get("service_type"),
    )
    service = client.V1Service(api_version="v1",
                               kind="Service",
                               metadata=metadata,
                               spec=spec)
    # Allow the cluster adapter to inject cluster-specific settings.
    if self.cluster:
        service = self.cluster.with_service(service)
    return self.client.create_namespaced_service(self.namespace, service)
def get_deployment_spec(namespace, app, name, project_name, project_uuid,
                        volume_mounts, volumes, image, command, args, ports,
                        container_name=None, resources=None, role=None,
                        type=None, replicas=1):
    """Build an AppsV1beta1DeploymentSpec for a project deployment.

    The metadata/labels are derived from the project identity; the pod
    template is delegated to ``get_project_pod_spec``.
    """
    labels = get_labels(app=app,
                        project_name=project_name,
                        project_uuid=project_uuid,
                        role=role,
                        type=type)
    deployment_name = constants.DEPLOYMENT_NAME.format(
        name=name, project_uuid=project_uuid)
    metadata = client.V1ObjectMeta(name=deployment_name,
                                   labels=labels,
                                   namespace=namespace)
    pod_spec = get_project_pod_spec(volume_mounts=volume_mounts,
                                    volumes=volumes,
                                    image=image,
                                    container_name=container_name,
                                    command=command,
                                    args=args,
                                    resources=resources,
                                    ports=ports)
    template = client.V1PodTemplateSpec(metadata=metadata, spec=pod_spec)
    # NOTE(review): apps/v1beta1 is deprecated in recent Kubernetes
    # releases; the return type is kept unchanged because callers depend
    # on it — confirm cluster compatibility before migrating to V1.
    return client.AppsV1beta1DeploymentSpec(replicas=replicas,
                                            template=template)
def create_namespace(self):
    """
    Create namespace with random name

    :return: name of new created namespace
    """
    name = 'namespace-{random_string}'.format(random_string=random_str(5))
    self.core_api.create_namespace(
        client.V1Namespace(metadata=client.V1ObjectMeta(name=name)))
    logger.info("Creating namespace: %s", name)
    # save all namespaces created with this backend
    self.managed_namespaces.append(name)
    # wait for namespace to be ready
    Probe(timeout=30,
          pause=5,
          expected_retval=True,
          fnc=self._namespace_ready,
          namespace=name).run()
    return name
def _scanning_pod(self):
    """Yield a testinfra fixture running inside a throwaway scanner pod.

    Creates an alpine pod in the scan-test namespace, waits (up to two
    minutes) for its container to report ready, installs nmap, yields the
    testinfra host, and always deletes the pod afterwards.
    """
    namespace = "astronomer-scan-test"
    pod_name = f"network-scanner-{randint(0,100000)}"
    v1container = client.V1Container(name="scanner")
    v1container.command = ["sleep", "300"]
    v1container.image = "alpine"
    v1podspec = client.V1PodSpec(containers=[v1container])
    v1objectmeta = client.V1ObjectMeta(name=pod_name)
    v1pod = client.V1Pod(spec=v1podspec, metadata=v1objectmeta)
    logging.info(f"Creating {pod_name} pod in namespace {namespace}")
    # --as=system:serviceaccount:astronomer:default
    pod = self.v1.create_namespaced_pod(namespace, v1pod)
    # allow pod to become 'Pending'
    sleep(2)
    try:
        timeout = 120
        start = time()
        while True:
            if time() - start > timeout:
                raise Exception("Timed out waiting for pod to start")
            sleep(1)
            check_pod = self.v1.read_namespaced_pod(pod.metadata.name,
                                                    namespace)
            # container_statuses is None until the kubelet has reported
            # status; guard to avoid a TypeError while the pod is Pending.
            statuses = check_pod.status.container_statuses
            if statuses and statuses[0].ready:
                logging.info("network-scanner is ready")
                break
        test_fixture = testinfra.get_host(
            f"kubectl://{pod.metadata.name}?"
            + f"container={v1container.name}&namespace={namespace}"
        )
        logging.info("Installing nmap into network-scanner")
        test_fixture.check_output("apk add nmap")
        test_fixture.exists("nmap")
        yield test_fixture
    finally:
        logging.info(
            f"Cleaning up network-scanner pod from namespace {namespace}")
        self.v1.delete_namespaced_pod(v1pod.metadata.name, namespace)
def get_secret(namespace, project_name, experiment_group_name,
               experiment_name, project_uuid, experiment_group_uuid,
               experiment_uuid, user_token):
    """Build an Opaque V1Secret carrying the user's API token.

    The token is base64-encoded, as required for V1Secret ``data`` values.
    """
    name = constants.SECRET_NAME.format(experiment_uuid=experiment_uuid)
    labels = get_map_labels(project_name, experiment_group_name,
                            experiment_name, project_uuid,
                            experiment_group_uuid, experiment_uuid)
    metadata = client.V1ObjectMeta(name=name,
                                   labels=labels,
                                   namespace=namespace)
    encoded_token = base64.b64encode(
        bytes(user_token, 'utf-8')).decode("utf-8")
    return client.V1Secret(
        api_version=k8s_constants.K8S_API_VERSION_V1,
        kind=k8s_constants.K8S_SECRET_KIND,
        metadata=metadata,
        type="Opaque",
        data={constants.SECRET_USER_TOKEN: encoded_token})
def store_project_secrets(self, project, secrets, namespace=""):
    """Create or update the project's Kubernetes secret with key/values.

    Args:
        project: Project whose secret should be stored.
        secrets (dict): Mapping of secret key -> plaintext value. Values
            are base64-encoded before being written into an existing
            secret's ``data``.
        namespace (str): Target namespace; resolved via resolve_namespace.

    Raises:
        ApiException: On any read failure other than 404 (not found).
    """
    secret_name = self.get_project_secret_name(project)
    namespace = self.resolve_namespace(namespace)
    try:
        k8s_secret = self.v1api.read_namespaced_secret(secret_name, namespace)
    except ApiException as exc:
        # If secret doesn't exist, we'll simply create it
        if exc.status != 404:
            logger.error(f"failed to retrieve k8s secret: {exc}")
            raise exc
        k8s_secret = client.V1Secret(type="Opaque")
        k8s_secret.metadata = client.V1ObjectMeta(
            name=secret_name, namespace=namespace
        )
        # string_data lets the API server handle base64 encoding for us.
        k8s_secret.string_data = secrets
        self.v1api.create_namespaced_secret(namespace, k8s_secret)
        return

    # An existing secret may have no data yet (data is None) — guard it,
    # otherwise .copy() raises AttributeError.
    secret_data = (k8s_secret.data or {}).copy()
    for key, value in secrets.items():
        secret_data[key] = base64.b64encode(value.encode()).decode("utf-8")

    k8s_secret.data = secret_data
    self.v1api.replace_namespaced_secret(secret_name, namespace, k8s_secret)
def update_ingress(self, ingress_name: str, ingress_body: dict):
    """Create or replace the named Ingress in the ``default`` namespace.

    Raises:
        ServiceRequestError: When listing existing ingresses does not
            return HTTP 200.
    """
    paths = self._update_ingress_paths(ingress_body)
    rule = client.NetworkingV1beta1IngressRule(
        http=client.NetworkingV1beta1HTTPIngressRuleValue(paths=paths))
    body = client.NetworkingV1beta1Ingress(
        api_version="networking.k8s.io/v1beta1",
        kind="Ingress",
        metadata=client.V1ObjectMeta(
            name=ingress_name,
            annotations={
                "nginx.ingress.kubernetes.io/rewrite-target": "/"
            }),
        spec=client.NetworkingV1beta1IngressSpec(rules=[rule]))

    # Fetch the current ingresses (with HTTP status for sanity checking).
    ingress_list = self.api_instance.list_namespaced_ingress_with_http_info(
        namespace='default')
    if ingress_list[1] != 200:
        raise ServiceRequestError("ingress response code is not 200")

    # Decide between replace and create based on existing ingress names.
    existing_names = [item.metadata.name for item in ingress_list[0].items]
    if ingress_name in existing_names:
        self.api_instance.replace_namespaced_ingress_with_http_info(
            name=ingress_name, namespace='default', body=body)
    else:
        self.api_instance.create_namespaced_ingress_with_http_info(
            namespace='default', body=body)
def create_event(reason: str, involved_object: api.V1ObjectReference,
                 message: str, timestamp: datetime.datetime,
                 namespace: str = ''):
    """Create a Warning V1Event attached to ``involved_object``.

    The event name combines the object name, the second-resolution
    timestamp, and a short digest of the message, so identical messages
    within the same second map to the same event name.
    """
    digest = hashlib.sha1(message.encode()).hexdigest()[:8]
    event_name = (
        f'{involved_object.name}.{int(timestamp.timestamp())}.{digest}')
    event = api.V1Event(
        metadata=api.V1ObjectMeta(name=event_name, namespace=namespace),
        involved_object=involved_object,
        reason=reason,
        message=message,
        first_timestamp=timestamp,
        last_timestamp=timestamp,
        count=1,
        type='Warning',
        source=api.V1EventSource(component='kube-resources-analyzer'),
    )
    api.CoreV1Api().create_namespaced_event(namespace, event)
def add_ecr_config(kube_manager, pod_spec, namespace):
    """Wire an ECR credential-helper docker config into the kaniko pod.

    Ensures the 'ecr-config' secret exists, then mounts it read-only at
    /kaniko/.docker/ in the pod's first container.
    """
    if not kube_manager.secret_exists('ecr-config', namespace):
        # Docker config that delegates auth to the ECR credential helper.
        kube_manager.create_secret(
            namespace,
            client.V1Secret(
                metadata=client.V1ObjectMeta(name='ecr-config'),
                string_data={'config.json': '{"credsStore": "ecr-login"}'}))

    mount = client.V1VolumeMount(name='ecr-config',
                                 mount_path='/kaniko/.docker/',
                                 read_only=True)
    container = pod_spec.containers[0]
    if container.volume_mounts:
        container.volume_mounts.append(mount)
    else:
        container.volume_mounts = [mount]

    volume = client.V1Volume(
        name='ecr-config',
        secret=client.V1SecretVolumeSource(secret_name='ecr-config'))
    if pod_spec.volumes:
        pod_spec.volumes.append(volume)
    else:
        pod_spec.volumes = [volume]
def create_job_manifest(n_job, n_node):
    """Build a batch/v1 Job running the pn-generator image.

    :param n_job: total number of completions for the Job
    :param n_node: parallelism (concurrent pods)
    """
    env = [
        client.V1EnvVar(name="BROKER_URL",
                        value="amqp://*****:*****@taskqueue:5672"),
        client.V1EnvVar(name="QUEUE", value="taskqueue"),
    ]
    container = client.V1Container(name="pn-generator",
                                   image="maho/pn_generator:0.7",
                                   env=env)
    pod_template = client.V1PodTemplateSpec(
        spec=client.V1PodSpec(containers=[container],
                              restart_policy="Never"))
    job_spec = client.V1JobSpec(backoff_limit=4,
                                template=pod_template,
                                completions=n_job,
                                parallelism=n_node)
    return client.V1Job(api_version="batch/v1",
                        kind="Job",
                        metadata=client.V1ObjectMeta(name=OBJECT_NAME),
                        spec=job_spec)
def event_body(self):
    """Assemble a V1Event describing this component's test CR state."""
    test = self.service.get_test(self.component_name, self.namespace)
    metadata = client.V1ObjectMeta(
        generate_name="{}".format(self.__event_body.reason.value))
    involved = client.V1ObjectReference(
        kind=PilotService.test_crd_config.kind,
        api_version=PilotService.test_crd_config.get_full_api_version(),
        name=self.component_name,
        resource_version=test['metadata']['resourceVersion'],
        uid=test['metadata']['uid'],
        namespace=self.namespace)
    # NOTE(review): metadata uses reason.value while the event's reason
    # field uses reason.name — confirm this asymmetry is intentional.
    return client.V1Event(
        involved_object=involved,
        metadata=metadata,
        message=self.__event_body.message,
        count=1,
        type=self.__event_body.event_type.name,
        reason=self.__event_body.reason.name,
        source=client.V1EventSource(component=self.component_name),
        first_timestamp=self.first_timestamp,
        last_timestamp=self.last_timestamp)
def clusterrolebindings_from_marker(
        item: pytest.Item, namespace: str) -> List[ClusterRoleBinding]:
    """Create ClusterRoleBindings for the test case if the test case is
    marked with the `pytest.mark.clusterrolebinding` marker.

    Args:
        item: The pytest test item.
        namespace: The namespace of the test case.

    Return:
        The ClusterRoleBindings which were generated from the test case
        markers.
    """
    bindings: List[ClusterRoleBinding] = []
    for mark in item.iter_markers(name="clusterrolebinding"):
        role_name = mark.args[0]
        subjects = get_custom_rbac_subject(
            namespace,
            mark.kwargs.get("subject_kind"),
            mark.kwargs.get("subject_name"),
        )
        if not subjects:
            # Marker supplied no custom subject — use namespace defaults.
            subjects = get_default_rbac_subjects(namespace)

        raw_binding = client.V1ClusterRoleBinding(
            metadata=client.V1ObjectMeta(name=f"kubetest:{item.name}", ),
            role_ref=client.V1RoleRef(
                api_group="rbac.authorization.k8s.io",
                kind="ClusterRole",
                name=role_name,
            ),
            subjects=subjects,
        )
        bindings.append(ClusterRoleBinding(raw_binding))
    return bindings
def create(self, enable_istio: bool = True,
           add_pull_secret: bool = True) -> None:
    """
    Create namespace if doesn't exist.

    :param enable_istio: label the namespace for istio sidecar injection
    :param add_pull_secret: install the image pull secret afterwards
    :return:
    """
    from kubernetes import client

    existing = [
        ns.metadata.name for ns in self.li.kube.list_namespace().items
    ]
    if self.name not in existing:
        ns = client.V1Namespace(
            metadata=client.V1ObjectMeta(name=self.name))
        self.li.kube.create_namespace(ns)
    if enable_istio:
        run(f"kubectl label namespace {self.name} "
            f"istio-injection=enabled --overwrite")
    if add_pull_secret:
        self._add_pullsecret()
def _build_deployment(self, metadata):
    """Build deployment Kubernetes object.

    :param metadata: Common Kubernetes metadata for the interactive
        deployment.
    """
    pod_spec = client.V1PodSpec(containers=[
        client.V1Container(name=self.deployment_name, image=self.image)
    ])
    template = client.V1PodTemplateSpec(
        metadata=client.V1ObjectMeta(
            labels={"app": self.deployment_name}),
        spec=pod_spec)
    deployment_spec = client.V1DeploymentSpec(
        selector=client.V1LabelSelector(
            match_labels={'app': self.deployment_name}),
        replicas=1,
        template=template)
    return client.V1Deployment(
        api_version="apps/v1",
        kind="Deployment",
        metadata=metadata,
        spec=deployment_spec,
    )
def __init__(self, deployment_name, workspace, image, port, path):
    """Initialise basic interactive deployment builder for Kubernetes.

    :param deployment_name: Name which identifies all deployment objects
        and maps to the workflow it belongs.
    :param workspace: Path to the interactive session workspace, which
        matches with the workflow workspace the interactive session
        belongs to.
    :param image: Docker image which the deployment will use as base.
    :param port: Port exposed by the Docker image.
    :param path: Path where the interactive session will be accessible
        from outside the cluster.
    """
    self.deployment_name = deployment_name
    self.workspace = workspace
    self.image = image
    self.port = port
    self.path = path
    shared_metadata = client.V1ObjectMeta(name=deployment_name)
    # Pre-build every Kubernetes object the session needs.
    self.kubernetes_objects = {
        "ingress": self._build_ingress(shared_metadata),
        "service": self._build_service(shared_metadata),
        "deployment": self._build_deployment(shared_metadata),
    }
def __createRoleBinding(self):
    """Creates rolebinding for namespaced tunneling service and user.
    """
    # generate metadata
    meta = client.V1ObjectMeta(name=self.serviceName,
                               namespace=self.namespace)
    binding = client.V1RoleBinding(
        metadata=meta,
        subjects=[client.V1Subject(name="default", kind="ServiceAccount")],
        role_ref=client.V1RoleRef(kind="Role",
                                  api_group="rbac.authorization.k8s.io",
                                  name=self.namespace))
    # create rolebindings
    try:
        self.rbac_instance.create_namespaced_role_binding(
            namespace=self.namespace, body=binding)
    except ApiException as e:
        # 409 Conflict means the binding already exists — safe to ignore.
        if e.status != 409:
            logging.error(e)
            raise
def get_config_map(namespace, project_name, experiment_group_name,
                   experiment_name, project_uuid, experiment_group_uuid,
                   experiment_uuid, cluster_def, declarations, log_level):
    """Build the experiment's V1ConfigMap with cluster definition,
    declarations, paths and API endpoint."""
    name = constants.CONFIG_MAP_NAME.format(experiment_uuid=experiment_uuid)
    labels = get_map_labels(project_name, experiment_group_name,
                            experiment_name, project_uuid,
                            experiment_group_uuid, experiment_uuid)
    metadata = client.V1ObjectMeta(name=name,
                                   labels=labels,
                                   namespace=namespace)
    experiment_outputs_path = get_experiment_outputs_path(experiment_name)
    experiment_logs_path = get_experiment_logs_path(experiment_name)
    experiment_data_path = get_project_data_path(project_name)
    api_url = 'http://{}:{}'.format(settings.POLYAXON_K8S_API_HOST,
                                    settings.POLYAXON_K8S_API_PORT)
    data = {
        constants.CONFIG_MAP_CLUSTER_KEY_NAME: json.dumps(cluster_def),
        # Empty declarations still need a valid JSON object string.
        constants.CONFIG_MAP_DECLARATIONS_KEY_NAME:
            json.dumps(declarations) or '{}',
        constants.CONFIG_MAP_EXPERIMENT_INFO_KEY_NAME: json.dumps(labels),
        constants.CONFIG_MAP_LOG_LEVEL_KEY_NAME: log_level,
        constants.CONFIG_MAP_API_KEY_NAME: api_url,
        constants.CONFIG_MAP_EXPERIMENT_OUTPUTS_PATH_KEY_NAME:
            experiment_outputs_path,
        constants.CONFIG_MAP_EXPERIMENT_LOGS_PATH_KEY_NAME:
            experiment_logs_path,
        constants.CONFIG_MAP_EXPERIMENT_DATA_PATH_KEY_NAME:
            experiment_data_path,
    }
    return client.V1ConfigMap(api_version=k8s_constants.K8S_API_VERSION_V1,
                              kind=k8s_constants.K8S_CONFIG_MAP_KIND,
                              metadata=metadata,
                              data=data)
def create_cronjob(namespace, dbhost):
    """Create a per-minute CronJob that runs the LSST scheduler.

    Args:
        namespace: Namespace to create the CronJob in (also its name).
        dbhost: Database host passed through to the scheduler script.
    """
    try:
        config.load_kube_config()
    except Exception:
        # No local kubeconfig available — assume in-cluster credentials.
        # (Narrowed from a bare `except:` which also swallowed
        # KeyboardInterrupt/SystemExit.)
        config.load_incluster_config()
    batch_api = client.BatchV1beta1Api()
    container = client.V1Container(
        name="scheduler",
        image="sahandha/lsstscheduler",
        args=["/bin/bash", "-c",
              "python /sched.py {} {};".format(namespace, dbhost)],
        resources=client.V1ResourceRequirements(
            requests={'memory': "200Mi", 'cpu': "100m"}))
    pod_spec = client.V1PodSpec(containers=[container],
                                restart_policy="OnFailure")
    body = client.V1beta1CronJob(
        metadata=client.V1ObjectMeta(name=namespace),
        spec=client.V1beta1CronJobSpec(
            job_template=client.V1beta1JobTemplateSpec(
                spec=client.V1JobSpec(
                    template=client.V1PodTemplateSpec(spec=pod_spec))),
            schedule="*/1 * * * *"))
    try:
        # Do not rebind `batch_api` to the response (the original code
        # shadowed its API-client variable with the returned object).
        batch_api.create_namespaced_cron_job(namespace, body)
    except ApiException as e:
        print(
            "Exception when calling BatchV1beta1Api->create_namespaced_cron_job: %s\n"
            % e)
def get_config_map(namespace, project_name, experiment_group_name,
                   experiment_name, project_uuid, experiment_group_uuid,
                   experiment_uuid, original_name, cloning_strategy,
                   cluster_def, persistence_outputs, persistence_data,
                   declarations, log_level):
    """Build the run's V1ConfigMap with cluster definition, declarations,
    persistence paths and API endpoints."""
    name = constants.CONFIG_MAP_NAME.format(uuid=experiment_uuid)
    labels = get_map_labels(project_name, experiment_group_name,
                            experiment_name, project_uuid,
                            experiment_group_uuid, experiment_uuid)
    metadata = client.V1ObjectMeta(name=name,
                                   labels=labels,
                                   namespace=namespace)
    outputs_path = get_experiment_outputs_path(
        persistence_outputs=persistence_outputs,
        experiment_name=experiment_name,
        original_name=original_name,
        cloning_strategy=cloning_strategy)
    logs_path = get_experiment_logs_path(experiment_name, temp=False)
    data = {
        constants.CONFIG_MAP_CLUSTER_KEY_NAME: json.dumps(cluster_def),
        # Empty declarations still need a valid JSON object string.
        constants.CONFIG_MAP_DECLARATIONS_KEY_NAME:
            json.dumps(declarations) or '{}',
        constants.CONFIG_MAP_EXPERIMENT_INFO_KEY_NAME: json.dumps(labels),
        constants.CONFIG_MAP_LOG_LEVEL_KEY_NAME: log_level,
        constants.CONFIG_MAP_RUN_OUTPUTS_PATH_KEY_NAME: outputs_path,
        constants.CONFIG_MAP_RUN_LOGS_PATH_KEY_NAME: logs_path,
        constants.CONFIG_MAP_RUN_DATA_PATHS_KEY_NAME: persistence_data,
        API_HTTP_URL: get_settings_http_api_url(),
        API_WS_HOST: get_settings_ws_api_url(),
    }
    return client.V1ConfigMap(api_version=k8s_constants.K8S_API_VERSION_V1,
                              kind=k8s_constants.K8S_CONFIG_MAP_KIND,
                              metadata=metadata,
                              data=data)
def create_service(project_name: str, service_ports: list,
                   service_type: str = None, namespace: str = "default"):
    """Create a service (NodePort by default) exposing ``service_ports``.

    Exposed ports are assigned sequentially starting at 30000; the
    selector targets pods labelled with the project's hash identifier.
    """
    base_port = 30000
    project_hash = hashlib.sha256(project_name.encode()).hexdigest()[:16]
    ports = [
        client.V1ServicePort(port=base_port + offset, target_port=target)
        for offset, target in enumerate(service_ports)
    ]
    spec = client.V1ServiceSpec(
        selector={"identifier": project_hash},
        type=service_type if service_type else "NodePort",
        ports=ports)
    service = client.V1Service(
        api_version="v1",
        metadata=client.V1ObjectMeta(name="p" + project_hash + "-service"),
        spec=spec)
    core_api = client.CoreV1Api()
    return core_api.create_namespaced_service(namespace=namespace,
                                              body=service)
def _create_service(self, app_info):
    """Expose the app's deployment as a LoadBalancer service in the
    ``default`` namespace."""
    deployment_name = app_info['app_name']
    container_port, host_port = self._get_ports(app_info)
    service = client.V1Service(
        metadata=client.V1ObjectMeta(name=deployment_name),
        spec=client.V1ServiceSpec(
            ports=[
                client.V1ServicePort(port=host_port,
                                     target_port=container_port)
            ],
            type="LoadBalancer",
            selector={"app": deployment_name}))
    core_v1 = client.CoreV1Api()
    api_response = core_v1.create_namespaced_service(namespace="default",
                                                     body=service)
    fmlogger.debug(api_response)
def get_service(namespace, name, labels, ports, target_ports,
                service_type=None, external_i_ps=None):
    """Build a V1Service pairing each port with its target port.

    ``ports``/``external_i_ps`` accept scalars or lists (normalized via
    ``to_list``); the selector reuses ``labels``.
    """
    external_i_ps = to_list(external_i_ps) if external_i_ps else None
    service_ports = [
        client.V1ServicePort(port=p, target_port=tp)
        for p, tp in zip(to_list(ports), target_ports)
    ]
    metadata = client.V1ObjectMeta(name=name,
                                   labels=labels,
                                   namespace=namespace)
    spec = client.V1ServiceSpec(selector=labels,
                                type=service_type,
                                external_i_ps=external_i_ps,
                                ports=service_ports)
    return client.V1Service(api_version=k8s_constants.K8S_API_VERSION_V1,
                            kind=k8s_constants.K8S_SERVICE_KIND,
                            metadata=metadata,
                            spec=spec)
def generate_deployment_spec(self, pod_template_spec):
    """Returns a TFJob template

    :param pod_template_spec: template spec for pod
    """
    self.set_container_name(pod_template_spec)

    def replica_spec(count):
        # Every replica type shares the same pod template.
        return {'replicas': count, 'template': pod_template_spec}

    tf_replica_specs = {
        'Worker': replica_spec(self.distribution['Worker']),
    }
    chief_count = self.distribution.get('Chief', 0)
    if chief_count > 0:
        tf_replica_specs['Chief'] = replica_spec(chief_count)
    ps_count = self.distribution.get('PS', 0)
    if ps_count > 0:
        tf_replica_specs['PS'] = replica_spec(ps_count)

    return {
        'kind': constants.TF_JOB_KIND,
        'apiVersion': 'kubeflow.org/' + constants.TF_JOB_VERSION,
        'metadata': k8s_client.V1ObjectMeta(generate_name=self.job_name,
                                            labels=self.labels),
        'spec': {'tfReplicaSpecs': tf_replica_specs},
    }
def construct_pod(name, labels=None, owner_replicaset=None):
    """Construct a fake Pod body"""
    if owner_replicaset:
        # Pod owned by a ReplicaSet: inherit its selector labels and
        # point an owner reference back at it (uid is synthesized).
        owner_references = [
            client.V1OwnerReference(
                api_version=owner_replicaset.api_version,
                uid=uuid.uuid4().hex,
                name=owner_replicaset.metadata.name,
                kind='ReplicaSet',
            )
        ]
        labels = owner_replicaset.spec.selector.match_labels
    else:
        owner_references = []

    meta = client.V1ObjectMeta(name=name,
                               labels=labels,
                               owner_references=owner_references)
    spec = client.V1PodSpec(
        containers=[client.V1Container(name="main", image="busybox")])
    return client.V1Pod(api_version='v1',
                        kind='Pod',
                        metadata=meta,
                        spec=spec)
def bind_role_with_api(name, namespace, labels, subject_name,
                       subject_kind='ServiceAccount'):
    """Create a RoleBinding for ``subject_name`` via the raw K8s API.

    Returns True on success (409 AlreadyExists counts as success), False
    on any other API failure.
    """
    # Using API because of bug https://github.com/canonical/operator/issues/390
    logging.info('Creating role binding with K8s API')
    _load_kube_config()

    with client.ApiClient() as api_client:
        rbac_api = client.RbacAuthorizationV1Api(api_client)
        body = client.V1RoleBinding(
            metadata=client.V1ObjectMeta(name=name,
                                         namespace=namespace,
                                         labels=labels),
            role_ref=client.V1RoleRef(
                api_group='rbac.authorization.k8s.io',
                kind='Role',
                name=name,
            ),
            subjects=[
                client.V1Subject(kind=subject_kind, name=subject_name),
            ])
        try:
            rbac_api.create_namespaced_role_binding(namespace, body,
                                                    pretty=True)
        except ApiException as err:
            logging.exception(
                "Exception when calling RbacAuthorizationV1Api->create_namespaced_role_binding."
            )
            # 409 (AlreadyExists) is ignored — binding already in place.
            return err.status == 409
        return True
def deploy_job(self):
    """Run Security job

    It runs a single security job and then simply prints its output asis.
    """
    # A job name must have been configured by the subclass/caller.
    assert self.job_name
    # Create a dedicated generated-name namespace for this run and
    # remember it for the job/pod operations below.
    api_response = self.corev1.create_namespace(
        client.V1Namespace(metadata=client.V1ObjectMeta(
            generate_name=self.ns_generate_name)))
    self.namespace = api_response.metadata.name
    self.__logger.debug("create_namespace: %s", api_response)
    # Load the job manifest shipped with the package for this job name.
    with open(pkg_resources.resource_filename(
            "functest_kubernetes",
            "security/{}.yaml".format(self.job_name))) as yfile:
        body = yaml.safe_load(yfile)
    # Submit the job into the freshly created namespace.
    api_response = self.batchv1.create_namespaced_job(
        body=body, namespace=self.namespace)
    self.__logger.info("Job %s created", api_response.metadata.name)
    self.__logger.debug("create_namespaced_job: %s", api_response)
    # Watch job events until this job reports success; the stream ends
    # either when watch_job.stop() is called or after watch_timeout
    # seconds.
    watch_job = watch.Watch()
    for event in watch_job.stream(
            func=self.batchv1.list_namespaced_job,
            namespace=self.namespace,
            timeout_seconds=self.watch_timeout):
        if (event["object"].metadata.name == self.job_name
                and event["object"].status.succeeded == 1):
            self.__logger.info(
                "%s started in %0.2f sec",
                event['object'].metadata.name,
                time.time() - self.start_time)
            watch_job.stop()
    # Fetch the pod spawned by the job (first match on the job-name
    # label) and capture its full log for reporting.
    pods = self.corev1.list_namespaced_pod(
        self.namespace,
        label_selector='job-name={}'.format(self.job_name))
    self.pod = pods.items[0].metadata.name
    self.pod_log = self.corev1.read_namespaced_pod_log(
        name=self.pod, namespace=self.namespace)
    self.__logger.info("\n\n%s", self.pod_log)
def create_ingress(networking_v1_beta1_api):
    """Create the example.com demo Ingress in the default namespace."""
    backend = client.NetworkingV1beta1IngressBackend(
        service_port=5678, service_name="service-example")
    http_path = client.NetworkingV1beta1HTTPIngressPath(path="/",
                                                        backend=backend)
    rule = client.NetworkingV1beta1IngressRule(
        host="example.com",
        http=client.NetworkingV1beta1HTTPIngressRuleValue(
            paths=[http_path]))
    body = client.NetworkingV1beta1Ingress(
        api_version="networking.k8s.io/v1beta1",
        kind="Ingress",
        metadata=client.V1ObjectMeta(
            name="ingress-example",
            annotations={
                "nginx.ingress.kubernetes.io/rewrite-target": "/"
            }),
        spec=client.NetworkingV1beta1IngressSpec(rules=[rule]))
    # Creation of the Deployment in specified namespace
    # (Can replace "default" with a namespace you may have created)
    networking_v1_beta1_api.create_namespaced_ingress(namespace="default",
                                                      body=body)
def _initialise_user_secrets_store(self):
    """Initialise an empty Kubernetes secret for a given user."""
    try:
        empty_k8s_secret = client.V1Secret(
            api_version="v1",
            metadata=client.V1ObjectMeta(
                name=str(self.user_secret_store_id),
                namespace=REANA_RUNTIME_KUBERNETES_NAMESPACE,
            ),
            data={},
        )
        # Per-key secret types are tracked in an annotation next to the
        # (initially empty) payload.
        empty_k8s_secret.metadata.annotations = {"secrets_types": "{}"}
        current_k8s_corev1_api_client.create_namespaced_secret(
            REANA_RUNTIME_KUBERNETES_NAMESPACE, empty_k8s_secret
        )
        return empty_k8s_secret
    except ApiException:
        # Creation failed: log with traceback; caller receives None.
        log.error(
            "Something went wrong while creating "
            "Kubernetes secret for user {0}.".format(
                str(self.user_secret_store_id)
            ),
            exc_info=True,
        )