def namespace(auth):
    """Fixture: create a uniquely named test namespace, yield its name, delete it afterwards."""
    ns_name = "test-{}".format(uuid.uuid4())
    helper = KubernetesAnsibleModuleHelper(
        'v1', 'namespace', debug=True, reset_logfile=False, **auth)
    created = helper.create_object(
        V1Namespace(metadata=V1ObjectMeta(name=ns_name)))
    assert created is not None
    yield ns_name
    # teardown: remove the namespace created above
    helper.delete_object(ns_name, None)
def create_secret(v1: CoreV1Api, name: str, data: Dict[str, str], typ: str = 'from-literal'):
    """Create a Secret in the ``default`` namespace unless one with that name already exists."""
    if name in list_secret_names(v1):
        logger.info(f"Secret {name} already exists. Skipping.")
        return
    logger.info(f"Creating {name}...")
    body = V1Secret(
        api_version='v1',
        kind='Secret',
        metadata=V1ObjectMeta(name=name, namespace='default'),
        type=typ,
        data=data,
    )
    return v1.create_namespaced_secret(namespace='default', body=body)
def test_create_job_with_template(self, mock_batch_client):
    """create_job must forward template_args to the registered generator exactly once."""
    mock_batch_client.create_namespaced_job.return_value = V1Job(
        metadata=V1ObjectMeta())
    generator = Mock()
    job_name = "job"
    manager = JobManager(
        namespace="geerick",
        signer=Mock(),
        register=StaticJobDefinitionsRegister({job_name: generator}),
    )
    args = {"dummy": "template"}
    manager.create_job(job_name, template_args=args)
    generator.generate.assert_called_once_with(template_args=args)
def unschedulable_pod():
    """Fixture pod stuck Pending with an Unschedulable PodScheduled condition."""
    pending_condition = V1PodCondition(
        status='False', type='PodScheduled', reason='Unschedulable')
    container = V1Container(
        name='container2',
        resources=V1ResourceRequirements(requests={'cpu': '1.5'}),
    )
    return V1Pod(
        metadata=V1ObjectMeta(
            name='unschedulable_pod', annotations=dict(), owner_references=[]),
        status=V1PodStatus(phase='Pending', conditions=[pending_condition]),
        spec=V1PodSpec(
            containers=[container],
            node_selector={'clusterman.com/pool': 'bar'},
        ),
    )
def get_object_meta(self, **kwargs) -> V1ObjectMeta:
    """Get common object metadata"""
    outpost = self.controller.outpost
    # shared label set applied to every object this controller manages
    common_labels = {
        "app.kubernetes.io/name": f"authentik-{outpost.type.lower()}",
        "app.kubernetes.io/instance": slugify(outpost.name),
        "app.kubernetes.io/version": __version__,
        "app.kubernetes.io/managed-by": "goauthentik.io",
        "goauthentik.io/outpost-uuid": outpost.uuid.hex,
    }
    return V1ObjectMeta(namespace=self.namespace, labels=common_labels, **kwargs)
def get_volume_claim_templates(self) -> Sequence[V1PersistentVolumeClaim]:
    """Build one PersistentVolumeClaim template per configured persistent volume."""
    claims = []
    for volume in self.get_persistent_volumes():
        claim_spec = V1PersistentVolumeClaimSpec(
            # must be ReadWriteOnce for EBS
            access_modes=["ReadWriteOnce"],
            storage_class_name=self.get_storage_class_name(),
            resources=V1ResourceRequirements(
                requests={'storage': f"{volume['size']}Gi"},
            ),
        )
        claims.append(
            V1PersistentVolumeClaim(
                metadata=V1ObjectMeta(
                    name=self.get_persistent_volume_name(volume)),
                spec=claim_spec,
            ))
    return claims
def _build_rs(self):
    """Assemble a v1beta1 ReplicaSet from this builder's configured fields."""
    spec_for_pods = V1PodSpec(
        containers=self.containers,
        volumes=self.volumes,
        node_name=self.node_name,
        hostname=self.host_name,
        security_context=self.security_context,
    )
    pod_template = V1PodTemplateSpec(
        metadata=V1ObjectMeta(
            labels=self.target_labels,
            annotations=self.annotations or None,
        ),
        spec=spec_for_pods,
    )
    return V1beta1ReplicaSet(
        metadata=self.meta,
        spec=V1beta1ReplicaSetSpec(
            replicas=self.replicas,
            selector=self.selector,
            template=pod_template,
        ),
    )
def test_sdk_e2e():
    """End-to-end: create an XGBoostJob, wait for success, fetch logs, delete it."""
    job_name = "xgboostjob-iris-ci-test"
    container = V1Container(
        name="xgboost",
        image="docker.io/merlintang/xgboost-dist-iris:1.1",
        args=[
            "--job_type=Train",
            "--xgboost_parameter=objective:multi:softprob,num_class:3",
            "--n_estimators=10",
            "--learning_rate=0.1",
            "--model_path=/tmp/xgboost-model",
            "--model_storage_type=local"
        ],
    )

    def make_replica_spec():
        # Master and Worker use identical single-replica pod templates.
        return V1ReplicaSpec(
            replicas=1,
            restart_policy="Never",
            template=V1PodTemplateSpec(spec=V1PodSpec(containers=[container])),
        )

    xgboostjob = KubeflowOrgV1XGBoostJob(
        api_version="kubeflow.org/v1",
        kind="XGBoostJob",
        metadata=V1ObjectMeta(name=job_name, namespace=SDK_TEST_NAMESPACE),
        spec=KubeflowOrgV1XGBoostJobSpec(
            run_policy=V1RunPolicy(clean_pod_policy="None"),
            xgb_replica_specs={
                "Master": make_replica_spec(),
                "Worker": make_replica_spec(),
            },
        ),
    )

    XGBOOST_CLIENT.create(xgboostjob)
    XGBOOST_CLIENT.wait_for_job(job_name, namespace=SDK_TEST_NAMESPACE)
    if not XGBOOST_CLIENT.is_job_succeeded(job_name, namespace=SDK_TEST_NAMESPACE):
        raise RuntimeError("The XGBoostJob is not succeeded.")
    XGBOOST_CLIENT.get_logs(job_name, namespace=SDK_TEST_NAMESPACE)
    XGBOOST_CLIENT.delete(job_name, namespace=SDK_TEST_NAMESPACE)
def project(kubeconfig):
    """Fixture: create a uniquely named OpenShift project, yield its name, delete it afterwards."""
    project_name = "test-{}".format(uuid.uuid4())
    if kubeconfig is None:
        auth = {}
    else:
        auth = {
            'kubeconfig': str(kubeconfig),
            'host': 'https://localhost:8443',
            'verify_ssl': False,
        }
    helper = OpenShiftAnsibleModuleHelper(
        'v1', 'project', debug=True, reset_logfile=False, **auth)
    created = helper.create_project(metadata=V1ObjectMeta(name=project_name))
    assert created is not None
    yield project_name
    # teardown: remove the project created above
    helper.delete_object(project_name, None)
def create_storage_class(self):
    """Create the GlusterFS StorageClass used by the suite.

    Builds a ``storage.k8s.io/v1`` StorageClass named ``GFS_STORAGE_CLASS``
    with the heketi REST endpoint and replication parameters, then submits it.

    Raises:
        Exception: wrapping the underlying ``ApiException`` on API failure.
    """
    meta = V1ObjectMeta()
    meta.name = GFS_STORAGE_CLASS
    storage_class = V1StorageClass(provisioner="kubernetes.io/glusterfs")
    storage_class.api_version = "storage.k8s.io/v1"
    storage_class.kind = "StorageClass"
    storage_class.metadata = meta
    storage_class.parameters = {
        "resturl": HEKETI_REST_URL,
        "restauthenabled": "false",
        "volumetype": GFS_STORAGE_REPLICATION,
    }
    try:
        # return value was never used; don't bind it to an unused local
        self.storage_api.create_storage_class(storage_class)
    except ApiException as e:
        # chain the original exception so the root-cause traceback is kept
        raise Exception(e) from e
def test_paddle_runtime():
    """Deploy a Paddle InferenceService, run one prediction, then clean up."""
    service_name = 'isvc-paddle-runtime'
    resource_reqs = V1ResourceRequirements(
        requests={"cpu": "200m", "memory": "4Gi"},
        limits={"cpu": "200m", "memory": "4Gi"},
    )
    predictor = V1beta1PredictorSpec(
        min_replicas=1,
        model=V1beta1ModelSpec(
            model_format=V1beta1ModelFormat(name="paddle"),
            storage_uri="https://zhouti-mcp-edge.cdn.bcebos.com/resnet50.tar.gz",
            resources=resource_reqs,
        ),
    )
    isvc = V1beta1InferenceService(
        api_version=constants.KSERVE_V1BETA1,
        kind=constants.KSERVE_KIND,
        metadata=V1ObjectMeta(name=service_name,
                              namespace=KSERVE_TEST_NAMESPACE),
        spec=V1beta1InferenceServiceSpec(predictor=predictor),
    )
    kserve_client = KServeClient(
        config_file=os.environ.get("KUBECONFIG", "~/.kube/config"))
    kserve_client.create(isvc)
    try:
        kserve_client.wait_isvc_ready(
            service_name, namespace=KSERVE_TEST_NAMESPACE, timeout_seconds=720)
    except RuntimeError as e:
        # dump pod state before re-raising so CI logs show why readiness failed
        pods = kserve_client.core_api.list_namespaced_pod(
            KSERVE_TEST_NAMESPACE,
            label_selector='serving.kserve.io/inferenceservice={}'.format(
                service_name))
        for pod in pods.items:
            logging.info(pod)
        raise e
    res = predict(service_name, './data/jay.json')
    assert np.argmax(res["predictions"][0]) == 17
    kserve_client.delete(service_name, KSERVE_TEST_NAMESPACE)
def test_sets_label_job(self):
    """Signing must stamp the signature label and, when given, the job-definition label."""
    signature = "hehehe"
    job = V1Job(metadata=V1ObjectMeta())
    signer = JobSigner(signature)

    signer.sign(job)
    assert job.metadata.labels[JobSigner.LABEL_KEY] == signature, (
        "Metadata label not set")

    job_definition_name = "funfun"
    signer.sign(job, job_definition_name)
    assert job.metadata.labels[
        JobSigner.JOB_DEFINITION_NAME_KEY] == job_definition_name, (
            "Job Definition label not set")
def generate_monitoring_ingress_object(
    secret_name: str,
    namespace: str,
    hosts: List[str],
    service_name: str,
    service_port: int,
) -> V1beta1Ingress:
    """Build a basic-auth-protected, rewriting Ingress for a monitoring service.

    :param secret_name: name of the basic-auth secret (also used for TLS)
    :param namespace: namespace the Ingress is created in
    :param hosts: host names; exactly hosts[0] and hosts[1] are used
    :param service_name: backend service name (also the Ingress name and path prefix)
    :param service_port: backend service port
    :return: the V1beta1Ingress object (not yet submitted to the cluster)
    """
    ingress_path = "/" + service_name + "(/|$)(.*)"

    def rule_for(host: str) -> V1beta1IngressRule:
        # one identical rewrite rule per host, all pointing at the same backend
        return V1beta1IngressRule(
            host=host,
            http=V1beta1HTTPIngressRuleValue(paths=[
                V1beta1HTTPIngressPath(
                    path=ingress_path,
                    backend=V1beta1IngressBackend(
                        service_name=service_name,
                        service_port=service_port,
                    ),
                )
            ]),
        )

    return V1beta1Ingress(
        kind='Ingress',
        metadata=V1ObjectMeta(
            annotations={
                "nginx.ingress.kubernetes.io/auth-type": "basic",
                "nginx.ingress.kubernetes.io/auth-secret": secret_name,
                "nginx.ingress.kubernetes.io/rewrite-target": "/$2",
            },
            name=service_name,
            namespace=namespace,
        ),
        spec=V1beta1IngressSpec(
            # indexing (not iteration) preserves the original IndexError on short lists
            rules=[rule_for(hosts[0]), rule_for(hosts[1])],
            tls=[
                V1beta1IngressTLS(
                    hosts=[hosts[0], hosts[1]],
                    secret_name=secret_name,
                )
            ],
        ),
    )
def test_patch_ingress(self):
    """An ApiException from the client must surface as api.ApiError with a helpful message."""
    self._networking_api.patch_namespaced_ingress = Mock(
        side_effect=ApiException(500, 'Test'))
    ingress = client.NetworkingV1beta1Ingress(
        metadata=V1ObjectMeta(name='ingress'))
    with self.assertRaises(api.ApiError) as e:
        patch_ingress(
            name="ingress",
            namespace='test',
            body=ingress,
            core_api=self._networking_api,
        )
    self.assertEqual(
        "Error when patching the ingress ingress: (500)\nReason: Test\n",
        str(e.exception),
    )
    self.assertEqual(400, e.exception.status_code)
def _create_flush_job(
    batch_api: BatchV1Api,
    command: List[str],
    env: List[V1EnvVar],
    image: str,
    name: str,
    namespace: str,
) -> V1Job:
    """Create the queue-flush Job, or return the existing Job on a name conflict."""
    logger.info(f"creating job: {name}")
    queue_volume = V1Volume(
        name="queue",
        persistent_volume_claim=V1PersistentVolumeClaimVolumeSource(
            claim_name=name),
    )
    flush_container = V1Container(
        image=image,
        command=command,
        name="flush",
        volume_mounts=[V1VolumeMount(mount_path="/data", name="queue")],
        env=env,
    )
    pod_spec = V1PodSpec(
        containers=[flush_container],
        restart_policy="OnFailure",
        volumes=[queue_volume],
    )
    job = V1Job(
        api_version="batch/v1",
        kind="Job",
        metadata=V1ObjectMeta(name=name, namespace=namespace),
        spec=V1JobSpec(template=V1PodTemplateSpec(spec=pod_spec)),
    )
    try:
        return batch_api.create_namespaced_job(namespace=namespace, body=job)
    except ApiException as e:
        # tolerate concurrent creation: fall back to reading the existing job
        if e.reason == CONFLICT and json.loads(
                e.body)["reason"] == ALREADY_EXISTS:
            logger.info(f"using existing job: {name}")
            return batch_api.read_namespaced_job(name, namespace)
        raise
def get_replica_pod(self, pod: V1Pod, remaining_iterations: str) -> V1Pod:
    """
    Creates a replica of the given pod

    :param pod: source pod; its name gains a ``-rpc`` suffix and its first
        container's resource requests/limits are mirrored
    :param remaining_iterations: passed as the argument to the k-means run script
    :return: replica pod
    """
    pod_annotations = pod.metadata.annotations
    # PEP 8: compare to None with 'is', not '==' (original used '== None')
    if pod_annotations is None:
        pod_annotations = {
            'start_time': pod.status.start_time,
            'creation_timestamp': pod.metadata.creation_timestamp
        }
    # hoist the repeated deep attribute chain for readability
    source = pod.spec.containers[0]
    return V1Pod(
        api_version="v1",
        kind="Pod",
        metadata=V1ObjectMeta(name=pod.metadata.name + "-rpc",
                              annotations=pod_annotations),
        spec=V1PodSpec(
            scheduler_name="heater",
            containers=[
                V1Container(
                    name=source.name,
                    image_pull_policy="IfNotPresent",
                    image="kmeans-static:V4",
                    command=[
                        "/usr/local/bin/run-kmeans.sh", remaining_iterations
                    ],
                    # mirror the source container's cpu/memory requests & limits
                    resources=V1ResourceRequirements(
                        requests={
                            'cpu': source.resources.requests['cpu'],
                            'memory': source.resources.requests['memory'],
                        },
                        limits={
                            'cpu': source.resources.limits['cpu'],
                            'memory': source.resources.limits['memory'],
                        },
                    ),
                )
            ],
            restart_policy="OnFailure"))
def list_cluster_role(self):
    """Fetch all ClusterRoles via the raw RBAC API and deserialize them by hand.

    Returns a V1ClusterRoleList built from the raw JSON response; only each
    item's metadata name/creationTimestamp and each rule's resources/verbs
    are mapped into the client objects.
    """
    # raw call through the (name-mangled) private API helper of this class
    json_data = self.__call_api(
        '/apis/rbac.authorization.k8s.io/v1/clusterroles',
        'GET',
        path_params={},
        query_params=[],
        header_params={
            'Content-Type': 'application/json',
            'Accept': 'application/json'
        },
        body=None,
        post_params=[],
        files={},
        response_type='V1ClusterRoleList',
        auth_settings=['BearerToken'],
        _return_http_data_only=None,
        collection_formats={},
        _preload_content=True,
        _request_timeout=None)
    cluster_roles = []
    # json_data[0] is the deserialized response body; iterate its items
    for i in json_data[0]['items']:
        metadata = V1ObjectMeta(
            name=i['metadata']['name'],
            # NOTE(review): explicitly mangled helper name — presumably lives on
            # ApiClientTemp; "datatime" looks like a typo carried over from there
            creation_timestamp=self._ApiClientTemp__deserialize_datatime(
                i['metadata']['creationTimestamp']))
        rules = []
        if i['rules'] is not None:
            for rule in i['rules']:
                # 'resources' and 'verbs' are optional keys on a policy rule
                resources = None
                if 'resources' in rule.keys():
                    resources = rule['resources']
                verbs = None
                if 'verbs' in rule.keys():
                    verbs = rule['verbs']
                rules.append(V1PolicyRule(resources=resources, verbs=verbs))
        cluster_role = V1ClusterRole(kind='ClusterRole',
                                     metadata=metadata,
                                     rules=rules)
        cluster_roles.append(cluster_role)
    return V1ClusterRoleList(items=cluster_roles)
def list_cluster_role_binding(self):
    """Fetch all ClusterRoleBindings via the raw RBAC API and deserialize by hand.

    Returns a plain list of V1ClusterRoleBinding objects (not a *List wrapper),
    mapping each item's metadata name, roleRef, and subjects.
    """
    # raw call through the (name-mangled) private API helper of this class
    json_data = self.__call_api(
        resource_path=
        '/apis/rbac.authorization.k8s.io/v1/clusterrolebindings',
        method='GET',
        path_params={},
        query_params=[],
        header_params={
            'Content-Type': 'application/json',
            'Accept': 'application/json'
        },
        body=None,
        post_params=[],
        files={},
        response_type='V1ClusterRoleBindingList',
        auth_settings=['BearerToken'],
        _return_http_data_only=None,
        collection_formats={},
        _preload_content=True,
        _request_timeout=None)
    cluster_role_bindings = []
    # json_data[0] is the deserialized response body; iterate its items
    for i in json_data[0]['items']:
        metadata = V1ObjectMeta(name=i['metadata']['name'])
        role_ref = V1RoleRef(api_group=i['roleRef']['apiGroup'],
                             name=i['roleRef']['name'],
                             kind=i['roleRef']['kind'])
        subjects = []
        if i['subjects'] is not None:
            for s in i['subjects']:
                # namespace is optional on a subject
                namespace = None
                if 'namespace' in s.keys():
                    namespace = s['namespace']
                subjects.append(
                    V1Subject(kind=s['kind'],
                              name=s['name'],
                              namespace=namespace))
        cluster_role_binding = V1ClusterRoleBinding(metadata=metadata,
                                                    role_ref=role_ref,
                                                    subjects=subjects)
        cluster_role_bindings.append(cluster_role_binding)
    return cluster_role_bindings
def put_secret(
    self,
    name: str,
    data: 'dict | None' = None,
    namespace: str = 'default',
    raw_data: 'dict | None' = None,
):
    '''creates or updates (replaces) the specified secret.

    Exactly one of ``data`` or ``raw_data`` must be given.  ``data`` values
    are expected to be scalars: each value is converted to str here, then
    utf-8 encoded and base64 encoded — so such a conversion must not have
    been done by the caller.  ``raw_data`` is used verbatim.

    :raises ValueError: if neither or both of data / raw_data are set
    '''
    # exactly one of the two inputs must be truthy
    if not bool(data) ^ bool(raw_data):
        raise ValueError('Exactly one data or raw data has to be set')
    ne = not_empty
    metadata = V1ObjectMeta(name=ne(name), namespace=ne(namespace))
    if data:
        raw_data = {
            k: base64.b64encode(str(v).encode('utf-8')).decode('utf-8')
            for k, v in data.items()
        }
    secret = V1Secret(metadata=metadata, data=raw_data)
    # find out whether we have to replace or to create
    try:
        self.core_api.read_namespaced_secret(name=name, namespace=namespace)
        secret_exists = True
    except ApiException as ae:
        # only 404 (secret absent) is expected; anything else propagates
        if not ae.status == 404:
            raise ae
        secret_exists = False
    if secret_exists:
        self.core_api.replace_namespaced_secret(name=name,
                                                namespace=namespace,
                                                body=secret)
    else:
        self.core_api.create_namespaced_secret(namespace=namespace,
                                               body=secret)
def _generate_mpi_job(self, runobj: RunObject, execution: MLClientCtx,
                      meta: client.V1ObjectMeta) -> typing.Dict:
    """Render the MPIJob resource dict for a run.

    Starts from a deep copy of ``self._mpijob_template`` and overlays run
    metadata, replica count, image, volumes, scheduling constraints, env
    vars and container settings taken from ``self.spec``.

    :param runobj: run being launched; source of the runtime env vars
    :param execution: client execution context (not read in this method)
    :param meta: metadata (name/labels) applied to the generated job
    :return: the MPIJob as a plain dict
    """
    job = deepcopy(self._mpijob_template)
    # pods carry the job's labels plus an mlrun/job marker with the job name
    pod_labels = deepcopy(meta.labels)
    pod_labels["mlrun/job"] = meta.name
    update_in(job, "metadata", meta.to_dict())
    update_in(job, "spec.template.metadata.labels", pod_labels)
    update_in(job, "spec.replicas", self.spec.replicas or 1)
    if self.spec.image:
        self._update_container(job, "image", self.full_image_path())
    update_in(job, "spec.template.spec.volumes", self.spec.volumes)
    self._update_container(job, "volumeMounts", self.spec.volume_mounts)
    # scheduling constraints from the spec
    update_in(job, "spec.template.spec.nodeName", self.spec.node_name)
    update_in(job, "spec.template.spec.nodeSelector", self.spec.node_selector)
    update_in(job, "spec.template.spec.affinity",
              self.spec._get_sanitized_affinity())
    # runtime-derived env vars are prepended to the spec's own env list
    extra_env = self._generate_runtime_env(runobj)
    extra_env = [{"name": k, "value": v} for k, v in extra_env.items()]
    self._update_container(job, "env", extra_env + self.spec.env)
    if self.spec.image_pull_policy:
        self._update_container(job, "imagePullPolicy",
                               self.spec.image_pull_policy)
    if self.spec.resources:
        self._update_container(job, "resources", self.spec.resources)
    if self.spec.workdir:
        self._update_container(job, "workingDir", self.spec.workdir)
    if self.spec.image_pull_secret:
        update_in(
            job,
            "spec.template.spec.imagePullSecrets",
            [{
                "name": self.spec.image_pull_secret
            }],
        )
    if self.spec.command:
        # the user command is wrapped as: mpirun python <command> <args...>
        self._update_container(job, "command",
                               ["mpirun", "python", self.spec.command] +
                               self.spec.args)
    return job
def test_delete_garbage(mocker, tensorboard_manager_mocked: TensorboardManager,
                        current_datetime: datetime, delete_count: int):
    """delete_garbage must call delete exactly delete_count times for stale deployments."""
    mocker.patch.object(
        TensorboardManager, '_get_current_datetime').return_value = current_datetime
    mocker.patch.object(tensorboard_manager_mocked, 'list').return_value = [
        V1Deployment(metadata=V1ObjectMeta(name='fake-name'))
    ]
    mocker.patch.object(tensorboard_manager_mocked, 'delete')
    last_request_patch = mocker.patch.object(
        tensorboard.tensorboard, 'try_get_last_request_datetime')
    last_request_patch.return_value = datetime(
        year=2018, month=6, day=19, hour=12, minute=0)
    mocker.patch.object(tensorboard_manager_mocked, 'refresh_garbage_timeout')
    mocker.patch.object(
        tensorboard_manager_mocked, 'get_garbage_timeout').return_value = 1800

    tensorboard_manager_mocked.delete_garbage()

    # noinspection PyUnresolvedReferences
    assert tensorboard_manager_mocked.delete.call_count == delete_count
def pod_disruption_budget_for_service_instance(
    service: str,
    instance: str,
    min_instances: int,
) -> V1beta1PodDisruptionBudget:
    """Build a PodDisruptionBudget in the paasta namespace keeping min_instances available."""
    instance_selector = V1LabelSelector(match_labels={
        "service": service,
        "instance": instance,
    })
    return V1beta1PodDisruptionBudget(
        metadata=V1ObjectMeta(
            name=f"{service}-{instance}",
            namespace="paasta",
        ),
        spec=V1beta1PodDisruptionBudgetSpec(
            min_available=min_instances,
            selector=instance_selector,
        ),
    )
def test_sanitize_config_hash(self):
    """replicas must be stripped from the spec before config hashing."""
    deployment_config = V1Deployment(
        metadata=V1ObjectMeta(name='qwe', labels={'mc': 'grindah'}),
        spec=V1DeploymentSpec(
            replicas=2,
            selector=V1LabelSelector(match_labels={'freq': '108.9'}),
            template=V1PodTemplateSpec(),
        ),
    )
    sanitized = self.deployment.sanitize_for_config_hash(deployment_config)
    assert 'replicas' not in sanitized['spec'].keys()
def _build_deployment(self):
    """Assemble an apps/v1beta1 Deployment with a RollingUpdate strategy."""
    pod_template = V1PodTemplateSpec(
        metadata=V1ObjectMeta(
            labels=self.target_labels,
            annotations=self.annotations or None,
        ),
        spec=V1PodSpec(
            containers=self.containers,
            volumes=self.volumes,
            node_name=self.node_name,
            hostname=self.host_name,
        ),
    )
    rollout = AppsV1beta1DeploymentStrategy(
        type="RollingUpdate",
        rolling_update=AppsV1beta1RollingUpdateDeployment(max_surge=0),
    )
    return AppsV1beta1Deployment(
        metadata=self.meta,
        spec=AppsV1beta1DeploymentSpec(
            replicas=self.replicas,
            selector=self.selector,
            template=pod_template,
            strategy=rollout,
        ),
    )
def copy_secrets(from_ns: str, to_ns: str, secret_names: List[str]):
    """Copy the named secrets from one namespace to another.

    Each secret is read from ``from_ns`` and re-created in ``to_ns`` with
    fresh metadata carrying only the name and target namespace.

    :param from_ns: source namespace
    :param to_ns: target namespace
    :param secret_names: names of the secrets to copy
    """
    for arg in [from_ns, to_ns, secret_names]:
        ensure_not_empty(arg)
    info('args: from: {}, to: {}, names: {}'.format(from_ns, to_ns,
                                                    secret_names))
    core_api = ctx.create_core_api()
    for name in secret_names:
        # NOTE(review): export= was deprecated and later removed in newer
        # kubernetes clients — confirm the pinned client still supports it
        secret = core_api.read_namespaced_secret(name=name,
                                                 namespace=from_ns,
                                                 export=True)
        # build fresh metadata per secret instead of mutating one shared
        # V1ObjectMeta across iterations (fragile if creation is deferred)
        secret.metadata = V1ObjectMeta(name=name, namespace=to_ns)
        core_api.create_namespaced_secret(namespace=to_ns, body=secret)
def update_prometheus_adapter_configmap(
        kube_client: KubeClient, config: PrometheusAdapterConfig) -> None:
    """Replace the prometheus-adapter ConfigMap with the serialized config."""
    serialized_config = yaml.dump(
        config,
        default_flow_style=False,
        explicit_start=True,
        width=sys.maxsize,
    )
    configmap = V1ConfigMap(
        metadata=V1ObjectMeta(name=PROMETHEUS_ADAPTER_CONFIGMAP_NAME),
        data={PROMETHEUS_ADAPTER_CONFIGMAP_FILENAME: serialized_config},
    )
    kube_client.core.replace_namespaced_config_map(
        name=PROMETHEUS_ADAPTER_CONFIGMAP_NAME,
        namespace=PROMETHEUS_ADAPTER_CONFIGMAP_NAMESPACE,
        body=configmap,
    )
def create_v1job(
    cls,
    job_spec: JobSpec,
    name: str,
    labels: Optional[Dict[str, str]] = None,
) -> V1Job:
    '''creates a V1Job from a JobSpec, a job name, and an optional set of labels'''
    sanitized_name = util.sanitize_job_name(name)
    # labels are accepted but intentionally not attached yet
    # (todo: sanitize labels before enabling them on the metadata)
    meta = V1ObjectMeta(generate_name=sanitized_name + '-')
    return V1Job(
        api_version=k.BATCH_V1_VERSION,
        kind='Job',
        metadata=meta,
        spec=job_spec.spec,
    )
def create_config_maps(self, file_names):
    """Create Kubernetes ConfigMaps for the input files to the simulation.

    :param file_names: iterable of (file_name, id_) pairs; each file's
        contents become a ConfigMap named ``id_`` in the default namespace,
        keyed by the file's basename.
    """
    for file_name, id_ in file_names:
        # explicit utf-8 so behavior doesn't depend on the host locale
        with open(file_name, 'r', encoding='utf-8') as fd:
            data = fd.read()
        configmap = V1ConfigMap(
            api_version='v1',
            kind='ConfigMap',
            data={os.path.basename(file_name): data},
            metadata=V1ObjectMeta(name=id_, namespace='default'),
        )
        # (removed a leftover debug print of the full configmap)
        self.core_v1.create_namespaced_config_map(namespace='default',
                                                  body=configmap)
def daemonset_pod():
    """Fixture pod owned by a DaemonSet, Running with host IP 10.10.10.2."""
    owner = V1OwnerReference(
        kind='DaemonSet', api_version='foo', name='daemonset', uid='bar')
    container = V1Container(
        name='container1',
        resources=V1ResourceRequirements(requests={'cpu': '1.5'}),
    )
    return V1Pod(
        metadata=V1ObjectMeta(
            name='daemonset_pod',
            annotations=dict(),
            owner_references=[owner],
        ),
        status=V1PodStatus(
            phase='Running',
            host_ip='10.10.10.2',
        ),
        spec=V1PodSpec(containers=[container]),
    )
def template_metadata(
        accelerator: Optional[Accelerator] = None,
        tpu_driver: str = k.DEFAULT_TPU_DRIVER) -> Optional[V1ObjectMeta]:
    """generates template metadata for given accelerator type

    Args:
    accelerator: accelerator type, or None for cpu
    tpu_driver: tpu driver to use

    Returns:
    template metadata necessary for given accelerator, or None for non-TPU
    """
    # isinstance (rather than type() ==) is the idiomatic check and also
    # accepts TPU subclasses
    if isinstance(accelerator, TPU):
        return V1ObjectMeta(
            annotations={k.TEMPLATE_META_ANNOTATION_TPU_DRIVER: tpu_driver})
    return None