def log_k8s_event(self, asg_name, price="", useSpot=False):
    """Emit a Kubernetes event carrying spot-price info for an ASG.

    Args:
        asg_name: Autoscaling-group name the event refers to.
        price: Spot price, as a string (may be empty).
        useSpot: Whether spot instances are recommended.

    Outside the cluster the message is only logged locally.
    """
    import json

    # Build the message with json.dumps instead of manual string
    # concatenation so quotes or special characters in `price` cannot
    # produce malformed JSON. json.dumps renders booleans as lowercase
    # true/false, matching the previous str(useSpot).lower().
    msg_str = json.dumps({
        "apiVersion": "v1alpha1",
        "spotPrice": price,
        "useSpot": bool(useSpot),
    })
    event_namespace = os.getenv('EVENT_NAMESPACE', 'default')
    if not self.incluster:
        # Not running inside a cluster: just log and bail out.
        logger.info(msg_str)
        return
    try:
        config.load_incluster_config()
        v1 = client.CoreV1Api()
        event_timestamp = datetime.now(pytz.utc)
        event_name = "spot-instance-update"
        new_event = client.V1Event(
            count=1,
            first_timestamp=event_timestamp,
            involved_object=client.V1ObjectReference(
                kind="SpotPriceInfo",
                name=asg_name,
                namespace=event_namespace,
            ),
            last_timestamp=event_timestamp,
            metadata=client.V1ObjectMeta(generate_name=event_name),
            message=msg_str,
            reason="SpotRecommendationGiven",
            source=client.V1EventSource(component="minion-manager"),
            type="Normal",
        )
        v1.create_namespaced_event(namespace=event_namespace, body=new_event)
        logger.info("Spot price info event logged")
    except Exception as e:
        # Best-effort: event logging must never break the caller.
        logger.info("Failed to log event: " + str(e))
def save_most_frequent_word(message: str):
    """Pipeline: count the most frequent word and attach image pull secrets."""
    word_counter = GetFrequentWordOp(name='get-Frequent', message=message)
    # set_image_pull_secrets must be invoked on the conf object returned
    # by get_pipeline_conf().
    conf = dsl.get_pipeline_conf()
    conf.set_image_pull_secrets([k8s_client.V1ObjectReference(name="secretA")])
def scheduler(name, node, namespace="default"):
    """Bind pod `name` to `node` via the CoreV1 binding API.

    Returns True when the API call yields a truthy response, False otherwise.
    """
    node_ref = client.V1ObjectReference()
    node_ref.kind = 'Node'
    node_ref.apiVersion = 'v1'
    node_ref.name = node

    binding_meta = client.V1ObjectMeta()
    binding_meta.name = name

    binding = client.V1Binding()
    binding.target = node_ref
    binding.metadata = binding_meta

    try:
        # Method was renamed in client v6.0:
        #   return v1.create_namespaced_binding(body, namespace)
        # The *_binding_binding form below is for the v2.0 client.
        res = v1.create_namespaced_binding_binding(name, namespace, binding)
        if res:
            return True
    except Exception as a:
        print(
            "Exception when calling CoreV1Api->create_namespaced_binding: %s\n"
            % a)
    return False
def warn_no_solution_found(self, event, namespace='default'):
    '''Add event message to pod description when optimizer is unable to find solution.

    :param event: watch event dict whose 'object' key holds the pod object.
    :param namespace: namespace in which the event is created.
    '''
    # Renamed from `object`, which shadowed the builtin of the same name.
    involved = client.V1ObjectReference(
        api_version='v1',
        kind='Pod',
        name=event['object'].metadata.name,
        namespace=namespace,
        resource_version=event['object'].metadata.resource_version,
        uid=event['object'].metadata.uid)
    meta = client.V1ObjectMeta(name=event['object'].metadata.name)
    source = client.V1EventSource(component=self.name)
    timestamp = datetime.now(pytz.utc)
    body = client.V1Event(
        count=1,
        first_timestamp=timestamp,
        involved_object=involved,
        last_timestamp=timestamp,
        message=
        "Optimizer was unable to find solution for batch containing pod.",
        metadata=meta,
        reason='FailedScheduling',
        source=source,
        type='Warning')
    # _preload_content=False: skip deserialization of the API response.
    self.api.create_namespaced_event(namespace=namespace,
                                     body=body,
                                     _preload_content=False)
def scheduler(name, model, ns):
    """Select a node for pod `name` via the model, then bind the pod to it."""
    # Step 3 - node selection is delegated to get_nodes(model).
    chosen_node = get_nodes(model)
    logging.info("Putting {0} on {1} in namespace: {2}".format(name, chosen_node, ns))
    # Binding construction follows the workaround discussed in:
    # https://github.com/kubernetes-client/python/issues/547
    # https://github.com/kubernetes-client/python/issues/547#issuecomment-455362558
    node_ref = client.V1ObjectReference()
    node_ref.kind = "Node"
    node_ref.apiVersion = "v1"
    node_ref.name = chosen_node
    pod_meta = client.V1ObjectMeta()
    pod_meta.name = name
    binding = client.V1Binding(target=node_ref, metadata=pod_meta)
    # Step 4 - bind the pod to the selected node.
    return v1_api.create_namespaced_binding(namespace=ns, body=binding)
def CreateEvents(eventsDetails, chaosDetails, kind, eventName, clients):
    """Create a chaos V1Event in the chaos namespace.

    Returns None on success, or a ValueError describing the failure
    (Go-style error return: callers check the value, it is not raised).
    """
    # Take the timestamp once so first_timestamp, last_timestamp and
    # event_time agree exactly; three separate now() calls could differ
    # by microseconds.
    now = datetime.now(pytz.utc)
    event = client.V1Event(
        first_timestamp=now,
        last_timestamp=now,
        event_time=now,
        involved_object=client.V1ObjectReference(
            api_version="litmuschaos.io/v1alpha1",
            kind=kind,
            name=eventsDetails.ResourceName,
            namespace=chaosDetails.ChaosNamespace,
            uid=eventsDetails.ResourceUID,
        ),
        message=eventsDetails.Message,
        metadata=client.V1ObjectMeta(
            name=eventName,
            namespace=chaosDetails.ChaosNamespace,
        ),
        reason=eventsDetails.Reason,
        related=None,
        action="ChaosEvent",
        reporting_component="litmuschaos.io/v1alpha1",
        reporting_instance=eventsDetails.ResourceName,
        series=None,
        source=client.V1EventSource(component=chaosDetails.ChaosPodName),
        type=eventsDetails.Type,
        local_vars_configuration=None,
        count=1,
    )
    try:
        clients.clientCoreV1.create_namespaced_event(
            chaosDetails.ChaosNamespace, body=event)
    except Exception as exp:
        return ValueError("Failed to create event with err: {}".format(exp))
    return None
def BindPodToNode(self, podname, node, ns):
    """
    Binds a pod to a node to start the deployment process.

    :param podname: name of the pod to bind
    :param node: node name to bind the pod to
    :param ns: namespace of the pod
    :return: API response on success, False on ApiException, True when the
             client's known ValueError quirk fires (bind still succeeded).
    """
    try:
        target = client.V1ObjectReference()
        target.kind = "Node"
        target.apiVersion = "v1"
        target.name = node
        meta = client.V1ObjectMeta()
        meta.name = podname
        # target/metadata are supplied via the constructor; the previous
        # redundant body.target/body.metadata re-assignments are removed.
        body = client.V1Binding(target=target, metadata=meta)
        return self.v1.create_namespaced_binding(namespace=ns, body=body)
    except ApiException as e:
        self.logger.error(
            f'Failed to bind pod {podname} to node {node} in namespace {ns}: {e}'
        )
        return False
    except ValueError:
        # This is not a real error. It's a problem in the API waiting to be fixed:
        # https://github.com/kubernetes-client/python/issues/547
        pass
    return True
def scheduler(name, node, namespace):
    """Bind pod `name` to `node` inside `namespace`."""
    node_ref = client.V1ObjectReference(api_version='v1', kind="Node", name=node)
    pod_meta = client.V1ObjectMeta(name=name)
    binding = client.V1Binding(metadata=pod_meta, target=node_ref)
    # _preload_content=False skips deserialization of the API response.
    return core_api.create_namespaced_binding(namespace, binding,
                                              _preload_content=False)
def create_pv(username, namespace, path, storage_size):
    """Build a gpfs-backed V1PersistentVolume for a jupyterhub user.

    :param username: raw username; escaped the same way jupyterhub does.
    :param namespace: namespace for the PV metadata and claim reference.
    :param path: base host path; the user directory is appended.
    :param storage_size: capacity string, e.g. '10Gi'.
    :return: (pv, path) — the V1PersistentVolume and the user's host path.
    """
    safe_chars = set(string.ascii_lowercase + string.digits)
    # Need to format the username the same way jupyterhub does.
    username = escapism.escape(username, safe=safe_chars,
                               escape_char='-').lower()
    name = 'gpfs-{!s}'.format(username)
    claim_name = 'claim-{!s}'.format(username)
    path = os.path.join(path, username)
    metadata = client.V1ObjectMeta(name=name, namespace=namespace)
    claim_ref = client.V1ObjectReference(namespace=namespace, name=claim_name)
    # Keyword arguments instead of positionals: generated k8s models do not
    # guarantee positional parameter order across client versions.
    host_path = client.V1HostPathVolumeSource(path=path,
                                              type='DirectoryOrCreate')
    spec = client.V1PersistentVolumeSpec(
        access_modes=[
            'ReadWriteOnce',
        ],
        capacity={
            'storage': storage_size,
        },
        claim_ref=claim_ref,
        host_path=host_path,
        storage_class_name='gpfs',
        persistent_volume_reclaim_policy='Retain',
        volume_mode='Filesystem')
    pv = client.V1PersistentVolume(api_version='v1',
                                   kind='PersistentVolume',
                                   metadata=metadata,
                                   spec=spec)
    return pv, path
def get_persistent_volume_spec(namespace,
                               volume,
                               run_type,
                               access_modes='ReadWriteOnce',
                               persistent_volume_reclaim_policy='Recycle'):
    """Assemble the V1PersistentVolumeSpec for `volume` under `run_type`."""
    vol_path = get_vol_path(volume, run_type)
    # Host-path volumes on minikube, NFS on a real kubernetes cluster.
    if run_type == RunTypes.MINIKUBE:
        extra_params = get_host_path_pvol(vol_path)
    elif run_type == RunTypes.KUBERNETES:
        extra_params = get_nfs_pvol(vol_path)
    else:
        raise PolyaxonConfigurationError(
            'Run type `{}` is not allowed.'.format(run_type))
    claim_ref = client.V1ObjectReference(
        api_version=k8s_constants.K8S_API_VERSION_V1,
        kind=k8s_constants.K8S_PERSISTENT_VOLUME_CLAIM_KIND,
        name=constants.VOLUME_CLAIM_NAME.format(vol_name=volume),
        namespace=namespace)
    return client.V1PersistentVolumeSpec(
        capacity={'storage': STORAGE_BY_VOLUME[volume]},
        access_modes=to_list(access_modes),
        persistent_volume_reclaim_policy=persistent_volume_reclaim_policy,
        claim_ref=claim_ref,
        **extra_params)
def event_body(self):
    """Build and return the V1Event body for this ceph-mgr event."""
    # Fixed name when unique; otherwise let the server generate one.
    meta_kwargs = ({'name': "{}".format(self.event_name)}
                   if self.unique_name else
                   {'generate_name': "{}".format(self.event_name)})
    obj_meta = client.V1ObjectMeta(**meta_kwargs)
    # field_path is needed to prevent problems in the namespacewatcher when
    # deleted events are received.
    obj_ref = client.V1ObjectReference(kind="CephCluster",
                                       field_path='spec.containers{mgr}',
                                       name=self.event_name,
                                       namespace=self.namespace)
    event_source = client.V1EventSource(component="ceph-mgr", host=self.host)
    return client.V1Event(involved_object=obj_ref,
                          metadata=obj_meta,
                          message=self.message,
                          count=self.count,
                          type=self.event_type,
                          reason=self.event_reason,
                          source=event_source,
                          first_timestamp=self.first_timestamp,
                          last_timestamp=self.last_timestamp)
def handle_oom_event_saved(sender, instance, created, **kwargs):
    """Post-save hook for OOM records: queue a workload suggestion and,
    for newly created records only, emit a ContainerOOM notification."""
    from kra import tasks

    oom = instance
    container = oom.container
    pod = container.pod

    # A suggestion is refreshed on every save, including updates.
    tasks.make_suggestion.delay(pod.workload_id, container.name)

    if not created:
        return

    ref = api.V1ObjectReference(
        kind='Pod',
        name=pod.name,
        namespace=pod.namespace,
    )
    prefix = 'CRITICAL ' if oom.is_critical else ''
    msg = prefix + (
        f'OOM in container {container.name} of pod {pod.namespace}/{pod.name}, '
        f'comm: {oom.victim_comm}({oom.victim_pid})')
    tasks.notifications.create_event.apply_async(
        args=('ContainerOOM', ref, msg, oom.happened_at, pod.namespace),
        serializer='pickle',
    )
def GeneratePodEvent(self, podname, ns, reason, _type, message):
    """
    Generates a pod event on the kubernetes API server
    """
    try:
        meta = client.V1ObjectMeta(
            name=f'{podname}.{self.GetRandomUid()}', namespace=ns)
        invobj = client.V1ObjectReference(kind="Pod",
                                          name=podname,
                                          namespace=ns)
        #invobj.uid = ruid
        if _type == K8SEventType.EVENT_TYPE_NORMAL:
            etype, emit = "Normal", self.logger.info
        else:
            etype, emit = "Warning", self.logger.warning

        timestamp = self.GetTimeNow()

        # Log an event in our pod too instead of duplicating externally
        emit(f'Event for pod {ns}/{podname} -- Reason={reason}, message={message}')

        event = client.V1Event(involved_object=invobj,
                               metadata=meta,
                               reason=reason,
                               message=f'NHD: {message}',
                               count=1,
                               type=etype,
                               first_timestamp=timestamp,
                               last_timestamp=timestamp)
        self.v1.create_namespaced_event(namespace=ns, body=event)
    except ApiException as e:
        self.logger.error(f'Failed to send event for pod {podname}: {e}')
def bind_to_node(self, pod_name, node_name, namespace='default'):
    """
    Bind Pod to a Node

    :param str pod_name: pod name which we are binding
    :param str node_name: node name which pod has to be binded
    :param str namespace: namespace of pod
    :return: True if pod was bound successfully, False otherwise
    """
    target = client.V1ObjectReference()
    target.kind = "Node"
    target.api_version = "v1"
    target.name = node_name
    meta = client.V1ObjectMeta()
    meta.name = pod_name
    # metadata is now passed to the constructor; the redundant
    # body.target re-assignment and the leftover debug prints are gone.
    body = client.V1Binding(target=target, metadata=meta)
    try:
        self.v1.create_namespaced_binding(namespace, body)
        return True
    except Exception as e:
        # create_namespaced_binding() raises
        #   Invalid value for `target`, must not be `None`
        # while deserializing the response, even though the Pod was in
        # fact bound and is running (kubernetes-client/python#547).
        print('exception' + str(e))
        return False
def imagepullsecrets_pipeline(
        message: str = "When flies fly behind flies, then flies are following flies."):
    """A pipeline function describing the orchestration of the workflow."""
    counter = get_frequent_word_op(message=message)
    # set_image_pull_secrets must be invoked on the conf object returned
    # by get_pipeline_conf().
    conf = dsl.get_pipeline_conf()
    conf.set_image_pull_secrets([k8s_client.V1ObjectReference(name="secretA")])
def schedule(name, node, namespace='default'):
    """Bind pod `name` to `node`, tolerating the client's Binding ValueError."""
    node_ref = client.V1ObjectReference(kind='Node',
                                        api_version='v1',
                                        name=node)
    binding = client.V1Binding(target=node_ref,
                               metadata=client.V1ObjectMeta(name=name))
    try:
        client.CoreV1Api().create_namespaced_binding(namespace=namespace,
                                                     body=binding)
    except ValueError:
        print("Exception")
def __target_ref(self, leader_ip, latest_subsets, pod):
    """Return an existing target_ref for the leader if present, else a new one."""
    # Prefer re-using a target_ref already present in the endpoint subsets.
    for subset in latest_subsets:
        for addr in subset.addresses or []:
            is_ours = (addr.ip == leader_ip and addr.target_ref
                       and addr.target_ref.name == self._name)
            if is_ours:
                return addr.target_ref
    # No match found: build a fresh reference to our pod.
    return k8s_client.V1ObjectReference(kind='Pod', uid=pod.metadata.uid,
                                        namespace=self._namespace, name=self._name,
                                        resource_version=pod.metadata.resource_version)
def inject_env_vars():
    """Attach the registry pull secret and forward the AWS/S3 env vars."""
    pull_secret = k8s_client.V1ObjectReference(
        name="k8scc01covidacr-registry-connection")
    dsl.get_pipeline_conf().set_image_pull_secrets([pull_secret])
    env_var_names = ('AWS_ACCESS_KEY_ID', 'AWS_SECRET_ACCESS_KEY',
                     'AWS_REGION', 'S3_REGION', 'S3_ENDPOINT', 'S3_USE_HTTPS',
                     'S3_VERIFY_SSL')
    for var in env_var_names:
        inject_env_var(var)
def create_k8s_binding(name, node):
    """Return a V1Binding object binding pod `name` to `node`."""
    node_ref = client.V1ObjectReference()
    node_ref.kind = "Node"
    node_ref.apiVersion = "v1"
    node_ref.name = node
    pod_meta = client.V1ObjectMeta()
    pod_meta.name = name
    return client.V1Binding(metadata=pod_meta, target=node_ref)
def test():
    """Scratch/exploration routine: instantiate a range of empty k8s model
    objects, print them, then read and locally mutate a hard-coded pod.

    Relies on module-level globals `client`, `pprint` and `apis_api`.
    """
    try:
        # Instantiate each model with defaults and dump its repr.
        V1NamespaceBody = client.V1Namespace()
        pprint("V1NamespaceBody={}".format(V1NamespaceBody))
        V1ObjectReferenceBody = client.V1ObjectReference()
        pprint("V1ObjectReferenceBody={}".format(V1ObjectReferenceBody))
        V1ObjectMetaBody = client.V1ObjectMeta()
        pprint("V1ObjectMetaBody={}".format(V1ObjectMetaBody))
        #V1BindingBody = client.V1Binding()
        #pprint("V1BindingBody={}".format(V1BindingBody))
        V1ConfigMapBody = client.V1ConfigMap()
        pprint("V1ConfigMapBody={}".format(V1ConfigMapBody))
        V1Pod = client.V1Pod()
        pprint("V1Pod={}".format(V1Pod))
        V1PodTemplate = client.V1PodTemplate()
        pprint("V1PodTemplate={}".format(V1PodTemplate))
        V1ReplicationController = client.V1ReplicationController()
        pprint("V1ReplicationController={}".format(V1ReplicationController))
        V1Service = client.V1Service()
        pprint("V1Service={}".format(V1Service))
        V1Node = client.V1Node()
        pprint("V1Node={}".format(V1Node))
        # Hard-coded pod name from a specific cluster; this call fails
        # unless that pod exists in the connected cluster.
        pod = 'nginx-no-split-655b866cfd-54xmg'
        namespace='default'
        read_pod = apis_api.read_namespaced_pod(name=pod,namespace=namespace)
        pprint("read_pod={}".format(read_pod))
        # Mutates only the local copy; nothing is written back to the API.
        lifecycle=read_pod.spec.node_selector.lifecycle
        pprint("lifecycle={}".format(lifecycle))
        read_pod.spec.node_selector.lifecycle='OnDemand'
        pprint("read_pod={}".format(read_pod))
        #metadata = read_pod.metadata
        #pprint("metadata={}".format(metadata))
        #metadata.cluster_name = 'Ec2SpotEKS4'
        #pprint("metadata={}".format(metadata))
    except Exception as e:
        # NOTE(review): the message mentions create_namespace, but no such
        # call is made above — likely a copy-paste leftover.
        print("Exception when calling CoreV1Api->create_namespace: %s\n" % e)
def schedule_pod(cli, name):
    """Bind pod `name` to the (hard-coded) k3d default worker node."""
    node_ref = client.V1ObjectReference()
    node_ref.kind = 'Node'
    node_ref.apiVersion = 'v1'
    # The target node name is fixed to the k3d default worker.
    node_ref.name = 'k3d-k3s-default-worker-1'
    pod_meta = client.V1ObjectMeta()
    pod_meta.name = name
    binding = client.V1Binding(metadata=pod_meta, target=node_ref)
    return cli.create_namespaced_binding('default', binding)
def schedule(self, name, node, namespace='default'):
    '''Create a binding object to schedule the pod.'''
    binding = client.V1Binding(
        target=client.V1ObjectReference(kind='Node',
                                        api_version='v1',
                                        name=node),
        metadata=client.V1ObjectMeta(name=name))
    # _preload_content=False skips deserialization of the API response.
    self.api.create_namespaced_binding(namespace=namespace,
                                       body=binding,
                                       _preload_content=False)
def scheduler(name, node, namespace='default'):
    """Bind pod `name` to `node` in `namespace`."""
    target = client.V1ObjectReference()
    target.kind = 'Node'
    # Fixed: the core API version string is 'v1'; the previous value
    # 'corev1' is not a valid apiVersion (every sibling scheduler in this
    # file uses 'v1'). The leftover debug print of `target` is removed.
    target.apiVersion = 'v1'
    target.name = node
    meta = client.V1ObjectMeta()
    meta.name = name
    body = client.V1Binding(metadata=meta, target=target)
    return corev1.create_namespaced_binding(namespace=namespace, body=body)
def scheduler(name, node, namespace="default"):
    """Bind pod `name` to `node`, echoing both for debugging."""
    print(name)
    print(node)
    node_ref = client.V1ObjectReference()
    node_ref.kind = "Node"
    node_ref.apiVersion = "v1"
    node_ref.name = node
    pod_meta = client.V1ObjectMeta()
    pod_meta.name = name
    binding = client.V1Binding(metadata=pod_meta, target=node_ref)
    return v1.create_namespaced_binding(namespace, binding)
def add_owner_reference(obj, nb):
    """Append a reference to notebook resource `nb` onto obj.metadata.owner_references."""
    owner = client.V1ObjectReference(
        api_version=nb["apiVersion"],
        kind=nb["kind"],
        name=nb["metadata"]["name"],
        uid=nb["metadata"]["uid"],
    )
    # Fall back to a fresh list when owner_references is unset/empty.
    refs = obj.metadata.owner_references or []
    refs.append(owner)
    obj.metadata.owner_references = refs
def scheduler(name, node, namespace='default'):
    """Bind pod `name` onto `node` in `namespace`."""
    body = client.V1Binding()
    target = client.V1ObjectReference()
    target.kind = 'Node'
    target.apiVersion = 'v1'
    target.name = node
    meta = client.V1ObjectMeta()
    meta.name = name
    body.target = target
    body.metadata = meta
    # Fixed call: create_namespaced_binding takes (namespace, body); the
    # extra leading `name` argument belonged to the older
    # create_namespaced_binding_binding method and shifted every argument.
    return v1.create_namespaced_binding(namespace, body)
def create_endpoint_address(ip: str, pod: client.V1Pod) -> client.V1EndpointAddress:
    """Build a V1EndpointAddress for `ip` whose target_ref points at `pod`."""
    ref = client.V1ObjectReference(
        kind='Pod',
        name=pod.metadata.name,
        namespace=pod.metadata.namespace,
        uid=pod.metadata.uid,
        resource_version=pod.metadata.resource_version)
    return client.V1EndpointAddress(ip=ip,
                                    node_name=pod.spec.node_name,
                                    target_ref=ref)
def scheduler(name, node, namespace="custom-scheduler"):
    """Bind pod `name` to `node` (namespace defaults to custom-scheduler)."""
    node_ref = client.V1ObjectReference()
    node_ref.kind = "Node"
    node_ref.apiVersion = "v1"
    node_ref.name = node
    pod_meta = client.V1ObjectMeta()
    pod_meta.name = name
    binding = client.V1Binding()
    binding.target = node_ref
    binding.metadata = pod_meta
    return v1.create_namespaced_binding(namespace, binding)
def _mock_vault_functionality(self):
    """Stub out VaultStore secrets and the k8s service-account read for tests."""
    # Every requested vault key resolves to the literal value "secret".
    VaultStore.get_secrets = unittest.mock.Mock(
        return_value={key: "secret" for key in self.vault_secrets})
    sa_meta = client.V1ObjectMeta(name="test-service-account",
                                  namespace=self.namespace)
    sa_secret = client.V1ObjectReference(name=self.vault_secret_name,
                                         namespace=self.namespace)
    account = client.V1ServiceAccount(metadata=sa_meta, secrets=[sa_secret])
    get_k8s().v1api.read_namespaced_service_account = unittest.mock.Mock(
        return_value=account)
def send_k8s_event(magtape_pod_name, namespace, workload_type, workload,
                   response_message):
    """Function to create a k8s event in the target namespace upon policy failure"""
    # Prefer in-cluster config; fall back to a local kubeconfig.
    try:
        config.load_incluster_config()
    except config.ConfigException:
        try:
            config.load_kube_config()
        except config.ConfigException:
            raise Exception("Could not configure kubernetes python client")

    core_api = client.CoreV1Api()
    event_time = datetime.datetime.now(datetime.timezone.utc)

    # The event points at the workload that failed the policy check.
    involved = client.V1ObjectReference(name=workload,
                                        kind=workload_type,
                                        namespace=namespace)
    event_meta = client.V1ObjectMeta(
        generate_name="magtape-policy-failure.",
        namespace=namespace,
        labels={"magtape-event": "policy-failure"},
    )
    event = client.V1Event(
        action="MagTape Policy Failure",
        event_time=event_time,
        first_timestamp=event_time,
        involved_object=involved,
        last_timestamp=event_time,
        message=response_message,
        metadata=event_meta,
        reason="MagTapePolicyFailure",
        type="Warning",
        reporting_component="magtape",
        reporting_instance=magtape_pod_name,
    )
    try:
        core_api.create_namespaced_event(namespace, event)
    except ApiException as exception:
        app.logger.info(
            f"Exception when creating a namespace event: {exception}\n")