def deploy_storage_class():
    """Deploys the default storage class for KaDalu if not exists"""
    api_instance = client.StorageV1Api()
    scs = api_instance.list_storage_class()
    sc_names = []
    for tmpl in os.listdir(MANIFESTS_DIR):
        if tmpl.startswith("storageclass-"):
            sc_names.append(
                tmpl.replace("storageclass-", "").replace(".yaml.j2", ""))

    installed_scs = [item.metadata.name for item in scs.items]
    for sc_name in sc_names:
        filename = os.path.join(MANIFESTS_DIR, "storageclass-%s.yaml" % sc_name)
        if sc_name in installed_scs:
            logging.info(logf("Ignoring already deployed StorageClass",
                              manifest=filename))
            continue

        # Deploy Storage Class
        template(filename, namespace=NAMESPACE, kadalu_version=VERSION)
        execute(KUBECTL_CMD, "create", "-f", filename)
        logging.info(logf("Deployed StorageClass", manifest=filename))
def __init__(self, auth_dict):
    '''
    set kubernetes configuration strings
    :param auth_dict:
    '''
    try:
        self.configuration = client.Configuration()
        # kube cluster host
        self.configuration.host = auth_dict['host']
        self.configuration.verify_ssl = True
        # certificate-authority-data
        self.configuration.ssl_ca_cert = auth_dict['ssl_ca_cert_file']
        # client-certificate-data
        self.configuration.cert_file = auth_dict['ssl_cert_file']
        # client-key-data
        self.configuration.key_file = auth_dict['ssl_key_file']
        # http proxy
        self.configuration.proxy = auth_dict['proxy']
        # check if debug env value is True/False to enable debugging
        self.configuration.debug = os.environ.get('KUBE_DEBUG', False)
        # kube cluster context
        self.configuration.api_key['context'] = auth_dict['context']
        self.v1 = client.CoreV1Api(client.ApiClient(self.configuration))
        self.v1_extension = client.ExtensionsV1beta1Api(
            client.ApiClient(self.configuration))
        self.storage = client.StorageV1Api(
            client.ApiClient(self.configuration))
        self.scale = client.AutoscalingV1Api(
            client.ApiClient(self.configuration))
    except BaseException:
        logger_settings.logger.info(
            'There is some generic problem parsing the config file')
def deploy_storage_class(obj):
    """Deploys the default and custom storage class for KaDalu if not exists"""

    # Deploy default Storage Class
    api_instance = client.StorageV1Api()
    scs = api_instance.list_storage_class()
    sc_names = []
    for tmpl in os.listdir(MANIFESTS_DIR):
        if tmpl.startswith("storageclass-") and tmpl.endswith(".j2"):
            sc_names.append(
                tmpl.replace("storageclass-", "").replace(".yaml.j2", ""))

    installed_scs = [item.metadata.name for item in scs.items]
    for sc_name in sc_names:
        filename = os.path.join(MANIFESTS_DIR, "storageclass-%s.yaml" % sc_name)
        if sc_name in installed_scs:
            logging.info(logf("StorageClass already present, continuing with Apply",
                              manifest=filename))

        template(filename, namespace=NAMESPACE, kadalu_version=VERSION,
                 hostvol_name=obj["metadata"]["name"],
                 kadalu_format=obj["spec"].get("kadalu_format", "native"))
        lib_execute(KUBECTL_CMD, APPLY_CMD, "-f", filename)
        logging.info(logf("Deployed StorageClass", manifest=filename))
def check_storage_class_deleted(sc_name, created_objects):
    """
    Checks that storage class sc_name has been deleted.
    Asserts if the SC is not deleted within the retry window.
    """
    if sc_name == "" or keep_objects:
        return
    count = 12
    api_instance = client.StorageV1Api()
    while count > 0:
        try:
            api_response = api_instance.read_storage_class(name=sc_name, pretty=True)
            LOGGER.debug(str(api_response))
            count -= 1
            time.sleep(15)
            LOGGER.info(f'SC Delete : Checking deletion for StorageClass {sc_name}')
        except ApiException:
            LOGGER.info(f'SC Delete : StorageClass {sc_name} has been deleted')
            return
    LOGGER.error(f'StorageClass {sc_name} is not deleted')
    clean_with_created_objects(created_objects)
    assert False
def create_api_client(api="BatchV1"): """Create Kubernetes API client using config. :param api: String which represents which Kubernetes API to spawn. By default BatchV1. :returns: Kubernetes python client object for a specific API i.e. BatchV1. """ k8s_config.load_incluster_config() api_configuration = client.Configuration() api_configuration.verify_ssl = False if api == "extensions/v1beta1": api_client = client.ExtensionsV1beta1Api() elif api == "CoreV1": api_client = client.CoreV1Api() elif api == "StorageV1": api_client = client.StorageV1Api() elif api == "AppsV1": api_client = client.AppsV1Api() elif api == "networking.k8s.io/v1beta1": api_client = client.NetworkingV1beta1Api() elif api == "CustomObjectsApi": api_client = client.CustomObjectsApi() else: api_client = client.BatchV1Api() return api_client
def confirm_storage_class(context):
    if context is None:
        raise SystemExit("invalid empty context for StorageClass given")
    load_kube(context)
    api = client.StorageV1Api()
    return general_confirm("StorageClass",
                           lambda: api.list_storage_class(),
                           lambda i: i.metadata.name)
def _detect_api_object(self, api_version):
    # Due to https://github.com/kubernetes-client/python/issues/387
    if api_version == 'apps/v1beta1':
        return client.AppsV1beta1Api()
    if api_version == 'v1':
        return client.CoreV1Api()
    if api_version == 'extensions/v1beta1':
        return client.ExtensionsV1beta1Api()
    if api_version == 'batch/v1':
        return client.BatchV1Api()
    if api_version == 'batch/v2alpha1':
        return client.BatchV2alpha1Api()
    if api_version == 'batch/v1beta1':
        return client.BatchV1beta1Api()
    if api_version == 'policy/v1beta1':
        return client.PolicyV1beta1Api()
    if api_version == 'storage.k8s.io/v1':
        return client.StorageV1Api()
    if api_version == 'apps/v1':
        return client.AppsV1Api()
    if api_version == 'autoscaling/v1':
        return client.AutoscalingV1Api()
    if api_version == 'rbac.authorization.k8s.io/v1':
        return client.RbacAuthorizationV1Api()
    if api_version == 'scheduling.k8s.io/v1alpha1':
        return client.SchedulingV1alpha1Api()
    if api_version == 'scheduling.k8s.io/v1beta1':
        return client.SchedulingV1beta1Api()
    if api_version == 'test/test':
        return K8sClientMock(self.name)
def check_storage_class(sc_name):
    """
    Checks whether the storage class exists or not

    Args:
        param1: sc_name - name of storage class to be checked

    Returns:
        True, if storage class exists
        False, if storage class does not exist

    Raises:
        None
    """
    if sc_name == "":
        return False
    api_instance = client.StorageV1Api()
    try:
        # TBD: Show StorageClass Parameter in tabular Form
        api_response = api_instance.read_storage_class(name=sc_name, pretty=True)
        LOGGER.info(f'SC Check : Storage class {sc_name} does exist on the cluster')
        LOGGER.debug(str(api_response))
        return True
    except ApiException:
        LOGGER.info("storage class does not exist")
        return False
def __init__(self):
    config.load_kube_config()
    self.K8SAppsV1Client = client.AppsV1Api()
    self.K8SCoreV1Client = client.CoreV1Api()
    self.K8SStorageV1Client = client.StorageV1Api()
    self.K8SRbacAuthorizationV1Client = client.RbacAuthorizationV1Api()
    self.K8SPolicyV1beta1Client = client.PolicyV1beta1Api()
def create_kube_clients(kube_host=os.getenv('KUBE_HOST'),
                        kube_token=os.getenv('KUBE_TOKEN')):
    # configure client
    config = client.Configuration()
    config.host = kube_host
    config.api_key['authorization'] = kube_token
    config.api_key_prefix['authorization'] = 'Bearer'
    config.verify_ssl = False
    # config.assert_hostname = False

    # create API instance
    api_client = client.ApiClient()
    kube = client.CoreV1Api(client.ApiClient(config))
    extension_api = client.ExtensionsV1beta1Api(client.ApiClient(config))
    appsv1_api = client.AppsV1Api(client.ApiClient(config))
    batchv1_api = client.BatchV1Api(client.ApiClient(config))
    storageV1Api = client.StorageV1Api(client.ApiClient(config))

    # return kube, extension_api, appsv1_api, api_client, batchv1_api, storageV1Api
    return SimpleNamespace(kube=kube,
                           extension_api=extension_api,
                           appsv1_api=appsv1_api,
                           api_client=api_client,
                           batchv1_api=batchv1_api,
                           storageV1Api=storageV1Api)
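A hedged usage sketch for the namespace returned above (assumes KUBE_HOST and KUBE_TOKEN are set in the environment):

# All clients hang off one SimpleNamespace; the attribute names
# mirror the keyword arguments passed to SimpleNamespace above
clients = create_kube_clients()
scs = clients.storageV1Api.list_storage_class()
print([sc.metadata.name for sc in scs.items])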
def __init__(self, token=None, ca_file=None, context=None, host='127.0.0.1',
             port=443, user='******', debug=False, namespace=None,
             readwritemany=False):
    self.host = host
    self.port = port
    self.user = user
    self.ca_file = ca_file
    self.readwritemany = readwritemany
    self.context = context
    self.accessmode = 'ReadWriteMany' if readwritemany else 'ReadWriteOnce'
    self.conn = 'OK'
    self.namespace = namespace
    self.token = token
    api_client = None
    if host is not None and port is not None and token is not None:
        configuration = client.Configuration()
        configuration.host = "https://%s:%s" % (host, port)
        configuration.api_key = {"authorization": "Bearer " + token}
        if ca_file is not None:
            configuration.ssl_ca_cert = ca_file
        else:
            configuration.verify_ssl = False
        api_client = client.ApiClient(configuration)
    else:
        contexts, current = config.list_kube_config_contexts()
        if context is not None:
            contexts = [entry for entry in contexts if entry['name'] == context]
            if contexts:
                context = contexts[0]
                contextname = context['name']
            else:
                self.conn = None
        else:
            context = current
            contextname = current['name']
        self.contextname = contextname
        config.load_kube_config(context=contextname)
        if namespace is None and 'namespace' in context['context']:
            self.namespace = context['context']['namespace']
        if 'cluster' in context['context'] and ':' in context['context']['cluster']:
            self.host = context['context']['cluster'].split(':')[0].replace('-', '.')
    self.core = client.CoreV1Api(api_client=api_client)
    self.v1beta = client.ExtensionsV1beta1Api(api_client=api_client)
    self.storageapi = client.StorageV1Api(api_client=api_client)
    self.api_client = api_client
    self.debug = debug
    if self.namespace is None:
        self.namespace = 'default'
    return
def confirm_volume_attachment(context):
    if context is None:
        raise SystemExit("invalid empty context for VolumeAttachment given")
    load_kube(context)
    api = client.StorageV1Api()
    return general_confirm("VolumeAttachment",
                           lambda: api.list_volume_attachment(),
                           lambda i: i.metadata.name)
async def watch_va_async():
    log = logging.getLogger('va_events')
    log.setLevel(logging.INFO)
    storage = client.StorageV1Api()
    w = watch.Watch()
    for event in w.stream(storage.list_volume_attachment):
        process_va_event(log, event)
        await asyncio.sleep(0)
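A minimal sketch of driving the coroutine above (assumes kubeconfig or in-cluster auth was loaded beforehand; note that w.stream() is a blocking generator, so the await only yields control between events):

import asyncio

# Runs until the watch stream ends or is interrupted
asyncio.run(watch_va_async())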
def k8s_storage_client(k8s_conf):
    """
    Retrieves the kubernetes storage client
    :param k8s_conf: the k8s configuration used to deploy the cluster
    :return: a kubernetes.client.StorageV1Api instance
    """
    logger.debug('Retrieving K8s storage API client')
    return client.StorageV1Api(get_client_conn(k8s_conf))
def check_pool(self, pool):
    storage = client.StorageV1Api()
    storageclasses = storage.list_storage_class().items
    if storageclasses:
        storageclasses = [s.metadata.name for s in storageclasses]
        if pool in storageclasses:
            return pool
    common.pprint("Pool %s not found. Using None" % pool, color='blue')
    return None
def __init__(self, cluster_spec, cluster_conf=None, kubeconfig=None,
             kubeconfig_context=None, production=False):
    """Initialise Kubernetes specific ReanaBackend-object.

    :param cluster_spec: Dictionary representing complete REANA
        cluster spec file.

    :param cluster_conf: A generator/iterable of Kubernetes YAML manifests
        of REANA components as Python objects. If set to `None`,
        cluster_conf will be generated from manifest templates in the
        `templates` folder specified in `_conf.templates_folder`.

    :param kubeconfig: Name of the kube-config file to use for configuring
        reana-cluster. If set to `None` then `$HOME/.kube/config` will be
        used.
        Note: Might pick up a config-file defined in $KUBECONFIG as well.

    :param kubeconfig_context: Set the active context. If set to `None`,
        current_context from the config file will be used.

    :param production: Boolean which represents whether REANA is
        configured with production setup (using CEPH) or not.
    """
    logging.debug('Creating a ReanaBackend object '
                  'for Kubernetes interaction.')

    # Load Kubernetes cluster configuration. If reana-cluster.yaml
    # doesn't specify this, K8S Python API defaults to '$HOME/.kube/config'
    self.kubeconfig = kubeconfig or \
        cluster_spec['cluster'].get('config', None)
    self.kubeconfig_context = kubeconfig_context or \
        cluster_spec['cluster'].get('config_context', None)

    k8s_api_client_config = Configuration()
    k8s_config.load_kube_config(kubeconfig, self.kubeconfig_context,
                                k8s_api_client_config)
    Configuration.set_default(k8s_api_client_config)

    # Instantiate clients for various Kubernetes REST APIs
    self._corev1api = k8s_client.CoreV1Api()
    self._versionapi = k8s_client.VersionApi()
    self._extbetav1api = k8s_client.ExtensionsV1beta1Api()
    self._rbacauthorizationv1api = k8s_client.RbacAuthorizationV1Api()
    self._storagev1api = k8s_client.StorageV1Api()

    self.k8s_api_client_config = k8s_api_client_config
    self.cluster_spec = cluster_spec
    self.cluster_conf = cluster_conf or \
        self.generate_configuration(cluster_spec, production=production)
def __init__(self, api_client):
    # type: (client.ApiClient) -> None
    super(ApiData, self).__init__()
    logging.info('Collecting API data')
    logging.debug('Constructing API client wrappers')
    core_api = client.CoreV1Api(api_client)
    storage_api = client.StorageV1Api(api_client)
    rbac_authorization_api = client.RbacAuthorizationV1Api(api_client)
    apps_api = client.ExtensionsV1beta1Api(api_client)
    self.custom_api = client.CustomObjectsApi(api_client)

    logging.debug('Retrieving data')
    storage_classes = storage_api.list_storage_class()
    namespaces = core_api.list_namespace()
    roles = rbac_authorization_api.list_role_for_all_namespaces()
    cluster_roles = rbac_authorization_api.list_cluster_role()
    component_statuses = core_api.list_component_status()
    nodes = core_api.list_node()
    # Try to make it a post, when client api support sending post data
    # include {"num_stats": 1} to get the latest only and use less bandwidth
    nodes_stats = [
        core_api.connect_get_node_proxy_with_path(node.metadata.name, "stats")
        for node in nodes.items
    ]
    pvs = core_api.list_persistent_volume()
    pvcs = core_api.list_persistent_volume_claim_for_all_namespaces()
    pods = core_api.list_pod_for_all_namespaces()
    services = core_api.list_service_for_all_namespaces()
    deployments = apps_api.list_deployment_for_all_namespaces()

    logging.debug('Assigning collected data')
    self.storage_classes = StorageClassList(map(StorageClass, storage_classes.items))
    self.namespaces = NamespaceList(map(Namespace, namespaces.items))
    self.roles = RoleList(map(Role, roles.items))
    self.cluster_roles = RoleList(map(Role, cluster_roles.items))
    self.component_statuses = ComponentStatusList(
        map(ComponentStatus, component_statuses.items))
    self.nodes = NodeList(map(Node, nodes.items, nodes_stats))
    self.persistent_volumes = PersistentVolumeList(
        map(PersistentVolume, pvs.items))
    self.persistent_volume_claims = PersistentVolumeClaimList(
        map(PersistentVolumeClaim, pvcs.items))
    self.pods = PodList(map(Pod, pods.items))
    self.services = ServiceList(map(Service, services.items))
    self.deployments = DeploymentList(map(Deployment, deployments.items))

    pods_custom_metrics = {
        "memory": ['memory_rss', 'memory_swap', 'memory_usage_bytes',
                   'memory_max_usage_bytes'],
        "fs": ['fs_inodes', 'fs_reads', 'fs_writes', 'fs_limit_bytes',
               'fs_usage_bytes'],
        "cpu": ['cpu_system', 'cpu_user', 'cpu_usage'],
    }
    self.pods_Metrics = dict()  # type: Dict[str, Dict[str, List]]
    for metric_group, metrics in pods_custom_metrics.items():
        self.pods_Metrics[metric_group] = self.get_namespaced_group_metric(metrics)
def remove_storage_class(context, name):
    if context is None:
        raise SystemExit("invalid empty context for StorageClass given")
    if name is None:
        raise SystemExit("invalid empty name for StorageClass given")
    load_kube(context)
    api = client.StorageV1Api()
    ret, status, _ = api.delete_storage_class_with_http_info(name)
    handle_status(ret, status, "StorageClass", None, name)
def remove_volume_attachment(context, name):
    if context is None:
        raise SystemExit("invalid empty context for VolumeAttachment given")
    if name is None:
        raise SystemExit("invalid empty name for VolumeAttachment given")
    load_kube(context)
    api = client.StorageV1Api()
    ret, status, _ = api.delete_volume_attachment_with_http_info(name)
    handle_status(ret, status, "VolumeAttachment", None, name)
def wait_for_volume_attachment_is_up(context, name):
    if name is None:
        raise SystemExit("invalid empty name for VolumeAttachment given")
    if context is None:
        raise SystemExit("invalid empty context given")
    load_kube(context)
    print("check availability of", "VolumeAttachment", name)
    api = client.StorageV1Api()
    return general_up_check(None, "VolumeAttachment", name,
                            lambda: api.read_volume_attachment_with_http_info(name))
def wait_for_storage_class_is_away(context, name):
    if name is None:
        raise SystemExit("invalid empty name for StorageClass given")
    if context is None:
        raise SystemExit("invalid empty context given")
    load_kube(context)
    print("check removal of", "StorageClass", name)
    api = client.StorageV1Api()
    return general_away_check(None, "StorageClass", name,
                              lambda: api.read_storage_class_with_http_info(name))
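A hedged sketch of how the removal helpers above might chain during teardown ("demo-sc" is a hypothetical name; context comes from the caller, as in the snippets):

# Issue the delete, then block until the object is actually gone
remove_storage_class(context, "demo-sc")
wait_for_storage_class_is_away(context, "demo-sc")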
def get_default_storage_class():
    """
    Gets the default storage class of the cluster
    :return: the default storage class of the cluster
    """
    config.load_kube_config()
    v1 = client.StorageV1Api()
    sc = v1.list_storage_class()
    for i in sc.items:
        # annotations is None when a StorageClass carries no annotations at all
        annotations = i.metadata.annotations or {}
        if 'storageclass.kubernetes.io/is-default-class' in annotations:
            return i.metadata.name
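A minimal usage sketch (hypothetical caller; the function returns None when no StorageClass carries the default-class annotation):

default_sc = get_default_storage_class()
print("default StorageClass:", default_sc)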
def deploy_storage_class():
    api_instance = client.StorageV1Api()
    scs = api_instance.list_storage_class(include_uninitialized=True)
    for sc in scs.items:
        if sc.metadata.name == STORAGE_CLASS_NAME:
            return

    # Deploy Storage Class
    filename = os.path.join(MANIFESTS_DIR, "storageclass.yaml")
    template(filename, namespace=NAMESPACE, kadalu_version=VERSION)
    execute(kubectl_cmd, "create", "-f", filename)
    info("Deployed StorageClass", manifest=filename)
def yaml_json_add(request):
    user = user_get(request)
    hostname = request.POST.get('hostname')
    auth_config(user, hostname)
    core_api = client.CoreV1Api()  # namespace, pod, service, pv, pvc
    apps_api = client.AppsV1Api()  # deployment
    batch1_api = client.BatchV1Api()
    batch2_api = client.BatchV2alpha1Api()
    networking_api = client.NetworkingV1beta1Api()  # ingress
    storage_api = client.StorageV1Api()  # storage_class
    namespace = request.POST.get('namespace', None)
    resource = request.POST.get('resource', None)
    name = request.POST.get('name', None)
    types = request.POST.get('types')
    if types == "yaml":
        body = request.POST.get('yaml', None)
        body = yaml.safe_load(body)
    else:
        body = request.POST.get('json', None)
    try:
        if body['kind'] == "Namespace":
            core_api.create_namespace(body)
        elif body['kind'] == "Deployment":
            apps_api.create_namespaced_deployment(namespace, body)
        elif body['kind'] == "DaemonSet":
            apps_api.create_namespaced_daemon_set(namespace, body)
        elif body['kind'] == "StatefulSet":
            apps_api.create_namespaced_stateful_set(namespace, body)
        elif body['kind'] == "Service":
            core_api.create_namespaced_service(namespace, body)
        elif body['kind'] == "Pod":
            core_api.create_namespaced_pod(namespace, body)
        elif body['kind'] == "Ingress":
            networking_api.create_namespaced_ingress(namespace, body)
        elif body['kind'] == "PersistentVolume":
            core_api.create_persistent_volume(body)
        elif body['kind'] == "PersistentVolumeClaim":
            core_api.create_namespaced_persistent_volume_claim(namespace, body)
        elif body['kind'] == "ConfigMap":
            core_api.create_namespaced_config_map(namespace, body)
        elif body['kind'] == "Secret":
            core_api.create_namespaced_secret(namespace, body)
        elif body['kind'] == "CronJob":
            batch2_api.create_namespaced_cron_job(namespace, body)
        elif body['kind'] == "Job":
            batch1_api.create_namespaced_job(namespace, body)
        data = {"code": 0, "msg": f"{body['kind']} created successfully"}
        return data
    except Exception as e:
        data = error(e)
        return data
def __init__(self):
    # https://github.com/kubernetes-client/python/issues/309
    warnings.simplefilter("ignore", ResourceWarning)

    self.config = config.load_kube_config()
    self.k8s_client = client.ApiClient()
    self.core_v1 = client.CoreV1Api()
    self.apps_v1 = client.AppsV1Api()
    self.batch_v1_beta1 = client.BatchV1beta1Api()
    self.custom_objects_api = client.CustomObjectsApi()
    self.policy_v1_beta1 = client.PolicyV1beta1Api()
    self.storage_v1_api = client.StorageV1Api()
def deploy_storage_class():
    """Deploys the default storage class for KaDalu if not exists"""
    api_instance = client.StorageV1Api()
    scs = api_instance.list_storage_class(include_uninitialized=True)
    for item in scs.items:
        if item.metadata.name == STORAGE_CLASS_NAME:
            return

    # Deploy Storage Class
    filename = os.path.join(MANIFESTS_DIR, "storageclass.yaml")
    template(filename, namespace=NAMESPACE, kadalu_version=VERSION)
    execute(KUBECTL_CMD, "create", "-f", filename)
    info("Deployed StorageClass", manifest=filename)
def get_kube_object(self, host=None, port=6443, token=None):
    if host is None and token is None:
        host = self.master_machine[IP_ADDRESS]
        token = self.kube_auth_token
    configuration = client.Configuration()
    configuration.host = "https://" + host + ":" + str(port)
    configuration.verify_ssl = False
    configuration.debug = True
    configuration.api_key = {"authorization": "Bearer " + token}
    client.Configuration.set_default(configuration)
    v1_api = client.CoreV1Api()
    self.set_kube_object(v1_api)
    storage_api = client.StorageV1Api(client.ApiClient(configuration))
    self.set_storage_api(storage_api)
def delete_storage_class(sc_name, created_objects):
    """deletes storage class sc_name"""
    if sc_name == "" or keep_objects:
        return
    api_instance = client.StorageV1Api()
    try:
        LOGGER.info(f'SC Delete : deleting storage class {sc_name}')
        api_response = api_instance.delete_storage_class(
            name=sc_name, pretty=True, grace_period_seconds=0)
        LOGGER.debug(str(api_response))
        created_objects["sc"].remove(sc_name)
    except ApiException as e:
        LOGGER.error(
            f"Exception when calling StorageV1Api->delete_storage_class: {e}")
        clean_with_created_objects(created_objects)
        assert False
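A hedged sketch tying the three test helpers above together (check_storage_class, delete_storage_class, check_storage_class_deleted are defined in the surrounding snippets; "demo-sc" and the bookkeeping dict are hypothetical, matching the shape delete_storage_class expects):

created_objects = {"sc": ["demo-sc"]}
if check_storage_class("demo-sc"):
    delete_storage_class("demo-sc", created_objects)
    check_storage_class_deleted("demo-sc", created_objects)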
def fake_k8s_client_dict():
    k8s_client_dict = {
        'v1': client.CoreV1Api(),
        'apiregistration.k8s.io/v1': client.ApiregistrationV1Api(),
        'apps/v1': client.AppsV1Api(),
        'authentication.k8s.io/v1': client.AuthenticationV1Api(),
        'authorization.k8s.io/v1': client.AuthorizationV1Api(),
        'autoscaling/v1': client.AutoscalingV1Api(),
        'batch/v1': client.BatchV1Api(),
        'coordination.k8s.io/v1': client.CoordinationV1Api(),
        'networking.k8s.io/v1': client.NetworkingV1Api(),
        'rbac.authorization.k8s.io/v1': client.RbacAuthorizationV1Api(),
        'scheduling.k8s.io/v1': client.SchedulingV1Api(),
        'storage.k8s.io/v1': client.StorageV1Api()
    }
    return k8s_client_dict
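A small usage sketch for the fake client mapping (hypothetical test code, assuming the same `client` import as the snippets above):

# Look up an API client by its apiVersion string
clients = fake_k8s_client_dict()
storage_api = clients['storage.k8s.io/v1']
assert isinstance(storage_api, client.StorageV1Api)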
def deploy_storage_class():
    """Deploys the default storage class for KaDalu if not exists"""
    api_instance = client.StorageV1Api()
    scs = api_instance.list_storage_class()
    create_cmd = "create"
    for item in scs.items:
        if item.metadata.name.startswith(STORAGE_CLASS_NAME_PREFIX):
            logging.info("Updating already deployed StorageClass")
            create_cmd = "apply"

    # Deploy Storage Class
    filename = os.path.join(MANIFESTS_DIR, "storageclass.yaml")
    template(filename, namespace=NAMESPACE, kadalu_version=VERSION)
    execute(KUBECTL_CMD, create_cmd, "-f", filename)
    logging.info(logf("Deployed StorageClass", manifest=filename))