def version_api_client_from_config(config):
    if config is None:
        k8sconfig.load_incluster_config()
        return k8sclient.VersionApi()
    else:
        client = k8sclient.ApiClient(configuration=config)
        return k8sclient.VersionApi(api_client=client)
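# Hedged usage sketch (not from the original module): shows one way the helper
# above might be exercised, assuming k8sclient and k8sconfig are aliases for
# kubernetes.client and kubernetes.config and that out-of-cluster callers build
# a Configuration from their local kubeconfig.
def _example_version_api_usage():
    from kubernetes import client as k8sclient, config as k8sconfig

    # Out-of-cluster: load the kubeconfig into an explicit Configuration object.
    cfg = k8sclient.Configuration()
    k8sconfig.load_kube_config(client_configuration=cfg)
    version_api = version_api_client_from_config(cfg)

    # In-cluster callers would instead pass None and rely on the mounted
    # service-account credentials.
    return version_api.get_code().git_version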
def get_release_version(self) -> str:
    """Return the current manager version.

    Returns:
        str: The current manager version.
    """
    return client.VersionApi().get_code().git_version
def __call__(self, pl, segment_info,
             show_kube_logo=True, show_cluster=True, show_namespace=True,
             show_default_namespace=False, alerts=[], **kwargs):
    pl.debug('Running powerline-kubernetes')

    kube_config_location = segment_info['environ'].get(
        'KUBECONFIG', '~/.kube/config')

    self.pl = pl
    self.show_kube_logo = show_kube_logo
    self.show_cluster = show_cluster
    self.show_namespace = show_namespace
    self.show_default_namespace = show_default_namespace
    self.alerts = alerts

    try:
        k8s_merger = kube_config.KubeConfigMerger(kube_config_location)
        k8s_loader = kube_config.KubeConfigLoader(
            config_dict=k8s_merger.config)
        current_context = k8s_loader.current_context
        ctx = current_context['context']
        context = current_context['name']

        try:
            namespace = ctx['namespace']
        except KeyError:
            namespace = 'default'

        current_time = time.monotonic()
        if current_time - self.last_api_server_check > self.api_server_check_interval:
            self.last_api_server_check = current_time
            k8s_merger.save_changes()
            client_config = kubernetes_client.Configuration()
            k8s_loader.load_and_set(client_config)
            version_api = kubernetes_client.VersionApi(
                kubernetes_client.ApiClient(configuration=client_config))
            try:
                pl.debug(version_api.get_code())
            except Exception as e:
                pl.error(e)
                self.api_server_alive = False
                return
            else:
                self.api_server_alive = True
        elif not self.api_server_alive:
            pl.debug('Assuming kube-apiserver is still dead.')
            return
    except Exception as e:
        pl.error(e)
        return

    return self.build_segments(context, namespace)
def deploy_csi_pods(core_v1_client):
    """
    Look for CSI pods; if any CSI pod is found, CSI is already deployed.
    """
    create_cmd = "create"
    pods = core_v1_client.list_namespaced_pod(NAMESPACE)
    for pod in pods.items:
        if pod.metadata.name.startswith(CSI_POD_PREFIX):
            logging.info("Updating already deployed CSI pods")
            create_cmd = "apply"

    # Deploy CSI Pods
    api_instance = client.VersionApi().get_code()
    if api_instance.major > "1" or api_instance.major == "1" and \
       api_instance.minor >= "14":
        filename = os.path.join(MANIFESTS_DIR, "csi-driver-object.yaml")
        template(filename, namespace=NAMESPACE, kadalu_version=VERSION)
        execute(KUBECTL_CMD, create_cmd, "-f", filename)
    else:
        filename = os.path.join(MANIFESTS_DIR, "csi-driver-crd.yaml")
        template(filename, namespace=NAMESPACE, kadalu_version=VERSION)
        execute(KUBECTL_CMD, create_cmd, "-f", filename)

    filename = os.path.join(MANIFESTS_DIR, "csi.yaml")
    docker_user = os.environ.get("DOCKER_USER", "kadalu")
    template(filename, namespace=NAMESPACE, kadalu_version=VERSION,
             docker_user=docker_user, k8s_dist=K8S_DIST,
             kubelet_dir=KUBELET_DIR)
    execute(KUBECTL_CMD, create_cmd, "-f", filename)
    logging.info(logf("Deployed CSI Pods", manifest=filename))
def main(rounds, *args, **kwargs):
    global v1
    try:
        config.load_kube_config()
        version = client.VersionApi().get_code()
        logging.info(
            f"Connected to {Configuration._default.host} - {version.git_version}"
        )
    except Exception as e:
        logging.error(f"Kubernetes version check failed: {e}")
        sys.exit(1)

    v1 = client.CoreV1Api()
    ns = client.V1Namespace(metadata=client.V1ObjectMeta(name=namespace))
    try:
        v1.create_namespace(ns)
    except client.exceptions.ApiException as e:
        if e.status == 409:
            pass
        else:
            raise e

    for _ in range(rounds):
        i = random.randint(0, 9)
        if i in [0, 1]:
            create_configmap()
        elif i in [2, 3, 4, 5, 6]:
            update_configmap()
        elif i in [7, 8]:
            delete_configmap()
        else:
            list_configmaps()
def init_kubernetes_client(self):
    from kubernetes.client.rest import ApiException
    from kubernetes import client as kubernetes_client, config as kubernetes_config

    kubernetes_config.load_incluster_config()
    self.api_instance = kubernetes_client.VersionApi(
        kubernetes_client.ApiClient())
    self.ApiException = ApiException
def get_server_version(configuration):
    api_instance = kube_client.VersionApi(kube_client.ApiClient(configuration))
    try:
        api_response = api_instance.get_code()
        return api_response.git_version
    except Exception as e:  # pylint: disable=broad-except
        logger.warning("Unable to fetch kubernetes version: %s\n", e)
def get_node_data(cluster_id):
    """
    Fetch node data using k8s API
    :param cluster_id:
    :return:
    """
    try:
        # fetching the token from secret of the namespace 'dashboard'
        _TOKEN = [
            base64.b64decode(secret_item.data['token']).decode('UTF-8')
            for secret_item in client.CoreV1Api().list_namespaced_secret(
                'dashboard').items
            if base64.b64decode(secret_item.data['namespace']).decode('UTF-8')
            == 'dashboard'
        ][0]
        # generating User Agent
        version_detail = client.VersionApi().get_code()
        _USER_AGENT = 'kubectl/' + version_detail.git_version + ' (' + \
            version_detail.platform + ') ' + 'kubernetes/' + \
            version_detail.git_commit[0:7]
        _HEADERS = {
            'User-Agent': _USER_AGENT,
            'Accept': 'application/json',
            'Authorization': f"Bearer {_TOKEN}"
        }
        # generating url for node listing
        _URL = client.Configuration().host + '/api/v1/nodes'
        requests.packages.urllib3.disable_warnings(
            category=InsecureRequestWarning)
        return requests.get(url=_URL, headers=_HEADERS,
                            verify=False).json()['items']
    except Exception as e:
        logger.error(e)
def validate_cluster(splunk, record):
    from kubernetes import client

    try:
        connection_stanza = splunklib.client.Stanza(splunk, "", skip_refresh=True)
        connection_stanza.refresh(
            state=splunklib.data.record({"content": record}))
        config = create_client_configuration(connection_stanza)
        api_client = client.ApiClient(config)
        version_api = client.VersionApi(api_client)
        version_api.get_code()
    except errors.ApplicationError as e:
        raise Exception("Could not connect to Kubernetes.\n\n%s" % e)
    except Exception:
        raise Exception(traceback.format_exc())

    try:
        extensions_api = client.ApiextensionsV1beta1Api(api_client)
        crd = extensions_api.read_custom_resource_definition(
            "standalones.enterprise.splunk.com")
        if crd.spec.version != "v1alpha2":
            raise errors.ApplicationError(
                "Unexpected Splunk Operator version: %s" % crd.spec.version)
    except client.rest.ApiException as e:
        if e.status == 404:
            raise errors.ApplicationError("Could not find Splunk Operator.")
        raise
    except errors.ApplicationError:
        raise
    except Exception:
        raise Exception(traceback.format_exc())

    try:
        indexer_server_count = 0
        for server in record.indexer_server.split(","):
            components = server.split(":")
            if len(components) != 2:
                raise errors.ApplicationError(
                    "Expect format \"<server>:<port>,...\" for indexer server. Got \"%s\"" % (server))
            hostname = components[0].strip()
            port = int(components[1].strip())
            import socket
            s = socket.socket()
            try:
                s.connect((hostname, port))
            except Exception as e:
                raise errors.ApplicationError(
                    "Could not connect to indexer server \"%s\": %s" % (server, e))
            finally:
                s.close()
            indexer_server_count += 1
        if indexer_server_count == 0:
            raise errors.ApplicationError("Invalid or missing indexer server")
    except errors.ApplicationError:
        raise
    except Exception:
        raise Exception(traceback.format_exc())
def get_server_version(configuration):
    api_instance = kube_client.VersionApi(kube_client.ApiClient(configuration))
    try:
        api_response = api_instance.get_code()
        return api_response.git_version
    except Exception as e:  # pylint: disable=broad-except
        telemetry.set_exception(exception=e,
                                fault_type=Get_Kubernetes_Version_Fault_Type,
                                summary='Unable to fetch kubernetes version')
        logger.warning("Unable to fetch kubernetes version: %s\n", e)
def get_server_version(configuration):
    api_instance = kube_client.VersionApi(kube_client.ApiClient(configuration))
    try:
        api_response = api_instance.get_code()
        return api_response.git_version
    except Exception as e:  # pylint: disable=broad-except
        logger.warning("Unable to fetch kubernetes version.")
        utils.kubernetes_exception_handler(e, consts.Get_Kubernetes_Version_Fault_Type,
                                           'Unable to fetch kubernetes version',
                                           raise_error=False)
def __init__(self, cluster_spec,
             cluster_conf=None,
             kubeconfig=None,
             kubeconfig_context=None,
             production=False):
    """Initialise Kubernetes specific ReanaBackend-object.

    :param cluster_spec: Dictionary representing complete REANA
        cluster spec file.

    :param cluster_conf: A generator/iterable of Kubernetes YAML manifests
        of REANA components as Python objects. If set to `None`,
        cluster_conf will be generated from manifest templates in the
        `templates` folder specified in `_conf.templates_folder`.

    :param kubeconfig: Name of the kube-config file to use for configuring
        reana-cluster. If set to `None`, `$HOME/.kube/config` will be used.
        Note: Might pick up a config-file defined in $KUBECONFIG as well.

    :param kubeconfig_context: Set the active context. If set to `None`,
        current_context from the config file will be used.

    :param production: Boolean which represents whether REANA is
        configured with production setup (using CEPH) or not.
    """
    logging.debug('Creating a ReanaBackend object '
                  'for Kubernetes interaction.')

    # Load Kubernetes cluster configuration. If reana-cluster.yaml
    # doesn't specify this K8S Python API defaults to '$HOME/.kube/config'
    self.kubeconfig = kubeconfig or \
        cluster_spec['cluster'].get('config', None)
    self.kubeconfig_context = kubeconfig_context or \
        cluster_spec['cluster'].get('config_context', None)

    k8s_api_client_config = Configuration()

    k8s_config.load_kube_config(kubeconfig, self.kubeconfig_context,
                                k8s_api_client_config)

    Configuration.set_default(k8s_api_client_config)

    # Instantiate clients for various Kubernetes REST APIs
    self._corev1api = k8s_client.CoreV1Api()
    self._versionapi = k8s_client.VersionApi()
    self._extbetav1api = k8s_client.ExtensionsV1beta1Api()
    self._rbacauthorizationv1api = k8s_client.RbacAuthorizationV1Api()
    self._storagev1api = k8s_client.StorageV1Api()

    self.k8s_api_client_config = k8s_api_client_config

    self.cluster_spec = cluster_spec
    self.cluster_conf = cluster_conf or \
        self.generate_configuration(cluster_spec, production=production)
def save_kubernetes_info():
    """
    Save Kubernetes Cluster Info
    """
    config.load_kube_config(settings.getValue('K8S_CONFIG_FILEPATH'))
    with open(
            os.path.join(settings.getValue('RESULTS_PATH'),
                         'cloud_info.txt'), 'a+') as outf:
        api = client.CoreV1Api()
        try:
            node_info = api.list_node()
        except ApiException as err:
            raise Exception from err
        for ni_item in node_info.items:
            outf.write("\n ******************************************** \n")
            outf.write("\n System Information \n")
            sinfo = {
                'Architecture': ni_item.status.node_info.architecture,
                'Container Runtime Version':
                    ni_item.status.node_info.container_runtime_version,
                'kernel version': ni_item.status.node_info.kernel_version,
                'Kube Proxy Version': ni_item.status.node_info.kube_proxy_version,
                'Kubelet Version': ni_item.status.node_info.kubelet_version,
                'Operating System': ni_item.status.node_info.operating_system,
                'OS Image': ni_item.status.node_info.os_image
            }
            json.dump(sinfo, outf, indent=4)
            outf.write("\n List of Addresses \n")
            addresses = []
            for addrs in ni_item.status.addresses:
                entry = {'address': addrs.address, 'type': addrs.type}
                addresses.append(entry)
                json.dump(entry, outf, indent=4)
            sinfo['List of Addresses'] = addresses
            outf.write("\n Allocatable Resources \n")
            sinfo['Allocatable Resources'] = ni_item.status.allocatable
            json.dump(ni_item.status.allocatable, outf, indent=4)
            outf.write("\n Available Resources \n")
            sinfo['Available Resources'] = ni_item.status.capacity
            json.dump(ni_item.status.capacity, outf, indent=4)
        api = client.VersionApi()
        try:
            version_info = api.get_code()
        except ApiException as err:
            raise Exception from err
        outf.write("\n Version Information \n")
        vinfo = {
            'git_commit': version_info.git_commit,
            'git_version': version_info.git_version,
            'platform': version_info.platform,
            'go_version': version_info.go_version
        }
        #json.dump(vinfo, outf, indent=4)
        result = {**sinfo, **vinfo}
        return result
def get_kubernetes_version(passed_kubeconfig_value):
    config.load_kube_config(config_file=passed_kubeconfig_value)
    api_instance = client.VersionApi()
    try:
        api_response = api_instance.get_code()
        api_response = api_response.__dict__
        LOGGER.info(f"kubernetes version is {api_response['_git_version']}")
        LOGGER.info(f"platform is {api_response['_platform']}")
    except ApiException as e:
        LOGGER.info(f"Kubernetes version cannot be fetched due to {e}")
def _handler_provision(command, resources, priority_evaluator, use_kubeconfig,
                       sync_mode, show_logs):
    kubeconfig_namespace = None

    if priority_evaluator.environment_deprecated():
        log.warning(
            "K8S_HOST and K8S_CA environment variables support is deprecated "
            "and will be discontinued in the future. Use K8S_MASTER_URI and K8S_CA_BASE64 instead."
        )

    # INFO rvadim: https://github.com/kubernetes-client/python/issues/430#issuecomment-359483997
    if use_kubeconfig:
        try:
            load_kube_config()
            kubeconfig_namespace = list_kube_config_contexts()[1].get(
                'context').get('namespace')
        except Exception as e:
            raise RuntimeError(e)
    else:
        client.Configuration.set_default(
            priority_evaluator.k8s_client_configuration())

    settings.K8S_NAMESPACE = priority_evaluator.k8s_namespace_default(
        kubeconfig_namespace)
    log.info('Default namespace "{}"'.format(settings.K8S_NAMESPACE))

    if not settings.K8S_NAMESPACE:
        log.info(
            "Default namespace is not set. "
            "This may lead to provisioning error, if namespace is not set for each resource."
        )

    try:
        deprecation_checker = ApiDeprecationChecker(
            client.VersionApi().get_code().git_version[1:])
        available_checker = ResourceAvailabilityChecker(
            make_resource_getters_list())

        for resource in resources:
            deprecation_checker.run(resource)
            available_checker.run(resource)
    except client.exceptions.ApiException:
        log.warning(
            "Error while getting API version, deprecation check will be skipped."
        )

    if command == COMMAND_DIFF:
        executor = Diff()
    else:
        executor = Provisioner(command, sync_mode, show_logs)

    for resource in resources:
        executor.run(resource)
def deploy_csi_pods(core_v1_client):
    """
    Look for CSI pods; if any CSI pod is found, CSI is already deployed.
    """
    pods = core_v1_client.list_namespaced_pod(NAMESPACE)
    for pod in pods.items:
        if pod.metadata.name.startswith(CSI_POD_PREFIX):
            logging.info("Updating already deployed CSI pods")

    # Deploy CSI Pods
    api_instance = client.VersionApi().get_code()

    if api_instance.major > "1" or api_instance.major == "1" and \
       api_instance.minor >= "22":
        csi_driver_version = csi_driver_object_api_version()
        if csi_driver_version is not None and \
           csi_driver_version != "v1":
            lib_execute(KUBECTL_CMD, DELETE_CMD, "csidriver", "kadalu")
            logging.info(logf("Deleted existing CSI Driver object",
                              csi_driver_version=csi_driver_version))

        filename = os.path.join(MANIFESTS_DIR, "csi-driver-object-v1.yaml")
        template(filename, namespace=NAMESPACE, kadalu_version=VERSION)
        lib_execute(KUBECTL_CMD, APPLY_CMD, "-f", filename)

    elif api_instance.major > "1" or api_instance.major == "1" and \
            api_instance.minor >= "14":
        filename = os.path.join(MANIFESTS_DIR, "csi-driver-object.yaml")
        template(filename, namespace=NAMESPACE, kadalu_version=VERSION)
        lib_execute(KUBECTL_CMD, APPLY_CMD, "-f", filename)

    else:
        filename = os.path.join(MANIFESTS_DIR, "csi-driver-crd.yaml")
        template(filename, namespace=NAMESPACE, kadalu_version=VERSION)
        lib_execute(KUBECTL_CMD, APPLY_CMD, "-f", filename)

    filename = os.path.join(MANIFESTS_DIR, "csi.yaml")
    docker_user = os.environ.get("DOCKER_USER", "kadalu")
    template(
        filename,
        namespace=NAMESPACE,
        kadalu_version=VERSION,
        docker_user=docker_user,
        k8s_dist=K8S_DIST,
        kubelet_dir=KUBELET_DIR,
        verbose=VERBOSE,
    )
    lib_execute(KUBECTL_CMD, APPLY_CMD, "-f", filename)
    logging.info(logf("Deployed CSI Pods", manifest=filename))
def _get_version_code(self):
    version_api = client.VersionApi()
    major = 1
    minor = 16
    try:
        api_response = version_api.get_code()
        major = int(api_response.major)
        minor = int(api_response.minor)
    except ApiException as e:
        LOG.error("Exception when calling VersionApi->get_code: %s", e)
    return major, minor
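# Hedged sketch (not part of the original class): illustrates how the
# (major, minor) tuple returned by _get_version_code above could gate a feature
# that needs Kubernetes 1.16 or newer; the method name below is hypothetical.
def _example_requires_k8s_1_16(self):
    major, minor = self._get_version_code()
    # Tuple comparison handles e.g. (1, 9) < (1, 16) correctly, unlike
    # comparing the raw string fields from VersionInfo.
    return (major, minor) >= (1, 16)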
def get(self):
    data = {
        'kubernetes_version': None
    }
    try:
        version = client.VersionApi().get_code()
        data['kubernetes_version'] = version.git_version
    except Exception:
        cherrypy.response.status = 503
        self.logger.exception("Error getting kubernetes version")
    return data
def init_cluster(self, cluster_endpoint, user, pwd, ca_cert):
    """Connect to your kubernetes (k8s) cluster."""
    try:
        with open(ca_cert) as _file:
            ca_cert_data = base64.b64encode(_file.read().encode())
    except Exception as e:
        print("Unable to read ca_cert file, error: {}".format(e))
        sys.exit(1)

    auth_template = copy.deepcopy(AUTH_TEMPLATE)
    cluster = auth_template['clusters'][0]['cluster']
    user_data = auth_template['users'][0]['user']

    cluster['certificate-authority-data'] = ca_cert_data.decode()
    cluster['server'] = 'https://{}'.format(cluster_endpoint)
    user_data['username'] = user
    user_data['password'] = pwd

    _, temp_config = tempfile.mkstemp()
    with open(temp_config, 'w') as fd:
        yaml.safe_dump(auth_template, fd)

    try:
        api_client = _get_k8s_api_client(temp_config)
        api = client.VersionApi(api_client)
        code = api.get_code()
        print("Connectivity with k8s cluster api [ok]")
        print("k8s cluster version - {}".format(code.git_version))
    except Exception as e:
        print("Unable to communicate with k8s cluster {}, error: {}".format(
            cluster_endpoint, e))
        sys.exit(1)

    os.remove(temp_config)

    if not os.path.exists(KUBE_DIR_PATH):
        os.mkdir(KUBE_DIR_PATH)
        print("Created directory [{}]".format(KUBE_DIR_PATH))

    if os.path.isfile(KUBE_FILE_PATH):
        if not _confirm(
                "Kubernetes configuration file already exists. Overwrite? "):
            print("{} configuration update was declined.".format(
                KUBE_FILE_PATH))
            return

    with open(KUBE_FILE_PATH, 'w+') as fd:
        yaml.safe_dump(auth_template, fd)
    print("Config saved [{}]".format(KUBE_FILE_PATH))
def __init__(self, k8s_config=None, namespace='default', in_cluster=False):
    if not k8s_config:
        if in_cluster:
            config.load_incluster_config()
        else:
            config.load_kube_config()
        api_client = None
    else:
        api_client = client.api_client.ApiClient(configuration=k8s_config)

    self.k8s_api = client.CoreV1Api(api_client)
    self.k8s_beta_api = client.ExtensionsV1beta1Api(api_client)
    self.k8s_version_api = client.VersionApi(api_client)
    self._namespace = namespace
def main(type, *args, **kwargs):
    global v1
    try:
        config.load_kube_config()
        version = client.VersionApi().get_code()
        logging.info(f"Connected to {Configuration._default.host} - {version.git_version}")
    except Exception as e:
        logging.error(f"Kubernetes version check failed: {e}")
        sys.exit(1)

    res = client.ApiClient().call_api('/metrics', 'GET',
                                      _return_http_data_only=True,
                                      _preload_content=False)
    operations = {}
    prev_value = 0
    for line in res.readlines():
        match = re.search(r'(?P<metric>.+){(?P<labels>.+)} (?P<value>\d+)', decode(line))
        if match:
            labels = {}
            metric = match.group('metric')
            value = int(match.group('value'))
            if not metric.startswith(METRIC):
                continue
            for part in match.group('labels').split(','):
                k, v = part.split('=')
                labels[k] = v.strip('"')
            if not labels.get('type', '').endswith(type):
                continue
            if labels['operation'] not in operations:
                operations[labels['operation']] = {'counts': [], 'buckets': [],
                                                   'type': labels['type']}
                prev_value = 0
            if metric.endswith('_bucket'):
                operations[labels['operation']]['counts'].append(value - prev_value)
                operations[labels['operation']]['buckets'].append(labels['le'])
                prev_value = value
            elif metric.endswith('_sum'):
                operations[labels['operation']]['sum'] = value
            elif metric.endswith('_count'):
                operations[labels['operation']]['count'] = value

    for operation, stats in operations.items():
        print(f"\n{stats['sum'] / stats['count']:.3f} average etcd request duration (seconds): {operation} {stats['type']}")
        fig = tpl.figure()
        fig.barh(stats['counts'], stats['buckets'], max_width=50)
        fig.show()
def get_pods(self, cluster_id, configuration, context):
    """Get list of pods from cluster."""
    api_client = config.new_client_from_config(
        config_file=configuration,
        context=context,
    )
    version_api = client.VersionApi(api_client=api_client)
    version = version_api.get_code()
    self.log.debug(
        "Connected to '%s', control plane version %s",
        cluster_id,
        version.git_version,
    )
    core_v1 = client.CoreV1Api(api_client=api_client)
    return [
        i.to_dict()
        for i in core_v1.list_pod_for_all_namespaces(watch=False).items
    ]
def __init__(self, k8s_config=None, namespace="default", in_cluster=False):
    if not k8s_config:
        if in_cluster:
            config.load_incluster_config()
        else:
            config.load_kube_config()
        api_client = None
    else:
        api_client = client.api_client.ApiClient(configuration=k8s_config)

    self.k8s_api = client.CoreV1Api(api_client)
    self.k8s_batch_api = client.BatchV1Api(api_client)
    self.k8s_apps_api = client.AppsV1Api(api_client)
    self.networking_v1_beta1_api = client.NetworkingV1beta1Api(api_client)
    self.k8s_custom_object_api = client.CustomObjectsApi()
    self.k8s_version_api = client.VersionApi(api_client)
    self.namespace = namespace
    self.in_cluster = in_cluster
def configure_kubernetes_client(context, debug):
    try:
        config.load_incluster_config()
        log.info('Configured in-cluster Kubernetes client')
    except config.config_exception.ConfigException:
        if not context:
            log.error(
                'No in-cluster Kubernetes client possible. And no context specified. Specify context and retry'
            )
            return False
        try:
            try:
                config.load_kube_config(
                    os.path.join(os.environ["HOME"], '.kube/config'), context)
                log.info(
                    f'Configured Kubernetes client for context: {context}')
            except config.config_exception.ConfigException:
                log.error(
                    f'No kubeconfig present for context: {context}. Verify and retry.'
                )
                return False
        except FileNotFoundError:
            log.error(
                'Can not create Kubernetes client config: no in-cluster config nor $HOME/.kube/config file found'
            )
            return False

    if debug:
        c = client.Configuration()
        c.debug = True
        log.debug('Enabling DEBUG on Kubernetes client')
        client.Configuration.set_default(c)

    # ping kubernetes
    try:
        client.VersionApi().get_code()
    except Exception as e:
        log.error(f'Unable to ping Kubernetes cluster: {e}')
        return False

    return True
def _check_api_server(self, k8s_merger, k8s_loader, pl):
    current_time = time.monotonic()
    if current_time - self.last_api_server_check > self.api_server_check_interval:
        self.last_api_server_check = current_time
        k8s_merger.save_changes()
        client_config = kubernetes_client.Configuration()
        k8s_loader.load_and_set(client_config)
        version_api = kubernetes_client.VersionApi(
            kubernetes_client.ApiClient(configuration=client_config))
        try:
            pl.debug(version_api.get_code())
        except Exception as e:
            pl.error(e)
            self.api_server_alive = False
            return
        else:
            self.api_server_alive = True
    elif not self.api_server_alive:
        pl.debug('Assuming kube-apiserver is still dead.')
def __init__(self, **kwargs):
    """Set cluster and initiate clients for all used resource types.

    Args:
        **kwargs: Keyword arguments; the cluster name is required.

    Raises:
        ValueError: If the cluster parameter doesn't exist.
    """
    try:
        self.cluster = kwargs['cluster']
    except KeyError:
        raise ValueError('Missing cluster parameter')
    logger.debug('Initialized KubernetesAPI for {}'.format(self.cluster))

    # set api
    api_client = self.get_api_client()
    self.api_corev1 = client.CoreV1Api(api_client=api_client)
    self.api_storagev1 = client.StorageV1Api(api_client=api_client)
    self.api_extensionsv1beta1 = client.ExtensionsV1beta1Api(
        api_client=api_client)
    self.api_version = client.VersionApi(api_client=api_client)
def __init__(self, **kwargs):
    """Set cluster and prepare clients for all used resource types.

    Args:
        **kwargs: Keyword arguments (cluster is required)
    """
    # load configuration
    try:
        self.cluster = kwargs['cluster']
    except KeyError:
        raise ValueError('Missing parameter cluster')
    logger.debug('Initialized KubernetesAPI for {}'.format(self.cluster))

    # set apis
    api_client = self.get_api_client()
    self.api_corev1 = client.CoreV1Api(api_client=api_client)
    self.api_extensionsv1beta1 = client.ExtensionsV1beta1Api(
        api_client=api_client)
    self.api_version = client.VersionApi(api_client=api_client)
def get_version(context):
    try:
        """
        all of this urllib3 garbage because they "kubernetes" do not
        respect the snake. This reduces the amount of warnings to 3,
        instead of 50. (exaggeration)
        """
        logging.getLogger("urllib3").propagate = False
        client.configuration.urllib3.disable_warnings(True)
        urllib3.disable_warnings()
        print(f"---- attempting to contact cluster {context}")
        client.configuration.urllib3.disable_warnings(True)
        version_client = client.VersionApi(
            api_client=config.new_client_from_config(context=context))
        for logger in version_client.api_client.configuration.logger.values():
            logger.removeHandler(
                version_client.api_client.configuration.logger_stream_handler)
        version = version_client.get_code().to_dict().get("git_version")
        return version
    except Exception as e:
        print(e)
        print(f'---- failed to contact {context}')
        return None
def get_version_from_cluster(
    fallback: typing.Union["versioning.KubernetesVersion", str] = None
) -> versioning.KubernetesVersion:
    """
    Returns the KubernetesVersion object associated with the configured
    cluster. If the cluster version cannot be determined, the specified
    fallback version will be returned instead. If no fallback is specified
    the earliest (oldest) version available in the kuber library
    installation will be used instead.
    """
    versions = versioning.get_all_versions()
    default = fallback or versions[0]
    if not isinstance(default, versioning.KubernetesVersion):
        default = versioning.get_version_data(fallback)

    try:
        response: client.VersionInfo = client.VersionApi().get_code()
        major = response.major
        minor = response.minor.rstrip("+")
    except ApiException:
        return default

    return next(
        (v for v in versions if v.major == major and v.minor == minor),
        default,
    )
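# Hedged sketch (not from the kuber source): resolves the cluster version with an
# explicit string fallback, relying only on the behaviour visible in
# get_version_from_cluster above (a non-KubernetesVersion fallback is passed
# through versioning.get_version_data). The "1.16" label is an assumption.
def _example_resolve_cluster_version():
    version = get_version_from_cluster(fallback="1.16")
    # Returns the cluster's <major>.<minor> pair, or the fallback when the API
    # server cannot be reached.
    return f"{version.major}.{version.minor}"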
def check_backend(self):
    """Validate k8s components and get useful information."""
    api_client = _get_k8s_api_client()
    api = client.VersionApi(api_client)
    core_v1 = client.CoreV1Api(api_client)

    _cluster_probe_connection(api, api_client)

    lbs = core_v1.list_service_for_all_namespaces(
        label_selector='app=nginx-ingress,'
                       'component=controller')
    if not lbs or not lbs.items:
        print(
            "Unable to find suitable nginx ingress service. "
            "Details https://github.com/jetstack/kube-lego/tree/master/examples/nginx"
        )
        sys.exit(1)
    if len(lbs.items) > 1:
        print("WARN: Found more than one suitable nginx ingress service.")
    for lb in lbs.items:
        print("Service {} IP {}".format(
            lb.metadata._name, lb.status.load_balancer.ingress[0].ip))
    sys.exit(0)