def exec_to_pod(command, pod_name, namespace='default'):
    """Non-interactively execute a command inside the given pod.

    Tries the kubernetes API exec stream first, then falls back to the
    ``kubectl`` binary via subprocess, and finally to ``os.popen``.

    :param list command: list of str which specify the command. (required)
    :param str/kubernetes.client.models.v1_pod.V1Pod pod_name:
        Pod name or V1Pod obj of the pod. (required)
    :param str namespace: namespace of the Pod. (default to 'default')
    :return: str: Output of the command.
    """
    config.load_kube_config()
    # If someone sent a kubernetes.client.models.v1_pod.V1Pod object in
    # pod_name, extract the plain name from its metadata.
    if isinstance(pod_name, V1Pod):
        pod_name = pod_name.metadata.name
    conf = Configuration()
    # Hostname assertion must be off for the exec stream; remember the old
    # value so it can be restored in the finally block.
    assert_hostname = conf.assert_hostname
    conf.assert_hostname = False
    Configuration.set_default(conf)
    api = core_v1_api.CoreV1Api()
    try:
        result = stream(api.connect_get_namespaced_pod_exec, pod_name,
                        namespace, command=command,
                        stderr=True, stdin=False, stdout=True, tty=False)
    except ApiException as err:
        # Fixed: py2-only print statements replaced with print() calls.
        print('Execing to NDM-Pod using kubernetes API failed:', str(err))
        try:
            result = subprocess.check_output(
                ['kubectl', 'exec', '-n', namespace, pod_name]
                + ['--'] + command)
        except Exception as err:
            if isinstance(err, subprocess.CalledProcessError):
                print('Subprocess error occured',
                      'while executing command on pod:', err.returncode)
            else:
                print('An error occured while executing command on pod:',
                      str(err))
            # Fixed: the CalledProcessError path previously left `result`
            # unbound (NameError at return); both failure paths now fall
            # back to os.popen as a last resort.
            try:
                result = os_popen('kubectl exec -n ' + namespace
                                  + ' ' + pod_name + ' -- '
                                  + ' '.join(command)).read()
            except Exception as err:
                raise err
    finally:
        # Undoing the previous changes to configuration
        conf.assert_hostname = assert_hostname
        Configuration.set_default(conf)
    return result
def pytest_collection_modifyitems(config, items):
    """Mark tests to skip based on options and the detected volume driver."""
    if not config.getoption(ENABLE_RECURRING_JOB_OPT):
        marker = pytest.mark.skip(
            reason="need " + ENABLE_RECURRING_JOB_OPT + " option to run")
        for item in items:
            if "recurring_job" in item.keywords:
                item.add_marker(marker)

    cfg = Configuration()
    cfg.assert_hostname = False
    Configuration.set_default(cfg)
    k8sconfig.load_incluster_config()
    api = k8sclient.CoreV1Api()

    # The csi-provisioner pod is only present on CSI deployments; probe for
    # it to decide which test family to skip.
    try:
        api.read_namespaced_pod(name='csi-provisioner-0',
                                namespace='longhorn-system')
        marker = pytest.mark.skip(
            reason="environment is not using flexvolume")
        keyword = "flexvolume"
    except ApiException as e:
        if e.status != 404:
            return
        marker = pytest.mark.skip(reason="environment is not using csi")
        keyword = "csi"
    for item in items:
        if keyword in item.keywords:
            item.add_marker(marker)
def get_kube_client(self, in_cluster=None):
    """Build a CoreV1Api client, using in-cluster or kubeconfig settings.

    Raises a friendly executor error when the configuration cannot be loaded.
    """
    from kubernetes import config, client

    use_incluster = self.in_cluster if in_cluster is None else in_cluster
    try:
        if use_incluster:
            config.load_incluster_config()
        else:
            config.load_kube_config(config_file=self.config_file,
                                    context=self.cluster_context)
    except ConfigException as e:
        raise friendly_error.executor_k8s.failed_to_connect_to_cluster(
            self.in_cluster, e
        )
    if PY2:
        # For connect_get_namespaced_pod_exec
        from kubernetes.client import Configuration
        cfg = Configuration()
        cfg.assert_hostname = False
        Configuration.set_default(cfg)
    return client.CoreV1Api()
def k8s_api():
    """Load the kubeconfig and return a CoreV1Api client."""
    config.load_kube_config()
    cfg = Configuration()
    # Hostname assertion breaks pod-exec streaming, so turn it off.
    cfg.assert_hostname = False
    Configuration.set_default(cfg)
    return core_v1_api.CoreV1Api()
def init_k8s_client():
    """Return a CoreV1Api client configured from the default kubeconfig."""
    # Configs can be set in the Configuration class directly or via helpers.
    config.load_kube_config()
    cfg = Configuration()
    cfg.assert_hostname = False
    Configuration.set_default(cfg)
    return core_v1_api.CoreV1Api()
def init():
    """Initialise the module-level CoreV1Api client from the kubeconfig."""
    global api
    config.load_kube_config()
    c = Configuration()
    c.assert_hostname = False
    # Bug fix: set_default was commented out, so the configuration (and the
    # assert_hostname=False setting) was never applied to the client below.
    Configuration.set_default(c)
    api = core_v1_api.CoreV1Api()
def pytest_collection_modifyitems(config, items):
    """Skip tests the current options or cluster capabilities cannot support.

    Applies skip markers for recurring-job tests (option gated),
    flexvolume/csi tests (driver detection), and base-image/mount-disk
    tests (mount-propagation support on every node).
    """
    if not config.getoption(ENABLE_RECURRING_JOB_OPT):
        skip_upgrade = pytest.mark.skip(reason="need " +
                                        ENABLE_RECURRING_JOB_OPT +
                                        " option to run")
        for item in items:
            if "recurring_job" in item.keywords:
                item.add_marker(skip_upgrade)

    c = Configuration()
    c.assert_hostname = False
    Configuration.set_default(c)
    k8sconfig.load_incluster_config()
    api = k8sclient.CoreV1Api()

    try:
        # The csi-provisioner pod only exists on CSI deployments.
        api.read_namespaced_pod(
            name='csi-provisioner-0', namespace='longhorn-system')
        skip_upgrade = pytest.mark.skip(reason="environment is not using " +
                                        "flexvolume")
        for item in items:
            if "flexvolume" in item.keywords:
                item.add_marker(skip_upgrade)
    except ApiException as e:
        if (e.status == 404):
            skip_upgrade = pytest.mark.skip(reason="environment is not " +
                                            "using csi")
            for item in items:
                if "csi" in item.keywords:
                    item.add_marker(skip_upgrade)

    all_nodes_support_mount_propagation = True
    for node in get_longhorn_api_client().list_node():
        node = wait_for_node_mountpropagation_condition(
            get_longhorn_api_client(), node["name"])
        if "conditions" not in node.keys():
            all_nodes_support_mount_propagation = False
        else:
            conditions = node["conditions"]
            # Bug fix: .iteritems() is Python-2-only and raises
            # AttributeError on Python 3; .items() works on both.
            for key, condition in conditions.items():
                if key == NODE_CONDITION_MOUNTPROPAGATION and \
                        condition["status"] != CONDITION_STATUS_TRUE:
                    all_nodes_support_mount_propagation = False
                    break
        if not all_nodes_support_mount_propagation:
            break

    if not all_nodes_support_mount_propagation:
        skip_upgrade = pytest.mark.skip(reason="environment does not " +
                                        "support base image")
        skip_node = pytest.mark.skip(reason="environment does not " +
                                     "support mount disk")
        for item in items:
            if "baseimage" in item.keywords:
                item.add_marker(skip_upgrade)
            elif "mountdisk" in item.keywords:
                item.add_marker(skip_node)
def run_monitors(endpoint: int, verbose: bool, queue: Queue, close_queue: Queue) -> List[Process]:
    """Start one monitor process per Cilium pod in kube-system.

    :param endpoint: endpoint identifier forwarded to connect_monitor
    :param verbose: verbosity flag forwarded to connect_monitor
    :param queue: queue for monitor output
    :param close_queue: queue used to signal shutdown
    :return: the started Process objects
    """
    config.load_kube_config()
    c = Configuration()
    c.assert_hostname = False
    Configuration.set_default(c)
    api = core_v1_api.CoreV1Api()
    namespace = 'kube-system'
    try:
        pods = api.list_namespaced_pod(namespace,
                                       label_selector='k8s-app=cilium')
    # Bug fix: the handler previously caught the undefined name
    # "APIException"; the kubernetes client raises ApiException, so the
    # except clause itself would have raised NameError.
    except ApiException as e:
        print('could not list Cilium pods: %s\n' % e)
        sys.exit(1)
    names = [pod.metadata.name for pod in pods.items]
    processes = [Process(target=connect_monitor,
                         args=(name, namespace, queue, close_queue, api,
                               endpoint, verbose))
                 for name in names]
    for p in processes:
        p.start()
    return processes
def connect():
    """Configure the kubernetes client from RD_CONFIG_* environment vars.

    Resolution order: in-cluster config, explicit config file, URL+token
    plugin settings, then the default kubeconfig.
    """
    config_file = None
    if os.environ.get('RD_CONFIG_ENV') == 'incluster':
        config.load_incluster_config()
        return

    if os.environ.get('RD_CONFIG_CONFIG_FILE'):
        config_file = os.environ.get('RD_CONFIG_CONFIG_FILE')
    url = None
    if os.environ.get('RD_CONFIG_URL'):
        url = os.environ.get('RD_CONFIG_URL')
    verify_ssl = None
    if os.environ.get('RD_CONFIG_VERIFY_SSL'):
        verify_ssl = os.environ.get('RD_CONFIG_VERIFY_SSL')
    ssl_ca_cert = None
    if os.environ.get('RD_CONFIG_SSL_CA_CERT'):
        ssl_ca_cert = os.environ.get('RD_CONFIG_SSL_CA_CERT')
    token = None
    if os.environ.get('RD_CONFIG_TOKEN'):
        token = os.environ.get('RD_CONFIG_TOKEN')

    log.debug("config file")
    log.debug(config_file)
    log.debug("-------------------")

    if config_file:
        log.debug("getting settings from file %s" % config_file)
        config.load_kube_config(config_file=config_file)
    else:
        if url:
            # Fixed typo in log message ("pluing" -> "plugin").
            log.debug("getting settings from plugin configuration")
            configuration = Configuration()
            configuration.host = url
            # Bug fix: verify_ssl was previously assigned the raw string
            # 'true' instead of a boolean; None (falsy) keeps the original
            # "verification disabled" behaviour for any other value.
            configuration.verify_ssl = True if verify_ssl == 'true' else None
            if ssl_ca_cert:
                configuration.ssl_ca_cert = ssl_ca_cert
            configuration.api_key['authorization'] = token
            configuration.api_key_prefix['authorization'] = 'Bearer'
            client.Configuration.set_default(configuration)
        else:
            log.debug("getting from default config file")
            config.load_kube_config()
            c = Configuration()
            c.assert_hostname = False
            Configuration.set_default(c)
def __init__(self, **kw):
    """Initialise the Kubernetes runner: API client, namespace, pod names.

    :param kw: forwarded to the parent runner constructor.
    """
    super(KubernetesRunner, self).__init__(**kw)
    config.load_kube_config()
    c = Configuration()
    c.assert_hostname = False
    Configuration.set_default(c)
    self._kclient = core_v1_api.CoreV1Api()
    # Validate that the kubeconfig has contexts; the active context itself
    # was never used (the previous binding was dead).
    config.list_kube_config_contexts()
    self._namespace = self._config.resman_opts.get("namespace", "default")
    # Fixed: f"pod" had no placeholders; a plain literal is equivalent.
    self._base_pod_name = pu.sanitized_name("pod", self._config.wid)
    self._base_pod_name = self._base_pod_name.replace("_", "-")
    self._init_pod_name = pu.sanitized_name("init-pod", self._config.wid)
    self._init_pod_name = self._init_pod_name.replace("_", "-")
    self._vol_claim_name = f"{self._base_pod_name}-pvc"
    self._vol_size = self._config.resman_opts.get("volume_size", "500Mi")
    self._init_pod_created = False
    self._vol_claim_created = False
def new(self):
    """Create the CoreV1Api client from the configured kubeconfig path."""
    config.load_kube_config(settings.K8S_CONFIG)
    cfg = Configuration()
    cfg.assert_hostname = False
    Configuration.set_default(cfg)
    self.k8s = core_v1_api.CoreV1Api()
    print('K8sclient init end')
def pytest_collection_modifyitems(config, items):
    """Skip tests the current options or cluster capabilities cannot support.

    Verifies longhorn is present, then applies skip markers for
    recurring-job tests (option gated), flexvolume/csi tests (driver
    detection), and base-image/mount-disk tests (mount-propagation support).
    """
    c = Configuration()
    c.assert_hostname = False
    Configuration.set_default(c)
    k8sconfig.load_incluster_config()
    core_api = k8sclient.CoreV1Api()

    check_longhorn(core_api)

    if config.getoption(SKIP_RECURRING_JOB_OPT):
        skip_upgrade = pytest.mark.skip(reason="remove " +
                                        SKIP_RECURRING_JOB_OPT +
                                        " option to run")
        for item in items:
            if "recurring_job" in item.keywords:
                item.add_marker(skip_upgrade)

    using_csi = check_csi(core_api)
    if using_csi:
        skip_upgrade = pytest.mark.skip(reason="environment is not using " +
                                        "flexvolume")
        for item in items:
            if "flexvolume" in item.keywords:
                item.add_marker(skip_upgrade)
    else:
        skip_upgrade = pytest.mark.skip(reason="environment is not " +
                                        "using csi")
        for item in items:
            if "csi" in item.keywords:
                item.add_marker(skip_upgrade)

    all_nodes_support_mount_propagation = True
    for node in get_longhorn_api_client().list_node():
        node = wait_for_node_mountpropagation_condition(
            get_longhorn_api_client(), node["name"])
        if "conditions" not in node.keys():
            all_nodes_support_mount_propagation = False
        else:
            conditions = node["conditions"]
            # Bug fix: .iteritems() is Python-2-only and raises
            # AttributeError on Python 3; .items() works on both.
            for key, condition in conditions.items():
                if key == NODE_CONDITION_MOUNTPROPAGATION and \
                        condition["status"] != CONDITION_STATUS_TRUE:
                    all_nodes_support_mount_propagation = False
                    break
        if not all_nodes_support_mount_propagation:
            break

    if not all_nodes_support_mount_propagation:
        skip_upgrade = pytest.mark.skip(reason="environment does not " +
                                        "support base image")
        skip_node = pytest.mark.skip(reason="environment does not " +
                                     "support mount disk")
        for item in items:
            if "baseimage" in item.keywords:
                item.add_marker(skip_upgrade)
            elif "mountdisk" in item.keywords:
                item.add_marker(skip_node)
def set_config(self):
    """Point the default client configuration at the GKE master.

    NOTE(review): verify_ssl and assert_hostname are both disabled here;
    confirm that is acceptable for the target environment.
    """
    cfg = Configuration()
    cfg.api_key["authorization"] = self.get_access_token()
    cfg.api_key_prefix["authorization"] = "Bearer"
    cfg.host = os.environ.get("GKE_MASTER_SERVER")
    cfg.verify_ssl = False
    cfg.assert_hostname = False
    Configuration.set_default(cfg)
def init():
    """Initialise the module-level ECR client, k8s API client, and RNG."""
    global ecr_client, api
    ecr_client = ecr.ECRImageDB()
    config.load_kube_config()
    c = Configuration()
    c.assert_hostname = False
    # Bug fix: the configuration was built and mutated but never registered
    # as default, so assert_hostname=False previously had no effect.
    Configuration.set_default(c)
    api = core_v1_api.CoreV1Api()
    random.seed(seed)
def main():
    """Entry point: build a CoreV1Api client and run the exec commands."""
    config.load_kube_config()
    cfg = Configuration()
    cfg.assert_hostname = False
    Configuration.set_default(cfg)
    exec_commands(core_v1_api.CoreV1Api())
def setUp(self):
    """Silence logging and prepare a CoreV1Api client for the tests."""
    log.setLevel("CRITICAL")
    config.load_kube_config()
    cfg = Configuration()
    cfg.assert_hostname = False
    Configuration.set_default(cfg)
    self._kclient = core_v1_api.CoreV1Api()
    # Read the contexts to validate the kubeconfig; the value is unused.
    _, _active_context = config.list_kube_config_contexts()
def main():
    """Entry point: configure the client and run the exec commands.

    Uses get_default_copy() when the installed client provides it, falling
    back to a fresh Configuration on older versions.
    """
    config.load_kube_config()
    try:
        cfg = Configuration().get_default_copy()
    except AttributeError:
        cfg = Configuration()
    cfg.assert_hostname = False
    Configuration.set_default(cfg)
    exec_commands(core_v1_api.CoreV1Api())
def __init__(self):
    """Api-client environment initialization.

    Assumes the KUBERNETES env is set so that the client has access to
    the oc cluster config.
    """
    config.load_kube_config()
    cfg = Configuration()
    cfg.assert_hostname = False
    Configuration.set_default(cfg)
    self.api = core_v1_api.CoreV1Api()
def main():
    """Configure logging and the k8s client, then run the exec commands."""
    logging.basicConfig(format='%(levelname)s %(asctime)s %(message)s',
                        datefmt='%m/%d/%Y %I:%M:%S ',
                        level=logging.INFO)
    config.load_kube_config()
    cfg = Configuration()
    cfg.assert_hostname = False
    Configuration.set_default(cfg)
    exec_commands(core_v1_api.CoreV1Api())
def main():
    """Run the scheduled greyhole maintenance command in matching pods.

    Selects pods by LABEL_SELECTOR in NAMESPACE, picks a greyhole command
    based on COMMAND_SWITCH (monthly/weekly/daily, default spool run), and
    executes it in each pod via the kubernetes exec stream.
    """
    logger = create_logger(PurePath(__file__).stem)
    config.load_incluster_config()
    configuration = Configuration()
    # TLS verification is disabled (presumably self-signed in-cluster
    # certs — TODO confirm) and the resulting urllib3 warnings silenced.
    configuration.verify_ssl = False
    configuration.assert_hostname = False
    urllib3.disable_warnings()
    Configuration.set_default(configuration)
    api = core_v1_api.CoreV1Api()
    label_selector = getenv('LABEL_SELECTOR', 'role=greyhole')
    namespace = getenv('NAMESPACE', 'storage')
    command_switch = getenv('COMMAND_SWITCH', '')
    k8s_response = api.list_namespaced_pod(namespace=namespace,
                                           label_selector=label_selector)
    logger.info(f'ENV Commands {label_selector} {namespace} {command_switch}')
    logger.debug(f'{k8s_response}')
    for pod in k8s_response.items:
        name = pod.metadata.name
        # NOTE(review): this read overwrites k8s_response but the result is
        # otherwise unused; it appears to only confirm the pod still exists.
        k8s_response = api.read_namespaced_pod(name=name,
                                               namespace=namespace)
        exec_command = ['/bin/sh', '-c']
        if command_switch.lower() == 'monthly':
            exec_command.append('greyhole --fsck --checksums')
        elif command_switch.lower() == 'weekly':
            exec_command.append(
                'greyhole --fsck --dont-walk-metadata-store --disk-usage-report'
            )
        elif command_switch.lower() == 'daily':
            exec_command.append(
                'greyhole --fsck --if-conf-changed --dont-walk-metadata-store')
        else:
            # Default (including unset): process the spool and keep alive.
            exec_command.append('greyhole --process-spool --keepalive')
        # Blocking exec; the command's combined stdout/stderr is returned.
        k8s_response = stream(api.connect_get_namespaced_pod_exec,
                              name,
                              namespace,
                              command=exec_command,
                              stderr=True,
                              stdin=False,
                              stdout=True,
                              tty=False)
        logger.info(f'Cleanup {name}: {k8s_response}')
    logger.info(f'Successfully executed cron job')
def connect():
    """Configure the kubernetes client from RD_CONFIG_* environment vars.

    Resolution order: in-cluster config, explicit config file (plugin or
    node level), URL+token plugin settings, then the default kubeconfig.
    """
    config_file = None
    if os.environ.get('RD_CONFIG_ENV') == 'incluster':
        config.load_incluster_config()
        return

    if os.environ.get('RD_CONFIG_CONFIG_FILE'):
        config_file = os.environ.get('RD_CONFIG_CONFIG_FILE')
    elif os.environ.get('RD_NODE_KUBERNETES_CONFIG_FILE'):
        config_file = os.environ.get('RD_NODE_KUBERNETES_CONFIG_FILE')

    verify_ssl = os.environ.get('RD_CONFIG_VERIFY_SSL')
    ssl_ca_cert = os.environ.get('RD_CONFIG_SSL_CA_CERT')
    url = os.environ.get('RD_CONFIG_URL')
    token = os.environ.get('RD_CONFIG_TOKEN')
    if not token:
        token = os.environ.get('RD_CONFIG_TOKEN_STORAGE_PATH')

    log.debug("config file")
    log.debug(config_file)
    log.debug("-------------------")

    if config_file:
        log.debug("getting settings from file %s", config_file)
        config.load_kube_config(config_file=config_file)
    else:
        if url and token:
            log.debug("getting settings from plugin configuration")
            configuration = Configuration()
            configuration.host = url
            # Bug fix: verify_ssl was previously assigned the raw string
            # 'true' instead of a boolean; None (falsy) keeps the original
            # "verification disabled" behaviour for any other value.
            configuration.verify_ssl = True if verify_ssl == 'true' else None
            configuration.assert_hostname = False
            if ssl_ca_cert:
                configuration.ssl_ca_cert = ssl_ca_cert
            configuration.api_key['authorization'] = token
            configuration.api_key_prefix['authorization'] = 'Bearer'
            client.Configuration.set_default(configuration)
        else:
            log.debug(
                "Either URL or Token is not defined. Fall back to getting settings from default config file [$home/.kube/config]"
            )
            config.load_kube_config()
def __init__(self):
    """Load the kubeconfig, build the CoreV1 client, and reset node/pod caches."""
    config.load_kube_config()
    cfg = Configuration()
    cfg.assert_hostname = False
    Configuration.set_default(cfg)
    self.v1 = core_v1_api.CoreV1Api()
    # Caches filled in later by discovery methods.
    self.CalicoNodePods = []
    self.EtcdServerPods = []
    self.k8sNodes = []
    self.EtcdNodes = {}
    self.rmNodes = []
def core_api(request):
    """Create a new CoreV1API instance.

    Returns:
        A new CoreV1API Instance.
    """
    cfg = Configuration()
    cfg.assert_hostname = False
    Configuration.set_default(cfg)
    k8sconfig.load_incluster_config()
    return k8sclient.CoreV1Api()
def _load_kube_config(in_cluster, cluster_context, config_file):
    """Load the kubernetes configuration and return a CoreV1Api client."""
    from kubernetes import config, client
    if in_cluster:
        config.load_incluster_config()
    else:
        config.load_kube_config(config_file=config_file,
                                context=cluster_context)
    if PY2:
        # For connect_get_namespaced_pod_exec
        from kubernetes.client import Configuration
        cfg = Configuration()
        cfg.assert_hostname = False
        Configuration.set_default(cfg)
    return client.CoreV1Api()
def kaas(kube_config_file_path):
    """Open a connection to kubernetes-as-a-service (KAAS) via the python SDK.

    :param kube_config_file_path: Kubernetes configuration file path
    :return: api_client
    """
    cfg = Configuration()
    cfg.assert_hostname = False
    load_kube_config(client_configuration=cfg,
                     config_file=kube_config_file_path)
    Configuration.set_default(cfg)
    return Kaas(kube_config_file_path)
def _load_kube_config(in_cluster, cluster_context, config_file):
    """Load k8s config and return CoreV1, BatchV1 and NetworkingV1beta1 clients."""
    if in_cluster:
        config.load_incluster_config()
    else:
        config.load_kube_config(config_file=config_file,
                                context=cluster_context)
    if PY2:
        # For connect_get_namespaced_pod_exec
        cfg = Configuration()
        cfg.assert_hostname = False
        Configuration.set_default(cfg)
    return (client.CoreV1Api(),
            client.BatchV1Api(),
            client.NetworkingV1beta1Api())
def _load_kube_config(in_cluster, cluster_context, config_file):
    """Load k8s config, failing early when the kubernetes package is absent."""
    if not has_kubernetes:
        raise _import_err
    if in_cluster:
        config.load_incluster_config()
    else:
        config.load_kube_config(config_file=config_file,
                                context=cluster_context)
    if PY2:
        # For connect_get_namespaced_pod_exec
        from kubernetes.client import Configuration
        cfg = Configuration()
        cfg.assert_hostname = False
        Configuration.set_default(cfg)
    return client.CoreV1Api()
def __init__(self, pod_config, input_file_names, output_file_name):
    """Load the pod config and prepare the CoreV1Api client.

    :param pod_config: path to the pod deployment YAML file
    :param input_file_names: input file names for the run
    :param output_file_name: output file name for the run
    :raises NotImplementedError: on Windows (Posix-only local execution)
    """
    if os.name == 'nt':
        # Bug fix: the adjacent string literals concatenated without a
        # space, producing "...Posix systemsonly."
        msg = ('Local execution is provided for testing on Posix systems '
               'only. We detect you are using Windows.')
        logger.error(msg)
        raise NotImplementedError(msg)
    with open(pod_config, 'r') as fd:
        # BaseLoader parses all scalars as plain strings and runs no
        # object constructors, so untrusted YAML is safe here.
        self.dep = yaml.load(fd, Loader=yaml.BaseLoader)
    self.input_file_names = input_file_names
    self.output_file_name = output_file_name
    config.load_kube_config()
    c = Configuration()
    c.assert_hostname = False
    Configuration.set_default(c)
    self.core_v1 = core_v1_api.CoreV1Api()
def __init__(self):
    """Set up shutdown signalling, cluster config, and the worker threads."""
    self.shutting_down = threading.Event()
    config.load_incluster_config()
    # Disabling host name validation is unfortunately required for exec
    # with the python k8s client version 4.0.0
    cfg = Configuration()
    cfg.assert_hostname = False
    Configuration.set_default(cfg)
    self.periodic_check_thread = threading.Thread(
        name='PeriodicCheck',
        target=periodical_check,
        args=(self.shutting_down, args['--periodic-check-interval']))
    self.event_listener_thread = threading.Thread(
        name='EventListener',
        target=event_listener,
        args=(self.shutting_down, args['--event-listener-timeout']))
def get_kube_client(self, in_cluster=None):
    """Return a CoreV1Api client, using in-cluster config when requested."""
    from kubernetes import config, client

    use_incluster = self.in_cluster if in_cluster is None else in_cluster
    if use_incluster:
        config.load_incluster_config()
    else:
        config.load_kube_config(config_file=self.config_file,
                                context=self.cluster_context)
    if PY2:
        # For connect_get_namespaced_pod_exec
        from kubernetes.client import Configuration
        cfg = Configuration()
        cfg.assert_hostname = False
        Configuration.set_default(cfg)
    return client.CoreV1Api()
def __init__(self):
    """Initializes a KubernetesClient and ensures the mlstack namespace exists."""
    kubernetes.config.load_kube_config()
    kube_cfg = KubeConfig()
    kube_cfg.assert_hostname = False
    KubeConfig.set_default(kube_cfg)
    super().__init__()
    existing = {item.metadata.name
                for item in KubeApi().list_namespace().items}
    if "mlstack" not in existing:
        self.create_namespace(body={
            "apiVersion": "v1",
            "kind": "Namespace",
            "metadata": {"name": "mlstack"},
        })
import time from kubernetes import config from kubernetes.client import Configuration from kubernetes.client.apis import core_v1_api from kubernetes.client.rest import ApiException from kubernetes.stream import stream config.load_kube_config() c = Configuration() c.assert_hostname = False Configuration.set_default(c) api = core_v1_api.CoreV1Api() name = 'busybox-test' resp = None try: resp = api.read_namespaced_pod(name=name, namespace='default') except ApiException as e: if e.status != 404: print("Unknown error: %s" % e) exit(1) if not resp: print("Pod %s does not exist. Creating it..." % name) pod_manifest = { 'apiVersion': 'v1', 'kind': 'Pod', 'metadata': { 'name': name