def print_join_token():
    """Print and execute the kubeadm join command for each bootstrap token.

    Derives the master IP/port from the default kubernetes client
    configuration, locates the cluster CA certificate on disk, then runs
    the engine/join_token.sh helper once per decoded bootstrap token.
    """
    import os
    from api.api_client import running_in_docker_container
    from kubernetes.client import Configuration

    # Hoist the Configuration lookup: host looks like "https://1.2.3.4:6443".
    host = Configuration().host
    master_ip = host.split(':')[1][2:]  # strip the leading "//"
    master_port = host.split(':')[2]

    ca_cert = '/etc/kubernetes/pki/ca.crt'
    if not os.path.exists(ca_cert):
        # Fall back to the alternate CA location.
        ca_cert = '/etc/kubernetes/ca.crt'
    if running_in_docker_container():
        # Inside the container the host path is mounted under /tmp.
        ca_cert = '/tmp' + ca_cert

    join_token_path = os.path.dirname(
        os.path.realpath(__file__)) + '/engine/join_token.sh'
    tokens = engine.utils.list_boostrap_tokens_decoded()
    if not tokens:
        print("No bootstrap tokens exist")
    else:
        for token in tokens:
            # NOTE(review): the command is assembled by concatenation and run
            # through a shell; values come from cluster config, but
            # subprocess.run([...], shell=False) would be safer. TODO confirm.
            command = 'sh ' + join_token_path + ' ' + ' '.join(
                [master_ip, master_port, ca_cert, token])
            print('\nExecute: %s' % command)
            os.system(command)
def connect():
    """Configure the kubernetes client from RD_CONFIG_* environment variables.

    Resolution order: in-cluster config, explicit config file, explicit URL
    (with token / TLS settings), then the default kube config file.
    """
    config_file = None
    if os.environ.get('RD_CONFIG_ENV') == 'incluster':
        config.load_incluster_config()
        return

    if os.environ.get('RD_CONFIG_CONFIG_FILE'):
        config_file = os.environ.get('RD_CONFIG_CONFIG_FILE')

    url = None
    if os.environ.get('RD_CONFIG_URL'):
        url = os.environ.get('RD_CONFIG_URL')

    verify_ssl = None
    if os.environ.get('RD_CONFIG_VERIFY_SSL'):
        verify_ssl = os.environ.get('RD_CONFIG_VERIFY_SSL')

    ssl_ca_cert = None
    if os.environ.get('RD_CONFIG_SSL_CA_CERT'):
        ssl_ca_cert = os.environ.get('RD_CONFIG_SSL_CA_CERT')

    token = None
    if os.environ.get('RD_CONFIG_TOKEN'):
        token = os.environ.get('RD_CONFIG_TOKEN')

    log.debug("config file")
    log.debug(config_file)
    log.debug("-------------------")

    if config_file:
        log.debug("getting settings from file %s" % config_file)
        config.load_kube_config(config_file=config_file)
    else:
        if url:
            log.debug("getting settings from plugin configuration")
            configuration = Configuration()
            configuration.host = url
            # Fix: store a real boolean when the env var is 'true' instead of
            # the raw string (which was only correct by truthiness).
            if verify_ssl == 'true':
                configuration.verify_ssl = True
            else:
                configuration.verify_ssl = None
            if ssl_ca_cert:
                configuration.ssl_ca_cert = ssl_ca_cert
            configuration.api_key['authorization'] = token
            configuration.api_key_prefix['authorization'] = 'Bearer'
            client.Configuration.set_default(configuration)
        else:
            log.debug("getting from default config file")
            config.load_kube_config()
            c = Configuration()
            c.assert_hostname = False
            Configuration.set_default(c)
def _make_remote_api(host, port, token, cert):
    """Build a CustomObjectsApi for a remote cluster endpoint (helper)."""
    conf = Configuration()
    conf.api_key['authorization'] = token
    conf.host = f'https://{host}:{port}'
    conf.verify_ssl = True
    conf.ssl_ca_cert = cert
    return CustomObjectsApi(ApiClient(conf))


def _get_api(cluster):
    """ Get custom objects api associated with a cluster specifier. """
    if cluster is None:
        # Local cluster: use the pod's in-cluster service account.
        load_incluster_config()
        api = CustomObjectsApi()
    elif cluster == 'remote_transcode':
        # Remote transcode cluster described by environment variables.
        api = _make_remote_api(os.getenv('REMOTE_TRANSCODE_HOST'),
                               os.getenv('REMOTE_TRANSCODE_PORT'),
                               os.getenv('REMOTE_TRANSCODE_TOKEN'),
                               os.getenv('REMOTE_TRANSCODE_CERT'))
    else:
        # Cluster registered in the database: its CA cert is stored inline,
        # so write it to a temp file for the TLS layer.
        cluster_obj = JobCluster.objects.get(pk=cluster)
        fd, cert = tempfile.mkstemp(text=True)
        with open(fd, 'w') as f:
            f.write(cluster_obj.cert)
        # NOTE(review): the temp cert file is never removed — TODO confirm
        # whether cleanup happens elsewhere.
        api = _make_remote_api(cluster_obj.host, cluster_obj.port,
                               cluster_obj.token, cert)
    return api
def test_disable_verify_ssl(self):
    """verify_ssl defaults to enabled and is cleared by _disable_verify_ssl()."""
    before = Configuration()
    self.assertTrue(before.verify_ssl)
    _disable_verify_ssl()
    after = Configuration()
    self.assertFalse(after.verify_ssl)
def main():
    """Load kube config, relax hostname assertion, and run exec_commands."""
    config.load_kube_config()
    try:
        # Newer client versions expose get_default_copy(); older ones don't.
        c = Configuration().get_default_copy()
    except AttributeError:
        c = Configuration()
    c.assert_hostname = False
    Configuration.set_default(c)
    core_v1 = core_v1_api.CoreV1Api()
    exec_commands(core_v1)
def _load_kube_config(self):
    """Load the admin kubeconfig and globally turn off TLS verification."""
    config.load_kube_config('/etc/kubernetes/admin.conf')
    # Workaround: Turn off SSL/TLS verification
    cfg = Configuration()
    cfg.verify_ssl = False
    Configuration.set_default(cfg)
def _disable_verify_ssl() -> None:
    """Disable TLS verification on the default client Configuration."""
    # Prefer the default-copy accessor when this client version has it.
    if hasattr(Configuration, 'get_default_copy'):
        cfg = Configuration.get_default_copy()
    else:
        cfg = Configuration()
    cfg.verify_ssl = False
    Configuration.set_default(cfg)
def run_monitors(endpoint: int, verbose: bool, queue: Queue, close_queue: Queue) -> List[Process]:
    """Start one monitor process per Cilium pod found in kube-system.

    Returns the list of started processes; exits the program if the pod
    listing fails.
    """
    config.load_kube_config()
    cfg = Configuration()
    cfg.assert_hostname = False
    Configuration.set_default(cfg)
    api = core_v1_api.CoreV1Api()
    namespace = 'kube-system'
    try:
        pods = api.list_namespaced_pod(namespace,
                                       label_selector='k8s-app=cilium')
    except APIException as e:
        print('could not list Cilium pods: %s\n' % e)
        sys.exit(1)
    pod_names = [pod.metadata.name for pod in pods.items]
    processes = [
        Process(target=connect_monitor,
                args=(pod_name, namespace, queue, close_queue, api,
                      endpoint, verbose))
        for pod_name in pod_names
    ]
    for proc in processes:
        proc.start()
    return processes
def get_kube_client(self, in_cluster=None):
    """Return a CoreV1Api client, loading in-cluster or kubeconfig settings.

    Falls back to ``self.in_cluster`` when *in_cluster* is not given and
    converts a ConfigException into the project's friendly error.
    """
    from kubernetes import config, client

    effective_in_cluster = self.in_cluster if in_cluster is None else in_cluster
    try:
        if effective_in_cluster:
            config.load_incluster_config()
        else:
            config.load_kube_config(
                config_file=self.config_file,
                context=self.cluster_context
            )
    except ConfigException as e:
        raise friendly_error.executor_k8s.failed_to_connect_to_cluster(
            self.in_cluster, e
        )

    if PY2:
        # For connect_get_namespaced_pod_exec
        from kubernetes.client import Configuration
        cfg = Configuration()
        cfg.assert_hostname = False
        Configuration.set_default(cfg)
    return client.CoreV1Api()
def main(args):
    """Load the job definition, connect to the cluster, and run the scheduler.

    Raises AssertionError when the log path is missing or a volume entry
    lacks 'name'/'mountPath'.
    """
    # load job definition from the job file
    with open(args.job, 'r') as fi:
        job = yaml.safe_load(fi)

    if 'KUBERNETES_PORT' in os.environ:
        # Running inside a pod: use the service-account config.
        config.load_incluster_config()
    else:
        # The environment variable overrides the job-file setting; the
        # original os.environ.get(...) default was dead code because the key
        # was already known to be present.
        cfg_fn = job.get('KUBECONFIG')
        if 'KUBECONFIG' in os.environ:
            cfg_fn = os.environ['KUBECONFIG']
        if cfg_fn is None:
            # Fix: previously this path raised NameError when neither the job
            # file nor the environment provided KUBECONFIG.
            cfg_fn = '~/.kube/config'
        config.load_kube_config(config_file=os.path.expanduser(cfg_fn))

    assert os.path.exists(args.log_path) and os.path.isdir(args.log_path)
    sch = PodScheduler(pod_name=job['name'],
                       image=job['image'],
                       tasks=job['tasks'],
                       node_selector=job.get('nodeSelector'),
                       namespace=job.get('ns', 'default'),
                       workers_num=job.get('workers_num', 50),
                       log_path=args.log_path,
                       container_mount=job.get('container_mount'),
                       host_mount=job.get('host_mount'),
                       volumes=job.get('volumes'),
                       cpu_mils=args.cpu_mils,
                       verbose=args.verbose)
    if sch.volumes:
        for v in sch.volumes:
            assert 'name' in v
            assert 'mountPath' in v
    sch.run_scheduler()
    print("main: done")
def _get_client(self, server, token):
    """Build a DynamicClient for *server*, optionally through an SSH tunnel."""
    opts = dict(
        api_key={'authorization': f'Bearer {token}'},
        host=server,
        verify_ssl=False,
        # default timeout seems to be 1+ minutes
        retries=5)
    if self.jump_host:
        # the ports could be parameterized, but at this point
        # we only have need of 1 tunnel for 1 service
        self.jump_host.create_ssh_tunnel()
        local_port = self.jump_host.local_port
        opts['proxy'] = f'http://localhost:{local_port}'
    configuration = Configuration()
    # Configuration() accepts only a few constructor arguments; the rest of
    # the client behavior is tuned through instance attributes, so the opts
    # mapping above is applied with setattr.
    for attr_name, attr_value in opts.items():
        setattr(configuration, attr_name, attr_value)
    k8s_client = ApiClient(configuration)
    try:
        return DynamicClient(k8s_client)
    except urllib3.exceptions.MaxRetryError as e:
        raise StatusCodeError(f"[{self.server}]: {e}")
def init_k8s_client():
    """Return a CoreV1Api after loading kubeconfig with hostname checks off."""
    # Configs can be set in Configuration class directly or using helper utility.
    config.load_kube_config()
    cfg = Configuration()
    cfg.assert_hostname = False
    Configuration.set_default(cfg)
    return core_v1_api.CoreV1Api()
def _set_config(self):
    """Install CniService's host/CA/token as the default client Configuration."""
    cfg = Configuration()
    cfg.host = CniService.config.host
    cfg.ssl_ca_cert = CniService.ssl_ca_cert
    cfg.api_key['authorization'] = CniService.config.api_key['authorization']
    Configuration.set_default(cfg)
def pytest_collection_modifyitems(config, items):
    # Skip recurring-job tests unless the enabling CLI option was passed.
    if not config.getoption(ENABLE_RECURRING_JOB_OPT):
        skip_upgrade = pytest.mark.skip(reason="need " +
                                        ENABLE_RECURRING_JOB_OPT +
                                        " option to run")
        for item in items:
            if "recurring_job" in item.keywords:
                item.add_marker(skip_upgrade)

    # Configure the in-cluster kubernetes client with hostname checks off.
    c = Configuration()
    c.assert_hostname = False
    Configuration.set_default(c)
    k8sconfig.load_incluster_config()
    api = k8sclient.CoreV1Api()

    try:
        # Probing for the CSI provisioner pod distinguishes a CSI cluster
        # (pod found) from a flexvolume cluster (404 handled below).
        api.read_namespaced_pod(
            name='csi-provisioner-0', namespace='longhorn-system')
        skip_upgrade = pytest.mark.skip(reason="environment is not using " +
                                               "flexvolume")
        for item in items:
            if "flexvolume" in item.keywords:
                item.add_marker(skip_upgrade)
    except ApiException as e:
        if (e.status == 404):
            skip_upgrade = pytest.mark.skip(reason="environment is not " +
                                                   "using csi")
            for item in items:
                if "csi" in item.keywords:
                    item.add_marker(skip_upgrade)

    # Base-image / mount-disk tests need the MountPropagation condition to be
    # true on every node; bail out of the scan early once one node fails.
    all_nodes_support_mount_propagation = True
    for node in get_longhorn_api_client().list_node():
        node = wait_for_node_mountpropagation_condition(
            get_longhorn_api_client(), node["name"])
        if "conditions" not in node.keys():
            all_nodes_support_mount_propagation = False
        else:
            conditions = node["conditions"]
            # NOTE(review): iteritems() is Python 2 only — on Python 3 this
            # raises AttributeError unless `conditions` is a custom mapping
            # providing iteritems(). TODO confirm the runtime/type.
            for key, condition in conditions.iteritems():
                if key == NODE_CONDITION_MOUNTPROPAGATION and \
                        condition["status"] != CONDITION_STATUS_TRUE:
                    all_nodes_support_mount_propagation = False
                    break
        if not all_nodes_support_mount_propagation:
            break

    if not all_nodes_support_mount_propagation:
        skip_upgrade = pytest.mark.skip(reason="environment does not " +
                                               "support base image")
        skip_node = pytest.mark.skip(reason="environment does not " +
                                            "support mount disk")
        for item in items:
            if "baseimage" in item.keywords:
                item.add_marker(skip_upgrade)
            elif "mountdisk" in item.keywords:
                item.add_marker(skip_node)
def pytest_collection_modifyitems(config, items):
    """Skip recurring-job tests without the enabling option, and skip
    flexvolume or csi tests depending on which driver the cluster uses."""
    if not config.getoption(ENABLE_RECURRING_JOB_OPT):
        marker = pytest.mark.skip(reason="need " + ENABLE_RECURRING_JOB_OPT +
                                         " option to run")
        for item in items:
            if "recurring_job" in item.keywords:
                item.add_marker(marker)

    cfg = Configuration()
    cfg.assert_hostname = False
    Configuration.set_default(cfg)
    k8sconfig.load_incluster_config()
    api = k8sclient.CoreV1Api()

    # The presence of the CSI provisioner pod decides which driver-specific
    # tests get skipped; a 404 means the cluster is on flexvolume.
    try:
        api.read_namespaced_pod(
            name='csi-provisioner-0', namespace='longhorn-system')
    except ApiException as e:
        if e.status == 404:
            marker = pytest.mark.skip(reason="environment is not using csi")
            for item in items:
                if "csi" in item.keywords:
                    item.add_marker(marker)
    else:
        marker = pytest.mark.skip(reason="environment is not using flexvolume")
        for item in items:
            if "flexvolume" in item.keywords:
                item.add_marker(marker)
def build_k8s_config(config_path):
    """Build a kubernetes client Configuration from an on-disk cluster layout.

    Reads cluster.yaml (or the STATUS_YAML fallback) under *config_path*,
    derives the API server host, encodes basic-auth credentials into a
    "Basic ..." authorization header, and points at the apiserver CA cert.
    """
    cluster_path = os.path.join(config_path, "cluster.yaml")
    if not os.path.isfile(cluster_path):
        cluster_path = os.path.join(config_path, STATUS_YAML)
    with open(cluster_path) as f:
        cluster_config = yaml.full_load(f)

    config = Configuration()
    infra_host = find_infra_node_name(cluster_config["machines"])
    if os.path.isfile(cluster_path):
        # Config file exists: address the infra node by domain name on 1443.
        config.host = "https://%s.%s:1443" % (
            infra_host, cluster_config["network"]["domain"])
        basic_auth = cluster_config["basic_auth"]
    else:
        # NOTE(review): this branch looks unreachable — the open() above
        # would already have raised if cluster_path did not exist. TODO
        # confirm whether the file can disappear between the two checks.
        config.host = cluster_config["machines"][infra_host]["fqdns"]
        with open(os.path.join(config_path,
                               "clusterID", "k8s_basic_auth.yml")) as auf:
            basic_auth = yaml.safe_load(auf)["basic_auth"]

    # NOTE(review): the index order below implies basic_auth is formatted as
    # "password,username" — verify against whatever writes this field.
    config.username = basic_auth.split(",")[1]
    config.password = basic_auth.split(",")[0]
    bearer = "%s:%s" % (config.username, config.password)
    encoded = base64.b64encode(bearer.encode("utf-8")).decode("utf-8")
    config.api_key["authorization"] = "Basic " + encoded
    config.ssl_ca_cert = os.path.join(config_path, "ssl/apiserver/ca.pem")
    return config
def k8s_api():
    """Create and return a CoreV1Api bound to the local kubeconfig."""
    config.load_kube_config()
    cfg = Configuration()
    cfg.assert_hostname = False
    Configuration.set_default(cfg)
    return core_v1_api.CoreV1Api()
def main(args):
    """Read the job definition from *args.job* and hand it to the scheduler."""
    # load job definition from the job file
    with open(args.job, 'r') as fi:
        job = yaml.safe_load(fi)
    config.load_kube_config()
    # NOTE(review): this Configuration instance is never used afterwards —
    # kept for behavioral parity; confirm whether it can be removed.
    c = Configuration()
    scheduler(job)
def __configure_by_params(self):
    """ Return API client from configuration file """
    # Recognized authentication parameter names (host, api_key, verify_ssl,
    # ...) as declared by AUTH_ARG_SPEC.
    auth_args = AUTH_ARG_SPEC.keys()
    core_configuration = Configuration()
    # Mirror every provided auth parameter onto BOTH the SDK-level
    # configuration and the core client configuration.
    for key, value in iteritems(self.params):
        if key in auth_args and value is not None:
            if key == 'api_key':
                # api_key must be a dict keyed by header name; wrap the raw
                # token value as a Bearer credential.
                setattr(
                    sdk.configuration, key,
                    {'authorization': "Bearer {0}".format(value)})
                setattr(
                    core_configuration, key,
                    {'authorization': "Bearer {0}".format(value)})
            else:
                setattr(sdk.configuration, key, value)
                setattr(core_configuration, key, value)
    # A missing/falsy verify_ssl parameter disables TLS verification.
    if not self.params.get('verify_ssl'):
        sdk.configuration.verify_ssl = False
        core_configuration.verify_ssl = False
    # The SDK configuration additionally absorbs kubeconfig settings; the
    # core configuration becomes the process-wide default.
    kube_config.load_kube_config(client_configuration=sdk.configuration)
    Configuration.set_default(core_configuration)
    return sdk.DefaultApi(), core_client.CoreV1Api()
def init():
    """Initialize the module-level CoreV1Api handle from the local kubeconfig."""
    global api
    config.load_kube_config()
    cfg = Configuration()
    cfg.assert_hostname = False
    # Configuration.set_default(cfg)
    api = core_v1_api.CoreV1Api()
def build_client(self):
    """Construct a K8sClient authenticated with the stored token and endpoint."""
    cfg = Configuration()
    cfg.host = self.auth_info['K8S-Endpoint']
    cfg.api_key['authorization'] = "Bearer {}".format(
        self.auth_info['K8S-Token'])
    cfg.verify_ssl = self._verify_ssl
    return K8sClient(ApiClient(cfg))
def new(self):
    """Create the CoreV1Api client from the configured kubeconfig path."""
    config.load_kube_config(settings.K8S_CONFIG)
    cfg = Configuration()
    cfg.assert_hostname = False
    Configuration.set_default(cfg)
    self.k8s = core_v1_api.CoreV1Api()
    print('K8sclient init end')
def mock_kube_get_kubernetes_config(obj):
    """Return a fake client Configuration pointing at the test fixtures."""
    cfg = Configuration()
    cfg.host = FAKE_API_ENDPOINT
    cfg.ssl_ca_cert = self.ssl_ca_file.name
    cfg.cert_file = self.cert_file.name
    cfg.key_file = self.key_file.name
    return cfg
def __init__(self, alg):
    """Initializes the connection. If the algorithm object includes a remote
    cluster, use that. Otherwise, use this cluster.
    """
    if alg.cluster:
        # Remote cluster: write the inline CA cert to a temp file and build
        # an authenticated ApiClient against the cluster's endpoint.
        fd, cert = tempfile.mkstemp(text=True)
        with open(fd, 'w') as f:
            f.write(alg.cluster.cert)
        conf = Configuration()
        conf.api_key['authorization'] = alg.cluster.token
        conf.host = f'{PROTO}{alg.cluster.host}:{alg.cluster.port}'
        conf.verify_ssl = True
        conf.ssl_ca_cert = cert
        api_client = ApiClient(conf)
        self.corev1 = CoreV1Api(api_client)
        self.custom = CustomObjectsApi(api_client)
    else:
        load_incluster_config()
        self.corev1 = CoreV1Api()
        self.custom = CustomObjectsApi()
    # Read in the manifest (attribute is only set when one exists).
    if alg.manifest:
        self.manifest = yaml.safe_load(alg.manifest.open(mode='r'))
    # Save off the algorithm.
    self.alg = alg
def __init__(self, **kw):
    """Set up the kubernetes client and derive pod/volume resource names."""
    super(KubernetesRunner, self).__init__(**kw)

    config.load_kube_config()
    c = Configuration()
    c.assert_hostname = False
    Configuration.set_default(c)
    self._kclient = core_v1_api.CoreV1Api()

    # The returned contexts were never used; the call is kept because it
    # validates that the kubeconfig has at least one context.
    config.list_kube_config_contexts()

    self._namespace = self._config.resman_opts.get("namespace", "default")

    # Resource names must be DNS-compatible, hence the '_' -> '-' mapping.
    # Fix: "pod" had a pointless f-string prefix with no placeholders.
    self._base_pod_name = pu.sanitized_name("pod", self._config.wid)
    self._base_pod_name = self._base_pod_name.replace("_", "-")

    self._init_pod_name = pu.sanitized_name("init-pod", self._config.wid)
    self._init_pod_name = self._init_pod_name.replace("_", "-")

    self._vol_claim_name = f"{self._base_pod_name}-pvc"
    self._vol_size = self._config.resman_opts.get("volume_size", "500Mi")

    # Lazily-created resources; flags track whether creation has happened.
    self._init_pod_created = False
    self._vol_claim_created = False
def __init__(self):
    """Initializes the connection. If environment variables for remote
    transcode are defined, connect to that cluster.
    """
    host = os.getenv('REMOTE_TRANSCODE_HOST')
    self.remote = host is not None
    if self.remote:
        port = os.getenv('REMOTE_TRANSCODE_PORT')
        token = os.getenv('REMOTE_TRANSCODE_TOKEN')
        cert = os.getenv('REMOTE_TRANSCODE_CERT')
        conf = Configuration()
        conf.api_key['authorization'] = token
        conf.host = f'https://{host}:{port}'
        conf.verify_ssl = True
        conf.ssl_ca_cert = cert
        api_client = ApiClient(conf)
        self.corev1 = CoreV1Api(api_client)
        self.custom = CustomObjectsApi(api_client)
    else:
        load_incluster_config()
        self.corev1 = CoreV1Api()
        self.custom = CustomObjectsApi()
    self.setup_common_steps()
def pytest_collection_modifyitems(config, items):
    # Configure in-cluster kubernetes access with hostname checks off.
    c = Configuration()
    c.assert_hostname = False
    Configuration.set_default(c)
    k8sconfig.load_incluster_config()
    core_api = k8sclient.CoreV1Api()
    # Sanity-check that longhorn is running before modifying the collection.
    check_longhorn(core_api)

    # Skip recurring-job tests when the skip option was passed.
    if config.getoption(SKIP_RECURRING_JOB_OPT):
        skip_upgrade = pytest.mark.skip(reason="remove " +
                                        SKIP_RECURRING_JOB_OPT +
                                        " option to run")
        for item in items:
            if "recurring_job" in item.keywords:
                item.add_marker(skip_upgrade)

    # Skip flexvolume tests on CSI clusters and vice versa.
    using_csi = check_csi(core_api)
    if using_csi:
        skip_upgrade = pytest.mark.skip(reason="environment is not using " +
                                               "flexvolume")
        for item in items:
            if "flexvolume" in item.keywords:
                item.add_marker(skip_upgrade)
    else:
        skip_upgrade = pytest.mark.skip(reason="environment is not " +
                                               "using csi")
        for item in items:
            if "csi" in item.keywords:
                item.add_marker(skip_upgrade)

    # Base-image / mount-disk tests need the MountPropagation condition to be
    # true on every node; stop scanning once one node fails.
    all_nodes_support_mount_propagation = True
    for node in get_longhorn_api_client().list_node():
        node = wait_for_node_mountpropagation_condition(
            get_longhorn_api_client(), node["name"])
        if "conditions" not in node.keys():
            all_nodes_support_mount_propagation = False
        else:
            conditions = node["conditions"]
            # NOTE(review): iteritems() is Python 2 only — on Python 3 this
            # raises AttributeError unless `conditions` is a custom mapping
            # providing iteritems(). TODO confirm the runtime/type.
            for key, condition in conditions.iteritems():
                if key == NODE_CONDITION_MOUNTPROPAGATION and \
                        condition["status"] != CONDITION_STATUS_TRUE:
                    all_nodes_support_mount_propagation = False
                    break
        if not all_nodes_support_mount_propagation:
            break

    if not all_nodes_support_mount_propagation:
        skip_upgrade = pytest.mark.skip(reason="environment does not " +
                                               "support base image")
        skip_node = pytest.mark.skip(reason="environment does not " +
                                            "support mount disk")
        for item in items:
            if "baseimage" in item.keywords:
                item.add_marker(skip_upgrade)
            elif "mountdisk" in item.keywords:
                item.add_marker(skip_node)
def _refresh_oidc(self, provider):
    """Refresh the OIDC id-token for *provider* using its refresh token.

    Fetches the issuer's well-known configuration (optionally verifying TLS
    against the provider's inline CA), exchanges the refresh token at the
    token endpoint, and writes the new id-token/refresh-token back into the
    provider config. Returns silently on discovery or client-id failure.
    """
    config = Configuration()
    if 'idp-certificate-authority-data' in provider['config']:
        # Materialize the inline base64 CA data into a temp file so the
        # HTTP layer can verify the issuer's certificate.
        ca_cert = tempfile.NamedTemporaryFile(delete=True)
        if PY3:
            cert = base64.b64decode(
                provider['config']['idp-certificate-authority-data']
            ).decode('utf-8')
        else:
            # "==" appended presumably to guarantee padding on Python 2
            # (surplus '=' is tolerated there) — TODO confirm.
            cert = base64.b64decode(
                provider['config']['idp-certificate-authority-data'] + "=="
            )
        with open(ca_cert.name, 'w') as fh:
            fh.write(cert)
        config.ssl_ca_cert = ca_cert.name
    else:
        # No CA provided: fall back to unverified TLS for the discovery call.
        config.verify_ssl = False

    client = ApiClient(configuration=config)
    response = client.request(
        method="GET",
        url="%s/.well-known/openid-configuration"
        % provider['config']['idp-issuer-url']
    )
    if response.status != 200:
        # Discovery failed; give up without raising.
        return
    response = json.loads(response.data)

    request = OAuth2Session(
        client_id=provider['config']['client-id'],
        token=provider['config']['refresh-token'],
        auto_refresh_kwargs={
            'client_id': provider['config']['client-id'],
            'client_secret': provider['config']['client-secret']
        },
        auto_refresh_url=response['token_endpoint']
    )
    try:
        refresh = request.refresh_token(
            token_url=response['token_endpoint'],
            refresh_token=provider['config']['refresh-token'],
            auth=(provider['config']['client-id'],
                  provider['config']['client-secret']),
            # Verify against the temp CA only when verification is enabled.
            verify=config.ssl_ca_cert if config.verify_ssl else None
        )
    except oauthlib.oauth2.rfc6749.errors.InvalidClientIdError:
        return
    provider['config'].value['id-token'] = refresh['id_token']
    provider['config'].value['refresh-token'] = refresh['refresh_token']
def get_api_client(self):
    """Return an ApiClient using bearer-token auth with TLS verification off."""
    cfg = Configuration()
    cfg.host = self.cluster.api
    cfg.verify_ssl = False
    cfg.api_key['authorization'] = self.cluster.token
    cfg.api_key_prefix['authorization'] = 'Bearer'
    return ApiClient(cfg)
def connect(params):
    '''return CustomObjectsApi object after parsing user options.'''
    config.load_kube_config()
    cfg = Configuration()
    # The "insecure" option disables TLS verification for this client only.
    if params.get("insecure"):
        cfg.verify_ssl = False
    return client.CustomObjectsApi(
        api_client=client.ApiClient(configuration=cfg))