def __init__(self, aliyun_fc_config, storage_config):
    """Initialize the Aliyun Function Compute backend client."""
    logger.debug("Creating Aliyun Function Compute client")
    self.name = 'aliyun_fc'
    self.type = 'faas'
    self.config = aliyun_fc_config
    self.user_agent = aliyun_fc_config['user_agent']

    # Credentials and endpoint come straight from the backend config
    self.endpoint = aliyun_fc_config['public_endpoint']
    self.access_key_id = aliyun_fc_config['access_key_id']
    self.access_key_secret = aliyun_fc_config['access_key_secret']
    self.role_arn = aliyun_fc_config['role_arn']

    # Region is encoded as the second dot-separated label of the endpoint host
    self.region = self.endpoint.split('.')[1]

    # Service name: explicit config value, or a per-user default
    self.default_service_name = f'{aliyunfc_config.SERVICE_NAME}_{self.access_key_id[0:4].lower()}'
    self.service_name = aliyun_fc_config.get('service', self.default_service_name)
    logger.debug("Set Aliyun FC Service to {}".format(self.service_name))
    logger.debug("Set Aliyun FC Endpoint to {}".format(self.endpoint))

    self.fc_client = fc2.Client(
        endpoint=self.endpoint,
        accessKeyID=self.access_key_id,
        accessKeySecret=self.access_key_secret)

    msg = COMPUTE_CLI_MSG.format('Aliyun Function Compute')
    logger.info("{}".format(msg))
def __init__(self, k8s_config, storage_config):
    """Initialize the Kubernetes Job backend client."""
    logger.debug("Creating Kubernetes Job client")
    self.name = 'k8s'
    self.k8s_config = k8s_config
    self.storage_config = storage_config
    self.kubecfg_path = k8s_config.get('kubecfg_path')
    self.user_agent = k8s_config['user_agent']

    try:
        # Prefer a kubeconfig file (local execution)
        config.load_kube_config(config_file=self.kubecfg_path)
        ctx_info = config.list_kube_config_contexts(config_file=self.kubecfg_path)
        # list_kube_config_contexts() returns (all_contexts, active_context)
        active_ctx = ctx_info[1].get('context')
        self.namespace = active_ctx.get('namespace', 'default')
        self.cluster = active_ctx.get('cluster')
        self.k8s_config['namespace'] = self.namespace
        self.k8s_config['cluster'] = self.cluster
        self.is_incluster = False
    except Exception:
        # Running inside a pod: fall back to the in-cluster service account
        logger.debug('Loading incluster config')
        config.load_incluster_config()
        self.namespace = self.k8s_config.get('namespace', 'default')
        self.cluster = self.k8s_config.get('cluster', 'default')
        self.is_incluster = True

    logger.debug("Set namespace to {}".format(self.namespace))
    logger.debug("Set cluster to {}".format(self.cluster))

    self.batch_api = client.BatchV1Api()
    self.core_api = client.CoreV1Api()

    self.jobs = []  # list to store executed jobs (job_keys)

    msg = COMPUTE_CLI_MSG.format('Kubernetes Job')
    logger.info("{} - Namespace: {}".format(msg, self.namespace))
def __init__(self, code_engine_config, storage_config):
    """Initialize the IBM Code Engine backend client from a kubecfg file."""
    logger.debug("Creating IBM Code Engine client")
    self.name = 'code_engine'
    self.code_engine_config = code_engine_config
    self.storage_config = storage_config
    self.kubecfg = code_engine_config.get('kubectl_config')
    self.user_agent = code_engine_config['user_agent']

    config.load_kube_config(config_file=self.kubecfg)
    self.capi = client.CustomObjectsApi()
    self.coreV1Api = client.CoreV1Api()

    # list_kube_config_contexts() returns (all_contexts, active_context)
    ctx_info = config.list_kube_config_contexts(config_file=self.kubecfg)
    active_ctx = ctx_info[1].get('context')
    self.namespace = active_ctx.get('namespace', 'default')
    logger.debug("Set namespace to {}".format(self.namespace))
    self.cluster = active_ctx.get('cluster')
    logger.debug("Set cluster to {}".format(self.cluster))

    # Derive the region from the cluster URL; fall back to the bare host
    try:
        self.region = self.cluster.split('//')[1].split('.')[1]
    except Exception:
        self.region = self.cluster.replace('http://', '').replace('https://', '')

    self.job_def_ids = set()

    msg = COMPUTE_CLI_MSG.format('IBM Code Engine')
    logger.info("{} - Region: {}".format(msg, self.region))
def __init__(self, gcp_functions_config, internal_storage):
    """Initialize the GCP Functions backend client and its Pub/Sub publisher."""
    self.name = 'gcp_functions'
    self.type = 'faas'
    self.gcp_functions_config = gcp_functions_config
    self.package = 'lithops_v' + __version__
    self.region = gcp_functions_config['region']
    self.service_account = gcp_functions_config['service_account']
    self.project = gcp_functions_config['project_name']
    self.credentials_path = gcp_functions_config['credentials_path']
    self.num_retries = gcp_functions_config['retries']
    self.retry_sleep = gcp_functions_config['retry_sleep']
    self.internal_storage = internal_storage

    # Setup Pub/Sub client
    try:
        # Get credentials from the service-account JSON file.
        # Use a context manager so the descriptor is always closed
        # (the original json.load(open(...)) leaked the file handle).
        with open(self.credentials_path) as credentials_file:
            service_account_info = json.load(credentials_file)
        credentials = jwt.Credentials.from_service_account_info(
            service_account_info, audience=AUDIENCE)
        credentials_pub = credentials.with_claims(audience=AUDIENCE)
    except Exception:
        # No usable credentials file: rely on the GCP function environment
        # (application default credentials). Catch Exception only — a bare
        # `except:` would also swallow SystemExit/KeyboardInterrupt.
        credentials_pub = None

    self.publisher_client = pubsub_v1.PublisherClient(credentials=credentials_pub)

    msg = COMPUTE_CLI_MSG.format('GCP Functions')
    logger.info("{} - Region: {} - Project: {}".format(msg, self.region, self.project))
def __init__(self, aws_lambda_config, internal_storage):
    """
    Initialize AWS Lambda Backend
    """
    logger.debug('Creating AWS Lambda client')

    self.name = 'aws_lambda'
    self.aws_lambda_config = aws_lambda_config
    self.internal_storage = internal_storage

    # Per-user package name: lithops version + last 4 chars of the access key id
    self.user_key = aws_lambda_config['access_key_id'][-4:]
    self.package = 'lithops_v{}_{}'.format(lithops.__version__, self.user_key)
    self.region_name = aws_lambda_config['region_name']
    self.role_arn = aws_lambda_config['execution_role']

    logger.debug('Creating Boto3 AWS Session and Lambda Client')
    self.aws_session = boto3.Session(
        aws_access_key_id=aws_lambda_config['access_key_id'],
        aws_secret_access_key=aws_lambda_config['secret_access_key'],
        region_name=self.region_name)
    self.lambda_client = self.aws_session.client(
        'lambda', region_name=self.region_name)

    # Resolve the AWS account id through STS
    sts_client = self.aws_session.client('sts', region_name=self.region_name)
    self.account_id = sts_client.get_caller_identity()["Account"]

    self.ecr_client = self.aws_session.client('ecr', region_name=self.region_name)

    msg = COMPUTE_CLI_MSG.format('AWS Lambda')
    logger.info("{} - Region: {}".format(msg, self.region_name))
def __init__(self, aliyun_fc_config, storage_config):
    """Initialize the Aliyun Function Compute backend client."""
    logger.debug("Creating Aliyun Function Compute client")
    self.log_active = logger.getEffectiveLevel() != logging.WARNING
    self.name = 'aliyun_fc'
    self.config = aliyun_fc_config
    self.is_lithops_worker = is_lithops_worker()
    self.version = 'lithops_{}'.format(__version__)
    self.user_agent = aliyun_fc_config['user_agent']

    # Use the configured service name, or fall back to the default one
    self.service_name = aliyun_fc_config.get('service', aliyunfc_config.SERVICE_NAME)

    self.endpoint = aliyun_fc_config['public_endpoint']
    self.access_key_id = aliyun_fc_config['access_key_id']
    self.access_key_secret = aliyun_fc_config['access_key_secret']

    logger.debug("Set Aliyun FC Service to {}".format(self.service_name))
    logger.debug("Set Aliyun FC Endpoint to {}".format(self.endpoint))

    self.fc_client = fc2.Client(endpoint=self.endpoint,
                                accessKeyID=self.access_key_id,
                                accessKeySecret=self.access_key_secret)

    msg = COMPUTE_CLI_MSG.format('Aliyun Function Compute')
    logger.info("{}".format(msg))
def __init__(self, ibm_vpc_config, mode):
    """Initialize the IBM VPC standalone backend client."""
    logger.debug("Creating IBM VPC client")
    self.name = 'ibm_vpc'
    self.config = ibm_vpc_config
    self.mode = mode

    self.endpoint = self.config['endpoint']
    # Region is the first host label of the endpoint URL
    self.region = self.endpoint.split('//')[1].split('.')[0]
    self.vpc_name = self.config.get('vpc_name')

    self.master = None
    self.workers = []

    self.custom_image = self.config.get('custom_lithops_image')

    # Authenticate against the VPC API with the IAM api key
    iam_api_key = self.config.get('iam_api_key')
    authenticator = IAMAuthenticator(iam_api_key)
    self.ibm_vpc_client = VpcV1('2021-01-19', authenticator=authenticator)
    self.ibm_vpc_client.set_service_url(self.config['endpoint'] + '/v1')

    # Tag API requests with the Lithops user agent
    user_agent_string = 'ibm_vpc_{}'.format(self.config['user_agent'])
    self.ibm_vpc_client._set_user_agent_header(user_agent_string)

    msg = COMPUTE_CLI_MSG.format('IBM VPC')
    logger.info("{} - Region: {}".format(msg, self.region))
def __init__(self, ibm_vpc_config, mode):
    """Initialize the IBM VPC standalone backend client."""
    logger.debug("Creating IBM VPC client")
    self.name = 'ibm_vpc'
    self.config = ibm_vpc_config
    self.mode = mode

    self.endpoint = self.config['endpoint']
    # Region is the first host label of the endpoint URL
    self.region = self.endpoint.split('//')[1].split('.')[0]
    self.vpc_name = self.config.get('vpc_name')
    self.cache_dir = os.path.join(CACHE_DIR, self.name)
    logger.debug('Setting VPC endpoint to: {}'.format(self.endpoint))

    self.master = None
    self.workers = []

    self.custom_image = self.config.get('custom_lithops_image')

    # Authenticate against the VPC API with the IAM api key
    iam_api_key = self.config.get('iam_api_key')
    authenticator = IAMAuthenticator(iam_api_key)
    self.ibm_vpc_client = VpcV1(VPC_API_VERSION, authenticator=authenticator)
    self.ibm_vpc_client.set_service_url(self.config['endpoint'] + '/v1')

    # Tag API requests with the Lithops user agent
    user_agent_string = 'ibm_vpc_{}'.format(self.config['user_agent'])
    self.ibm_vpc_client._set_user_agent_header(user_agent_string)

    # decorate instance public methods with except/retry logic
    decorate_instance(self.ibm_vpc_client, vpc_retry_on_except)

    msg = COMPUTE_CLI_MSG.format('IBM VPC')
    logger.info("{} - Region: {}".format(msg, self.region))
def __init__(self, ow_config, internal_storage):
    """Initialize the OpenWhisk serverless backend client."""
    logger.debug("Creating OpenWhisk client")
    self.name = 'openwhisk'
    self.type = 'faas'
    self.ow_config = ow_config
    self.is_lithops_worker = is_lithops_worker()
    self.user_agent = ow_config['user_agent']

    self.endpoint = ow_config['endpoint']
    self.namespace = ow_config['namespace']
    self.api_key = ow_config['api_key']
    self.insecure = ow_config.get('insecure', False)

    logger.debug("Set OpenWhisk Endpoint to {}".format(self.endpoint))
    logger.debug("Set OpenWhisk Namespace to {}".format(self.namespace))
    logger.debug("Set OpenWhisk Insecure to {}".format(self.insecure))

    # First chars of the api key personalize the package name
    self.user_key = self.api_key[:5]
    self.package = 'lithops_v{}_{}'.format(__version__, self.user_key)

    self.cf_client = OpenWhiskClient(endpoint=self.endpoint,
                                     namespace=self.namespace,
                                     api_key=self.api_key,
                                     insecure=self.insecure,
                                     user_agent=self.user_agent)

    msg = COMPUTE_CLI_MSG.format('OpenWhisk')
    logger.info("{} - Namespace: {}".format(msg, self.namespace))
def __init__(self, code_engine_config, internal_storage):
    """Initialize the IBM Code Engine backend client.

    Credentials/cluster resolution tries, in order: an explicit
    namespace+region pair from config, the active kubecfg context,
    and finally the in-cluster service account.
    """
    logger.debug("Creating IBM Code Engine client")
    self.name = 'code_engine'
    self.type = 'batch'
    self.code_engine_config = code_engine_config
    self.internal_storage = internal_storage
    self.kubecfg_path = code_engine_config.get('kubecfg_path')
    self.user_agent = code_engine_config['user_agent']

    self.iam_api_key = code_engine_config.get('iam_api_key', None)
    self.namespace = code_engine_config.get('namespace', None)
    self.region = code_engine_config.get('region', None)

    self.ibm_token_manager = None
    self.is_lithops_worker = is_lithops_worker()

    if self.namespace and self.region:
        # Namespace/region given explicitly: build the cluster URL directly
        self.cluster = ce_config.CLUSTER_URL.format(self.region)
        # Workers reuse the token shipped in the config; only the client
        # side refreshes it via IAM
        if self.iam_api_key and not self.is_lithops_worker:
            self._get_iam_token()
    else:
        try:
            # Otherwise take namespace/cluster from the active kubecfg context
            config.load_kube_config(config_file=self.kubecfg_path)
            logger.debug("Loading kubecfg file")
            contexts = config.list_kube_config_contexts(
                config_file=self.kubecfg_path)
            # list_kube_config_contexts() returns (all_contexts, active_context)
            current_context = contexts[1].get('context')
            self.namespace = current_context.get('namespace')
            self.cluster = current_context.get('cluster')
            if self.iam_api_key:
                self._get_iam_token()
        except Exception:
            # Running inside a pod: use the in-cluster service account
            # NOTE(review): on this path self.cluster is never assigned, so
            # the writes below would raise AttributeError — presumably
            # namespace/region are always present in-cluster; verify.
            logger.debug('Loading incluster kubecfg')
            config.load_incluster_config()

    # Persist the resolved values back into the config dict
    self.code_engine_config['namespace'] = self.namespace
    self.code_engine_config['cluster'] = self.cluster
    logger.debug("Set namespace to {}".format(self.namespace))
    logger.debug("Set cluster to {}".format(self.cluster))

    self.custom_api = client.CustomObjectsApi()
    self.core_api = client.CoreV1Api()

    # Derive the region from the cluster URL; fall back to the bare host
    try:
        self.region = self.cluster.split('//')[1].split('.')[1]
    except Exception:
        self.region = self.cluster.replace('http://', '').replace('https://', '')

    self.jobs = []  # list to store executed jobs (job_keys)

    msg = COMPUTE_CLI_MSG.format('IBM Code Engine')
    logger.info("{} - Region: {}".format(msg, self.region))
def __init__(self, vm_config, mode):
    """Initialize the generic Virtual Machine backend client."""
    logger.debug("Creating Virtual Machine client")
    self.name = 'vm'
    self.config = vm_config
    self.mode = mode
    self.master = None

    logger.info("{}".format(COMPUTE_CLI_MSG.format('Virtual Machine')))
def __init__(self, ibm_cf_config, storage_config):
    """Initialize the IBM Cloud Functions backend client.

    Supports two auth modes: a classic CF api_key (HTTP basic auth) or
    an IAM api key (bearer token, cached back into the config dict).
    """
    logger.debug("Creating IBM Cloud Functions client")
    self.name = 'ibm_cf'
    self.config = ibm_cf_config
    self.is_lithops_worker = is_lithops_worker()
    self.user_agent = ibm_cf_config['user_agent']

    # Region-specific endpoint/namespace live under the 'regions' mapping
    self.region = ibm_cf_config['region']
    self.endpoint = ibm_cf_config['regions'][self.region]['endpoint']
    self.namespace = ibm_cf_config['regions'][self.region]['namespace']
    self.namespace_id = ibm_cf_config['regions'][self.region].get(
        'namespace_id', None)
    self.api_key = ibm_cf_config['regions'][self.region].get(
        'api_key', None)
    self.iam_api_key = ibm_cf_config.get('iam_api_key', None)

    logger.debug("Set IBM CF Namespace to {}".format(self.namespace))
    logger.debug("Set IBM CF Endpoint to {}".format(self.endpoint))

    # Short user key used to build a per-user package name.
    # NOTE(review): raises if neither api_key nor iam_api_key is configured
    # — presumably config validation guarantees one of them; verify.
    self.user_key = self.api_key.split(
        ':')[1][:4] if self.api_key else self.iam_api_key[:4]
    self.package = 'lithops_v{}_{}'.format(__version__, self.user_key)

    if self.api_key:
        # Classic CF credentials: basic auth with base64-encoded api_key
        enc_api_key = str.encode(self.api_key)
        auth_token = base64.encodebytes(enc_api_key).replace(b'\n', b'')
        auth = 'Basic %s' % auth_token.decode('UTF-8')
        self.cf_client = OpenWhiskClient(endpoint=self.endpoint,
                                         namespace=self.namespace,
                                         auth=auth,
                                         user_agent=self.user_agent)
    elif self.iam_api_key:
        # IAM credentials: obtain a bearer token (reusing any token cached
        # in the config) and write the refreshed token back for reuse
        api_key_type = 'IAM'
        token = self.config.get('token', None)
        token_expiry_time = self.config.get('token_expiry_time', None)
        self.ibm_token_manager = IBMTokenManager(self.iam_api_key,
                                                 api_key_type,
                                                 token, token_expiry_time)
        token, token_expiry_time = self.ibm_token_manager.get_token()
        self.config['token'] = token
        self.config['token_expiry_time'] = token_expiry_time
        auth = 'Bearer ' + token
        # IAM namespaces are addressed by their namespace_id
        self.cf_client = OpenWhiskClient(endpoint=self.endpoint,
                                         namespace=self.namespace_id,
                                         auth=auth,
                                         user_agent=self.user_agent)

    msg = COMPUTE_CLI_MSG.format('IBM CF')
    logger.info("{} - Region: {} - Namespace: {}".format(
        msg, self.region, self.namespace))
def __init__(self, localhost_config):
    """Initialize the Localhost compute backend."""
    logger.debug('Creating Localhost compute client')
    self.config = localhost_config

    self.env = {}  # dict to store environments
    self.job_queue = queue.Queue()  # pending jobs, consumed by the job manager
    self.job_manager = None
    self.should_run = True

    logger.info("{}".format(COMPUTE_CLI_MSG.format('Localhost compute')))
def __init__(self, cloudrun_config, storage_config):
    """Initialize the Google Cloud Run backend client."""
    logger.debug("Creating Google Cloud Run client")
    self.name = 'cloudrun'
    self.cloudrun_config = cloudrun_config

    cfg = self.cloudrun_config
    self.region = cfg.get('region')
    self.namespace = cfg.get('namespace', 'default')
    self.cluster = cfg.get('cluster', 'default')
    self.workers = cfg.get('workers')

    msg = COMPUTE_CLI_MSG.format('Google Cloud Run')
    logger.info("{} - Region: {} - Namespace: {}".format(
        msg, self.region, self.namespace))
def __init__(self, config):
    """Initialize a Virtual Machine client for a user-provided host."""
    logger.debug("Creating Virtual Machine client")
    self.config = config
    self.host = self.config.get('host')

    # SSH login details; password and key file are optional
    self.ssh_credentials = {
        'username': self.config.get('ssh_user', 'root'),
        'password': self.config.get('ssh_password', None),
        'key_filename': self.config.get('ssh_key_filename', None)
    }

    msg = COMPUTE_CLI_MSG.format('Virtual Machine')
    logger.info("{} - Host: {}".format(msg, self.host))
def __init__(self, localhost_config):
    """Initialize the Localhost backend, selecting a docker or default env."""
    logger.debug('Creating Localhost client')
    self.config = localhost_config
    self.runtime = self.config['runtime']

    # A runtime name containing '/' is a docker image reference
    if '/' in self.runtime:
        self.env = DockerEnv(self.runtime)
        self.env_type = 'docker'
    else:
        self.env = DefaultEnv()
        self.env_type = 'default'

    logger.info("{}".format(COMPUTE_CLI_MSG.format('Localhost Compute')))
def __init__(self, config, storage_config):
    """Initialize the Azure Functions backend client."""
    logger.debug("Creating Azure Functions client")
    self.name = 'azure_fa'
    self.azure_config = config

    self.resource_group = self.azure_config['resource_group']
    self.storage_account = self.azure_config['storage_account']
    self.account_key = self.azure_config['storage_account_key']
    self.location = self.azure_config['location']
    self.functions_version = self.azure_config['functions_version']

    # Invocations flow through the storage account's Queue service
    self.queue_service_url = 'https://{}.queue.core.windows.net'.format(self.storage_account)
    self.queue_service = QueueServiceClient(account_url=self.queue_service_url,
                                            credential=self.account_key)

    msg = COMPUTE_CLI_MSG.format('Azure Functions')
    logger.info("{} - Location: {}".format(msg, self.location))
def __init__(self, aws_lambda_config, internal_storage):
    """
    Initialize AWS Lambda Backend
    """
    logger.debug('Creating AWS Lambda client')

    self.name = 'aws_lambda'
    self.type = 'faas'
    self.aws_lambda_config = aws_lambda_config
    self.internal_storage = internal_storage
    self.user_agent = aws_lambda_config['user_agent']

    # Per-user package name: lithops version + last 4 chars of the access key id
    self.user_key = aws_lambda_config['access_key_id'][-4:]
    self.package = 'lithops_v{}_{}'.format(lithops.__version__, self.user_key.lower()).replace(
        '.', '-')
    self.region_name = aws_lambda_config['region_name']
    self.role_arn = aws_lambda_config['execution_role']

    logger.debug('Creating Boto3 AWS Session and Lambda Client')
    self.aws_session = boto3.Session(
        aws_access_key_id=aws_lambda_config['access_key_id'],
        aws_secret_access_key=aws_lambda_config['secret_access_key'],
        region_name=self.region_name)
    self.lambda_client = self.aws_session.client(
        'lambda', region_name=self.region_name,
        config=botocore.client.Config(user_agent_extra=self.user_agent))

    # Raw signed-request machinery for direct HTTP invocations
    self.credentials = self.aws_session.get_credentials()
    self.session = URLLib3Session()
    self.host = f'lambda.{self.region_name}.amazonaws.com'

    # Resolve the account id from config, or ask STS if not provided
    if self.aws_lambda_config['account_id']:
        self.account_id = self.aws_lambda_config['account_id']
    else:
        sts_client = self.aws_session.client('sts', region_name=self.region_name)
        self.account_id = sts_client.get_caller_identity()["Account"]

    self.ecr_client = self.aws_session.client('ecr', region_name=self.region_name)

    msg = COMPUTE_CLI_MSG.format('AWS Lambda')
    logger.info("{} - Region: {}".format(msg, self.region_name))
def __init__(self, localhost_config):
    """Initialize the Localhost compute backend, selecting a docker or default env."""
    logger.debug('Creating Localhost compute client')
    self.config = localhost_config
    self.runtime = self.config['runtime']

    # A runtime name containing '/' is a docker image reference
    if '/' in self.runtime:
        pull_runtime = self.config.get('pull_runtime', False)
        self.env = DockerEnv(self.runtime, pull_runtime)
        self.env_type = 'docker'
    else:
        self.env = DefaultEnv()
        self.env_type = 'default'

    self.jobs = {}  # dict to store executed jobs (job_keys) and PIDs

    logger.info("{}".format(COMPUTE_CLI_MSG.format('Localhost compute')))
def __init__(self, code_engine_config, internal_storage):
    """Initialize the IBM Code Engine backend client."""
    logger.debug("Creating IBM Code Engine client")
    self.name = 'code_engine'
    self.type = 'batch'
    self.code_engine_config = code_engine_config
    self.internal_storage = internal_storage
    self.kubecfg_path = code_engine_config.get('kubecfg_path')
    self.user_agent = code_engine_config['user_agent']

    try:
        # Prefer a kubeconfig file (local execution)
        config.load_kube_config(config_file=self.kubecfg_path)
        ctx_info = config.list_kube_config_contexts(
            config_file=self.kubecfg_path)
        # list_kube_config_contexts() returns (all_contexts, active_context)
        active_ctx = ctx_info[1].get('context')
        self.namespace = active_ctx.get('namespace', 'default')
        self.cluster = active_ctx.get('cluster')
        self.code_engine_config['namespace'] = self.namespace
        self.code_engine_config['cluster'] = self.cluster
        self.is_incluster = False
    except Exception:
        # Running inside a pod: fall back to the in-cluster service account
        logger.debug('Loading incluster config')
        config.load_incluster_config()
        self.namespace = self.code_engine_config.get(
            'namespace', 'default')
        self.cluster = self.code_engine_config.get('cluster', 'default')
        self.is_incluster = True

    logger.debug("Set namespace to {}".format(self.namespace))
    logger.debug("Set cluster to {}".format(self.cluster))

    self.capi = client.CustomObjectsApi()
    self.coreV1Api = client.CoreV1Api()

    # Derive the region from the cluster URL; fall back to the bare host
    try:
        self.region = self.cluster.split('//')[1].split('.')[1]
    except Exception:
        self.region = self.cluster.replace('http://', '').replace('https://', '')

    self.jobs = []  # list to store executed jobs (job_keys)

    msg = COMPUTE_CLI_MSG.format('IBM Code Engine')
    logger.info("{} - Region: {}".format(msg, self.region))
def __init__(self, aws_batch_config, internal_storage):
    """
    Initialize AWS Batch Backend
    """
    # Fixed copy-paste defect: this backend is AWS Batch, not AWS Lambda
    logger.debug('Creating AWS Batch client')

    self.name = 'aws_batch'
    self.type = 'batch'
    self.aws_batch_config = aws_batch_config
    self.internal_storage = internal_storage

    # Per-user package name: lithops version + last 4 chars of the access key id
    self.user_key = aws_batch_config['access_key_id'][-4:]
    self.package = 'aws-batch_lithops_v{}_{}'.format(
        lithops.__version__, self.user_key)
    self.region_name = aws_batch_config['region_name']

    # Queue and compute-environment names derived from package + env type
    self._env_type = self.aws_batch_config['env_type']
    self._queue_name = '{}_{}_queue'.format(
        self.package.replace('.', '-'), self._env_type.replace('_', '-'))
    self._compute_env_name = '{}_{}_env'.format(
        self.package.replace('.', '-'), self._env_type.replace('_', '-'))

    logger.debug('Creating Boto3 AWS Session and Batch Client')
    self.aws_session = boto3.Session(
        aws_access_key_id=aws_batch_config['access_key_id'],
        aws_secret_access_key=aws_batch_config['secret_access_key'],
        region_name=self.region_name)
    self.batch_client = self.aws_session.client(
        'batch', region_name=self.region_name)

    # Resolve the account id from config, or ask STS if not provided
    if self.aws_batch_config['account_id']:
        self.account_id = self.aws_batch_config['account_id']
    else:
        sts_client = self.aws_session.client('sts', region_name=self.region_name)
        self.account_id = sts_client.get_caller_identity()["Account"]

    self.ecr_client = self.aws_session.client('ecr', region_name=self.region_name)

    msg = COMPUTE_CLI_MSG.format('AWS Batch')
    logger.info("{} - Region: {}".format(msg, self.region_name))
def __init__(self, ibm_vpc_config):
    """Initialize an IBM VPC client bound to a single existing instance."""
    logger.debug("Creating IBM VPC client")
    self.name = 'ibm_vpc'
    self.config = ibm_vpc_config

    self.endpoint = self.config['endpoint']
    # Region is the first host label of the endpoint URL
    self.region = self.endpoint.split('//')[1].split('.')[0]
    self.instance_id = self.config['instance_id']
    self.ip_address = self.config.get('ip_address', None)
    self.instance_data = None

    # SSH login details; password and key file are optional
    self.ssh_credentials = {
        'username': self.config.get('ssh_user', 'root'),
        'password': self.config.get('ssh_password', None),
        'key_filename': self.config.get('ssh_key_filename', None)
    }

    # Token manager refreshes the IAM bearer token when it expires
    iam_api_key = self.config.get('iam_api_key')
    token = self.config.get('token', None)
    token_expiry_time = self.config.get('token_expiry_time', None)
    api_key_type = 'IAM'
    self.iam_token_manager = IBMTokenManager(iam_api_key, api_key_type,
                                             token, token_expiry_time)

    # Plain requests session tagged with the Lithops user agent
    self.session = requests.session()
    base_user_agent = self.session.headers['User-Agent']
    headers = {'content-type': 'application/json'}
    headers['User-Agent'] = base_user_agent + ' {}'.format(self.config['user_agent'])
    self.session.headers.update(headers)
    self.session.mount('https://', requests.adapters.HTTPAdapter())

    msg = COMPUTE_CLI_MSG.format('IBM VPC')
    logger.info("{} - Region: {} - Host: {}".format(
        msg, self.region, self.ip_address))
def __init__(self, ec2_config, mode):
    """Initialize the AWS EC2 standalone backend client."""
    logger.debug("Creating AWS EC2 client")
    self.name = 'aws_ec2'
    self.config = ec2_config
    self.mode = mode
    self.region = self.config['region_name']
    self.cache_dir = os.path.join(CACHE_DIR, self.name)

    # Tag API calls with the Lithops user agent
    boto_cfg = botocore.client.Config(
        user_agent_extra=self.config['user_agent']
    )
    self.ec2_client = boto3.client(
        'ec2',
        aws_access_key_id=ec2_config['access_key_id'],
        aws_secret_access_key=ec2_config['secret_access_key'],
        config=boto_cfg,
        region_name=self.region
    )

    self.master = None
    self.workers = []

    msg = COMPUTE_CLI_MSG.format('AWS EC2')
    logger.info("{} - Region: {}".format(msg, self.region))
def __init__(self, knative_config, storage_config):
    """Initialize the Knative serverless backend client.

    Resolves the Istio ingress endpoint (from config or by inspecting the
    cluster) and the Knative service host suffix (from config or a local
    cache file).
    """
    self.name = 'knative'
    self.knative_config = knative_config
    self.istio_endpoint = self.knative_config.get('istio_endpoint')
    self.kubecfg_path = self.knative_config.get('kubecfg_path')

    # k8s config can be incluster, in ~/.kube/config or generate kube-config.yaml file and
    # set env variable KUBECONFIG=<path-to-kube-confg>
    try:
        config.load_kube_config(config_file=self.kubecfg_path)
        contexts = config.list_kube_config_contexts(
            config_file=self.kubecfg_path)
        # list_kube_config_contexts() returns (all_contexts, active_context)
        current_context = contexts[1].get('context')
        self.namespace = current_context.get('namespace', 'default')
        self.cluster = current_context.get('cluster')
        self.knative_config['namespace'] = self.namespace
        self.knative_config['cluster'] = self.cluster
        self.is_incluster = False
    except Exception:
        # Running inside a pod: use the in-cluster service account
        config.load_incluster_config()
        self.namespace = self.knative_config.get('namespace', 'default')
        self.cluster = self.knative_config.get('cluster', 'default')
        self.is_incluster = True

    logger.debug("Set namespace to {}".format(self.namespace))
    logger.debug("Set cluster to {}".format(self.cluster))

    self.api = client.CustomObjectsApi()
    self.v1 = client.CoreV1Api()

    if self.istio_endpoint is None:
        # Discover the Istio ingress endpoint from the cluster; best-effort —
        # any failure leaves istio_endpoint as None
        try:
            ingress = self.v1.read_namespaced_service(
                'istio-ingressgateway', 'istio-system')
            http_port = list(
                filter(lambda port: port.port == 80, ingress.spec.ports))[0].node_port
            # https_port = list(filter(lambda port: port.port == 443, ingress.spec.ports))[0].node_port
            if ingress.status.load_balancer.ingress is not None:
                # get loadbalancer ip
                ip = ingress.status.load_balancer.ingress[0].ip
            else:
                # for minikube or a baremetal cluster that has no external load balancer
                node = self.v1.list_node()
                ip = node.items[0].status.addresses[0].address
            if ip and http_port:
                self.istio_endpoint = 'http://{}:{}'.format(ip, http_port)
                self.knative_config['istio_endpoint'] = self.istio_endpoint
        except Exception:
            pass

    if 'service_host_suffix' not in self.knative_config:
        # Not configured: try the locally cached per-cluster value
        # (attribute name 'serice_host_filename' keeps the original typo —
        # other methods may reference it; renaming would break them)
        self.serice_host_filename = os.path.join(
            CACHE_DIR, 'knative', self.cluster, 'service_host')
        self.service_host_suffix = None
        if os.path.exists(self.serice_host_filename):
            serice_host_data = load_yaml_config(self.serice_host_filename)
            self.service_host_suffix = serice_host_data['service_host_suffix']
            self.knative_config['service_host_suffix'] = self.service_host_suffix
    else:
        self.service_host_suffix = self.knative_config['service_host_suffix']
    logger.debug('Loaded service host suffix: {}'.format(self.service_host_suffix))

    msg = COMPUTE_CLI_MSG.format('Knative')
    if self.istio_endpoint:
        msg += ' - Istio Endpoint: {}'.format(self.istio_endpoint)
    elif self.cluster:
        msg += ' - Cluster: {}'.format(self.cluster)
    logger.info("{}".format(msg))
def __init__(self, code_engine_config, internal_storage):
    """Initialize the IBM Code Engine backend client.

    When an IAM api key plus namespace/region are provided, the kubernetes
    client's process-wide default Configuration is replaced with one that
    targets the Code Engine cluster URL with a bearer token; otherwise the
    kubecfg file (or the in-cluster service account) is used.
    """
    logger.debug("Creating IBM Code Engine client")
    self.name = 'code_engine'
    self.type = 'batch'
    self.code_engine_config = code_engine_config
    self.internal_storage = internal_storage
    self.kubecfg_path = code_engine_config.get('kubecfg_path')
    self.user_agent = code_engine_config['user_agent']

    self.iam_api_key = code_engine_config.get('iam_api_key', None)
    self.namespace = code_engine_config.get('namespace', None)
    self.region = code_engine_config.get('region', None)

    if self.namespace and self.region and self.iam_api_key:
        # Fully explicit config: build the cluster URL and install a
        # bearer-token Configuration as the process-wide default
        self.cluster = ce_config.CLUSTER_URL.format(self.region)
        configuration = client.Configuration()
        configuration.host = self.cluster
        token = self._get_iam_token()
        configuration.api_key = {"authorization": "Bearer " + token}
        client.Configuration.set_default(configuration)
    else:
        try:
            # Take namespace/cluster from the active kubecfg context
            config.load_kube_config(config_file=self.kubecfg_path)
            contexts = config.list_kube_config_contexts(
                config_file=self.kubecfg_path)
            # list_kube_config_contexts() returns (all_contexts, active_context)
            current_context = contexts[1].get('context')
            self.namespace = current_context.get('namespace')
            self.cluster = current_context.get('cluster')
            self.code_engine_config['namespace'] = self.namespace
            self.code_engine_config['cluster'] = self.cluster
            if self.iam_api_key:
                # Overlay a fresh IAM bearer token on the kubecfg defaults
                configuration = client.Configuration.get_default_copy()
                token = self._get_iam_token()
                configuration.api_key = {
                    "authorization": "Bearer " + token
                }
                client.Configuration.set_default(configuration)
        except Exception:
            # Running inside a pod: use the in-cluster service account.
            # NOTE(review): on this path self.cluster may stay None unless
            # 'cluster' is present in the config dict — verify with callers.
            logger.debug('Loading incluster config')
            config.load_incluster_config()
            self.namespace = self.code_engine_config.get('namespace')
            self.cluster = self.code_engine_config.get('cluster')

    logger.debug("Set namespace to {}".format(self.namespace))
    logger.debug("Set cluster to {}".format(self.cluster))

    self.capi = client.CustomObjectsApi()
    self.coreV1Api = client.CoreV1Api()

    # Derive the region from the cluster URL; fall back to the bare host
    try:
        self.region = self.cluster.split('//')[1].split('.')[1]
    except Exception:
        self.region = self.cluster.replace('http://', '').replace('https://', '')

    self.jobs = []  # list to store executed jobs (job_keys)

    msg = COMPUTE_CLI_MSG.format('IBM Code Engine')
    logger.info("{} - Region: {}".format(msg, self.region))