def __init__(self, ow_config):
    logger.debug("Creating OpenWhisk client")
    self.log_active = logger.getEffectiveLevel() != logging.WARNING
    self.name = 'openwhisk'
    self.ow_config = ow_config
    self.is_lithops_function = is_lithops_function()

    self.user_agent = ow_config['user_agent']

    self.endpoint = ow_config['endpoint']
    self.namespace = ow_config['namespace']
    self.api_key = ow_config['api_key']
    self.insecure = ow_config.get('insecure', False)

    logger.info("Set OpenWhisk Endpoint to {}".format(self.endpoint))
    logger.info("Set OpenWhisk Namespace to {}".format(self.namespace))
    logger.info("Set OpenWhisk Insecure to {}".format(self.insecure))

    self.user_key = self.api_key[:5]
    self.package = 'lithops_v{}_{}'.format(__version__, self.user_key)

    self.cf_client = OpenWhiskClient(endpoint=self.endpoint,
                                     namespace=self.namespace,
                                     api_key=self.api_key,
                                     insecure=self.insecure,
                                     user_agent=self.user_agent)

    log_msg = ('Lithops v{} init for OpenWhisk - Namespace: {}'
               .format(__version__, self.namespace))
    if not self.log_active:
        print(log_msg)
    logger.info("OpenWhisk client created successfully")
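# A hypothetical configuration for the client above; the values are placeholders,
# but the keys mirror exactly what the constructor reads from ow_config.
ow_config_example = {
    'endpoint': 'https://openwhisk.example.com',   # assumption: placeholder URL
    'namespace': 'guest',
    'api_key': '<uuid>:<secret>',                  # assumption: placeholder key
    'insecure': True,                              # optional; defaults to False
    'user_agent': 'lithops/{}'.format(__version__),
}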
def __init__(self, ceph_config, **kwargs):
    logger.debug("Creating Ceph client")
    self.ceph_config = ceph_config
    self.is_lithops_function = is_lithops_function()
    user_agent = ceph_config['user_agent']

    service_endpoint = ceph_config.get('endpoint')
    logger.debug("Setting Ceph endpoint to {}".format(service_endpoint))

    logger.debug("Using access_key and secret_key")
    access_key = ceph_config.get('access_key')
    secret_key = ceph_config.get('secret_key')
    client_config = ibm_botocore.client.Config(
        max_pool_connections=128,
        user_agent_extra=user_agent,
        connect_timeout=CONN_READ_TIMEOUT,
        read_timeout=CONN_READ_TIMEOUT,
        retries={'max_attempts': OBJ_REQ_RETRIES})
    self.cos_client = ibm_boto3.client('s3',
                                       aws_access_key_id=access_key,
                                       aws_secret_access_key=secret_key,
                                       config=client_config,
                                       endpoint_url=service_endpoint)
    logger.debug("Ceph client created successfully")
def __init__(self, lithops_config, internal_storage, token_bucket_q):
    self.config = lithops_config
    self.internal_storage = internal_storage
    self.token_bucket_q = token_bucket_q
    self.is_lithops_function = is_lithops_function()
    self.monitors = []

    self.rabbitmq_monitor = self.config['lithops'].get('rabbitmq_monitor', False)
    if self.rabbitmq_monitor:
        self.rabbit_amqp_url = self.config['rabbitmq'].get('amqp_url')
def __init__(self, config, bucket=None, executor_id=None):
    self.bucket = bucket
    self.config = config
    self.auth = oss2.Auth(self.config['access_key_id'],
                          self.config['access_key_secret'])

    if is_lithops_function():
        self.endpoint = self.config['internal_endpoint']
    else:
        self.endpoint = self.config['public_endpoint']

    # Rebind self.bucket from the bucket name to the oss2.Bucket handle
    self.bucket = oss2.Bucket(self.auth, self.endpoint, self.bucket)
def __init__(self, infinispan_config, **kwargs):
    logger.debug("Creating Infinispan client")
    self.infinispan_config = infinispan_config
    self.is_lithops_function = is_lithops_function()
    self.basicAuth = HTTPBasicAuth(infinispan_config.get('username'),
                                   infinispan_config.get('password'))
    self.endpoint = infinispan_config.get('endpoint')
    self.cache_manager = infinispan_config.get('cache_manager', 'default')
    self.cache_name = self.__generate_cache_name(kwargs['bucket'],
                                                 kwargs['executor_id'])
    self.infinispan_client = requests.session()

    self.__is_server_version_supported()
    res = self.infinispan_client.head(self.endpoint + '/rest/v2/caches/'
                                      + self.cache_name,
                                      auth=self.basicAuth)
    if res.status_code == 404:
        logger.debug('Going to create new Infinispan cache {}'.format(self.cache_name))
        res = self.infinispan_client.post(self.endpoint + '/rest/v2/caches/'
                                          + self.cache_name
                                          + '?template=org.infinispan.DIST_SYNC')
        logger.debug('New Infinispan cache {} created with status {}'
                     .format(self.cache_name, res.status_code))

    logger.debug("Infinispan client created successfully")
def get_runtime_meta(self, key):
    """
    Get the metadata given a runtime name.

    :param key: name of the runtime
    :return: runtime metadata
    """
    path = [RUNTIMES_PREFIX, __version__, key + ".meta.json"]
    filename_local_path = os.path.join(CACHE_DIR, *path)

    if os.path.exists(filename_local_path) and not is_lithops_function():
        logger.debug("Runtime metadata found in local cache")
        with open(filename_local_path, "r") as f:
            runtime_meta = json.loads(f.read())
        return runtime_meta
    else:
        logger.debug("Runtime metadata not found in local cache. "
                     "Retrieving it from storage")
        try:
            obj_key = '/'.join(path).replace('\\', '/')
            logger.debug('Trying to download runtime metadata from: {}://{}/{}'
                         .format(self.backend, self.bucket, obj_key))
            json_str = self.storage.get_object(self.bucket, obj_key)
            logger.debug('Runtime metadata found in storage')
            runtime_meta = json.loads(json_str.decode("ascii"))

            # Save runtime meta to local cache
            if not os.path.exists(os.path.dirname(filename_local_path)):
                os.makedirs(os.path.dirname(filename_local_path))
            with open(filename_local_path, "w") as f:
                f.write(json.dumps(runtime_meta))

            return runtime_meta
        except StorageNoSuchKeyError:
            logger.debug('Runtime metadata not found in storage')
            raise Exception('The runtime {} is not installed.'.format(obj_key))
def put_runtime_meta(self, key, runtime_meta):
    """
    Put the metadata given a runtime config.

    :param key: name of the runtime
    :param runtime_meta: metadata
    """
    path = [RUNTIMES_PREFIX, __version__, key + ".meta.json"]
    obj_key = '/'.join(path).replace('\\', '/')
    logger.debug("Uploading runtime metadata to: {}://{}/{}"
                 .format(self.backend, self.bucket, obj_key))
    self.storage.put_object(self.bucket, obj_key, json.dumps(runtime_meta))

    if not is_lithops_function():
        filename_local_path = os.path.join(CACHE_DIR, *path)
        logger.debug("Storing runtime metadata into local cache: {}"
                     .format(filename_local_path))
        if not os.path.exists(os.path.dirname(filename_local_path)):
            os.makedirs(os.path.dirname(filename_local_path))
        with open(filename_local_path, "w") as f:
            f.write(json.dumps(runtime_meta))
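# Small standalone sketch (RUNTIMES_PREFIX, the version and the runtime key are
# example values, not taken from any config) of how get_runtime_meta and
# put_runtime_meta derive the storage key and the local cache path from the
# same `path` components.
import os

RUNTIMES_PREFIX = 'runtimes'       # assumption: example prefix
version = '2.0.0'                  # assumption: example Lithops version
key = 'python3.8_256MB'            # assumption: example runtime key

path = [RUNTIMES_PREFIX, version, key + '.meta.json']
obj_key = '/'.join(path).replace('\\', '/')       # storage key, always '/'-separated
local_path = os.path.join('/tmp/cache', *path)    # cache path, OS-specific separators

print(obj_key)     # runtimes/2.0.0/python3.8_256MB.meta.json
print(local_path)  # /tmp/cache/runtimes/2.0.0/python3.8_256MB.meta.json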
def __init__(self, config, executor_id, internal_storage):
    self.log_active = logger.getEffectiveLevel() != logging.WARNING
    self.config = config
    self.executor_id = executor_id
    self.storage_config = extract_storage_config(self.config)
    self.internal_storage = internal_storage
    self.compute_config = extract_compute_config(self.config)
    self.is_lithops_function = is_lithops_function()
    self.invokers = []

    self.remote_invoker = self.config['lithops'].get('remote_invoker', False)
    self.workers = self.config['lithops'].get('workers')
    logger.debug('ExecutorID {} - Total available workers: {}'
                 .format(self.executor_id, self.workers))

    self.compute_handlers = []
    cb = self.compute_config['backend']
    regions = self.compute_config[cb].get('region')
    if regions and isinstance(regions, list):
        # One compute handler per configured region. Note that .copy() is
        # shallow, so the nested backend dict is shared across iterations and
        # each Compute must consume its 'region' value at construction time.
        for region in regions:
            compute_config = self.compute_config.copy()
            compute_config[cb]['region'] = region
            compute_handler = Compute(compute_config)
            self.compute_handlers.append(compute_handler)
    else:
        compute_handler = Compute(self.compute_config)
        self.compute_handlers.append(compute_handler)

    logger.debug('ExecutorID {} - Creating function invoker'.format(self.executor_id))

    self.token_bucket_q = Queue()
    self.pending_calls_q = Queue()
    self.running_flag = Value('i', 0)
    self.ongoing_activations = 0

    self.job_monitor = JobMonitor(self.config, self.internal_storage, self.token_bucket_q)
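# Standalone illustration (not part of the invoker) of why the .copy() in the
# multi-region loop above is shallow: the nested per-backend dict is shared
# between copies, so a mutation through one copy is visible through the others.
base = {'ibm_cf': {'region': 'us-east'}}
shallow = base.copy()
shallow['ibm_cf']['region'] = 'eu-gb'
print(base['ibm_cf']['region'])   # 'eu-gb' -- the nested dict was mutated through the copy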
def __init__(self, ibm_cf_config):
    logger.debug("Creating IBM Cloud Functions client")
    self.log_active = logger.getEffectiveLevel() != logging.WARNING
    self.name = 'ibm_cf'
    self.ibm_cf_config = ibm_cf_config
    self.is_lithops_function = is_lithops_function()

    self.user_agent = ibm_cf_config['user_agent']
    self.region = ibm_cf_config['region']
    self.endpoint = ibm_cf_config['regions'][self.region]['endpoint']
    self.namespace = ibm_cf_config['regions'][self.region]['namespace']
    self.namespace_id = ibm_cf_config['regions'][self.region].get('namespace_id', None)
    self.api_key = ibm_cf_config['regions'][self.region].get('api_key', None)
    self.iam_api_key = ibm_cf_config.get('iam_api_key', None)

    logger.info("Set IBM CF Namespace to {}".format(self.namespace))
    logger.info("Set IBM CF Endpoint to {}".format(self.endpoint))

    self.user_key = self.api_key[:5] if self.api_key else self.iam_api_key[:5]
    self.package = 'lithops_v{}_{}'.format(__version__, self.user_key)

    if self.api_key:
        # CF-specific API key: build an HTTP Basic auth header
        enc_api_key = str.encode(self.api_key)
        auth_token = base64.encodebytes(enc_api_key).replace(b'\n', b'')
        auth = 'Basic %s' % auth_token.decode('UTF-8')

        self.cf_client = OpenWhiskClient(endpoint=self.endpoint,
                                         namespace=self.namespace,
                                         auth=auth,
                                         user_agent=self.user_agent)

    elif self.iam_api_key:
        # IAM API key: obtain a bearer token, reusing a cached one when possible
        token_manager = DefaultTokenManager(api_key_id=self.iam_api_key)
        token_filename = os.path.join(CACHE_DIR, 'ibm_cf', 'iam_token')
        token_minutes_diff = 0  # default when no cached token is available

        if 'token' in self.ibm_cf_config:
            logger.debug("Using IBM IAM API Key - Reusing Token from config")
            token_manager._token = self.ibm_cf_config['token']
            token_manager._expiry_time = datetime.strptime(
                self.ibm_cf_config['token_expiry_time'],
                '%Y-%m-%d %H:%M:%S.%f%z')
            token_minutes_diff = int(
                (token_manager._expiry_time - datetime.now(timezone.utc)).total_seconds() / 60.0)
            logger.debug("Token expiry time: {} - Minutes left: {}"
                         .format(token_manager._expiry_time, token_minutes_diff))
        elif os.path.exists(token_filename):
            logger.debug("Using IBM IAM API Key - Reusing Token from local cache")
            token_data = load_yaml_config(token_filename)
            token_manager._token = token_data['token']
            token_manager._expiry_time = datetime.strptime(
                token_data['token_expiry_time'],
                '%Y-%m-%d %H:%M:%S.%f%z')
            token_minutes_diff = int(
                (token_manager._expiry_time - datetime.now(timezone.utc)).total_seconds() / 60.0)
            logger.debug("Token expiry time: {} - Minutes left: {}"
                         .format(token_manager._expiry_time, token_minutes_diff))

        if (token_manager._is_expired() or token_minutes_diff < 11) \
           and not is_lithops_function():
            logger.debug("Using IBM IAM API Key - Token expired. Requesting new token")
            token_manager._token = None
            token_manager.get_token()
            token_data = {}
            token_data['token'] = token_manager._token
            token_data['token_expiry_time'] = token_manager._expiry_time.strftime(
                '%Y-%m-%d %H:%M:%S.%f%z')
            dump_yaml_config(token_filename, token_data)

        ibm_cf_config['token'] = token_manager._token
        ibm_cf_config['token_expiry_time'] = token_manager._expiry_time.strftime(
            '%Y-%m-%d %H:%M:%S.%f%z')

        auth_token = token_manager._token
        auth = 'Bearer ' + auth_token

        self.cf_client = OpenWhiskClient(endpoint=self.endpoint,
                                         namespace=self.namespace_id,
                                         auth=auth,
                                         user_agent=self.user_agent)

    log_msg = ('Lithops v{} init for IBM Cloud Functions - Namespace: {} - '
               'Region: {}'.format(__version__, self.namespace, self.region))
    if not self.log_active:
        print(log_msg)
    logger.info("IBM CF client created successfully")
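# Standalone sketch (illustrative only; the key is a placeholder) of the Basic
# auth value the api_key branch above builds from a CF key of the form
# '<user>:<secret>'.
import base64

api_key = 'user:secret'  # placeholder
auth_token = base64.encodebytes(api_key.encode()).replace(b'\n', b'')
auth = 'Basic %s' % auth_token.decode('UTF-8')
print(auth)  # Basic dXNlcjpzZWNyZXQ=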
if 'r' in mode:
    if 'b' in mode:
        # we could get_data(stream=True) but some streams are not seekable
        return io.BytesIO(storage.get_data(filename))
    else:
        return io.StringIO(storage.get_data(filename).decode())
if 'w' in mode:
    action = partial(storage.put_data, filename)
    if 'b' in mode:
        return DelayedBytesBuffer(action)
    else:
        return DelayedStringBuffer(action)


if not is_lithops_function():
    try:
        _storage = CloudStorage()
    except FileNotFoundError:
        # should never happen unless we are using
        # this module's classes for other purposes
        os = None
        open = None
    else:
        os = CloudFileProxy(_storage)
        open = partial(cloud_open, cloud_storage=_storage)
else:
    # should never be used unless we explicitly import
    # inside a function, which is not a good practice
    os = None
    open = None
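# Hypothetical usage of the proxies defined above. The import path is an
# assumption, and the write path assumes the Delayed*Buffer classes flush to
# storage when closed, as their construction from partial(storage.put_data,
# filename) suggests.
from cloud_proxy import open  # assumption: module name

with open('my-prefix/report.txt', 'w') as f:   # buffered locally
    f.write('hello world')                     # uploaded when the buffer closes

with open('my-prefix/report.txt', 'r') as f:   # StringIO over get_data()
    print(f.read())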
def __init__(self, config=None, runtime=None, runtime_memory=None,
             compute_backend=None, compute_backend_region=None,
             storage_backend=None, storage_backend_region=None,
             workers=None, rabbitmq_monitor=None, remote_invoker=None,
             log_level=None):
    """
    Initialize a FunctionExecutor class.

    :param config: Settings passed in here will override those in config file. Default None.
    :param runtime: Runtime name to use. Default None.
    :param runtime_memory: Memory to use in the runtime. Default None.
    :param compute_backend: Name of the compute backend to use. Default None.
    :param compute_backend_region: Name of the compute backend region to use. Default None.
    :param storage_backend: Name of the storage backend to use. Default None.
    :param storage_backend_region: Name of the storage backend region to use. Default None.
    :param workers: Max number of concurrent workers. Default None.
    :param rabbitmq_monitor: Use RabbitMQ as the monitoring system. Default None.
    :param remote_invoker: Run the invoker remotely. Default None.
    :param log_level: Log level to use during the execution. Default None.

    :return: `FunctionExecutor` object.
    """
    self.is_lithops_function = is_lithops_function()

    if log_level:
        default_logging_config(log_level)
    self.log_active = logger.getEffectiveLevel() != logging.WARNING

    # Overwrite lithops config parameters
    pw_config_ow = {}
    if runtime is not None:
        pw_config_ow['runtime'] = runtime
    if runtime_memory is not None:
        pw_config_ow['runtime_memory'] = int(runtime_memory)
    if compute_backend is not None:
        pw_config_ow['compute_backend'] = compute_backend
    if compute_backend_region is not None:
        pw_config_ow['compute_backend_region'] = compute_backend_region
    if storage_backend is not None:
        pw_config_ow['storage_backend'] = storage_backend
    if storage_backend_region is not None:
        pw_config_ow['storage_backend_region'] = storage_backend_region
    if workers is not None:
        pw_config_ow['workers'] = workers
    if rabbitmq_monitor is not None:
        pw_config_ow['rabbitmq_monitor'] = rabbitmq_monitor
    if remote_invoker is not None:
        pw_config_ow['remote_invoker'] = remote_invoker

    self.config = default_config(copy.deepcopy(config), pw_config_ow)

    self.executor_id = create_executor_id()
    logger.debug('FunctionExecutor created with ID: {}'.format(self.executor_id))

    self.data_cleaner = self.config['lithops'].get('data_cleaner', True)
    self.auto_dismantle = self.config['lithops'].get('auto_dismantle', True)

    self.rabbitmq_monitor = self.config['lithops'].get('rabbitmq_monitor', False)
    if self.rabbitmq_monitor:
        if 'rabbitmq' in self.config and 'amqp_url' in self.config['rabbitmq']:
            self.rabbit_amqp_url = self.config['rabbitmq'].get('amqp_url')
        else:
            raise Exception("You cannot use rabbitmq_monitor since 'amqp_url'"
                            " is not present in configuration")

    storage_config = extract_storage_config(self.config)
    self.internal_storage = InternalStorage(storage_config)
    self.storage = self.internal_storage.storage
    self.invoker = FunctionInvoker(self.config, self.executor_id, self.internal_storage)

    self.futures = []
    self.total_jobs = 0
    self.cleaned_jobs = set()
    self.last_call = None
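# A minimal usage sketch for the executor above, assuming a working Lithops
# configuration; lithops.FunctionExecutor is the public entry point this
# constructor belongs to.
import lithops

def double(x):
    return x * 2

fexec = lithops.FunctionExecutor(runtime_memory=256, workers=4)
futures = fexec.map(double, range(4))
print(fexec.get_result(futures))   # [0, 2, 4, 6]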
def get_token(self):
    if (self._token_manager._is_expired() or self._get_token_minutes_diff() < 11) \
       and not is_lithops_function():
        logger.debug("Using IBM IAM API Key - Token expired. Requesting new token")
        self._generate_new_token()

    token = self._token_manager._token
    token_expiry_time = self._token_manager._expiry_time.strftime('%Y-%m-%d %H:%M:%S.%f%z')
    return token, token_expiry_time
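# Standalone illustration (not part of the class) of the refresh rule used
# above: a cached token is reused only while it has at least 11 minutes of
# validity left; otherwise a new one is requested.
from datetime import datetime, timedelta, timezone

def needs_refresh(expiry_time, min_minutes=11):
    """Return True when a cached token should be discarded and re-requested."""
    minutes_left = (expiry_time - datetime.now(timezone.utc)).total_seconds() / 60.0
    return minutes_left < min_minutes

# A token expiring in 5 minutes triggers a refresh; one with an hour left does not.
soon = datetime.now(timezone.utc) + timedelta(minutes=5)
later = datetime.now(timezone.utc) + timedelta(hours=1)
assert needs_refresh(soon) and not needs_refresh(later)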
def __init__(self, ibm_cos_config, **kwargs):
    logger.debug("Creating IBM COS client")
    self.ibm_cos_config = ibm_cos_config
    self.is_lithops_function = is_lithops_function()
    user_agent = ibm_cos_config['user_agent']

    service_endpoint = ibm_cos_config.get('endpoint').replace('http:', 'https:')
    if self.is_lithops_function and 'private_endpoint' in ibm_cos_config:
        service_endpoint = ibm_cos_config.get('private_endpoint')
        if 'api_key' in ibm_cos_config:
            service_endpoint = service_endpoint.replace('http:', 'https:')
    logger.debug("Set IBM COS Endpoint to {}".format(service_endpoint))

    api_key = None
    if 'api_key' in ibm_cos_config:
        api_key = ibm_cos_config.get('api_key')
        api_key_type = 'COS'
    elif 'iam_api_key' in ibm_cos_config:
        api_key = ibm_cos_config.get('iam_api_key')
        api_key_type = 'IAM'

    if {'secret_key', 'access_key'} <= set(ibm_cos_config):
        logger.debug("Using access_key and secret_key")
        access_key = ibm_cos_config.get('access_key')
        secret_key = ibm_cos_config.get('secret_key')
        client_config = ibm_botocore.client.Config(
            max_pool_connections=128,
            user_agent_extra=user_agent,
            connect_timeout=CONN_READ_TIMEOUT,
            read_timeout=CONN_READ_TIMEOUT,
            retries={'max_attempts': OBJ_REQ_RETRIES})
        self.cos_client = ibm_boto3.client('s3',
                                           aws_access_key_id=access_key,
                                           aws_secret_access_key=secret_key,
                                           config=client_config,
                                           endpoint_url=service_endpoint)

    elif api_key is not None:
        client_config = ibm_botocore.client.Config(
            signature_version='oauth',
            max_pool_connections=128,
            user_agent_extra=user_agent,
            connect_timeout=CONN_READ_TIMEOUT,
            read_timeout=CONN_READ_TIMEOUT,
            retries={'max_attempts': OBJ_REQ_RETRIES})

        token_manager = DefaultTokenManager(api_key_id=api_key)
        token_filename = os.path.join(CACHE_DIR, 'ibm_cos', api_key_type.lower() + '_token')
        token_minutes_diff = 0

        if 'token' in self.ibm_cos_config:
            logger.debug("Using IBM {} API Key - Reusing Token from config".format(api_key_type))
            token_manager._token = self.ibm_cos_config['token']
            token_manager._expiry_time = datetime.strptime(
                self.ibm_cos_config['token_expiry_time'],
                '%Y-%m-%d %H:%M:%S.%f%z')
            token_minutes_diff = int(
                (token_manager._expiry_time - datetime.now(timezone.utc)).total_seconds() / 60.0)
            logger.debug("Token expiry time: {} - Minutes left: {}"
                         .format(token_manager._expiry_time, token_minutes_diff))
        elif os.path.exists(token_filename):
            logger.debug("Using IBM {} API Key - Reusing Token from local cache".format(api_key_type))
            token_data = load_yaml_config(token_filename)
            token_manager._token = token_data['token']
            token_manager._expiry_time = datetime.strptime(
                token_data['token_expiry_time'],
                '%Y-%m-%d %H:%M:%S.%f%z')
            token_minutes_diff = int(
                (token_manager._expiry_time - datetime.now(timezone.utc)).total_seconds() / 60.0)
            logger.debug("Token expiry time: {} - Minutes left: {}"
                         .format(token_manager._expiry_time, token_minutes_diff))

        if (token_manager._is_expired() or token_minutes_diff < 11) \
           and not is_lithops_function():
            logger.debug("Using IBM {} API Key - Token expired. Requesting new token"
                         .format(api_key_type))
            token_manager._token = None
            token_manager.get_token()
            token_data = {}
            token_data['token'] = token_manager._token
            token_data['token_expiry_time'] = token_manager._expiry_time.strftime(
                '%Y-%m-%d %H:%M:%S.%f%z')
            dump_yaml_config(token_filename, token_data)

        if token_manager._token:
            self.ibm_cos_config['token'] = token_manager._token
        if token_manager._expiry_time:
            self.ibm_cos_config['token_expiry_time'] = token_manager._expiry_time.strftime(
                '%Y-%m-%d %H:%M:%S.%f%z')

        self.cos_client = ibm_boto3.client('s3',
                                           token_manager=token_manager,
                                           config=client_config,
                                           endpoint_url=service_endpoint)

    logger.debug("IBM COS client created successfully")
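# Standalone illustration (not part of the client) of the timestamp round-trip
# used for the cached token: the expiry is persisted with strftime and parsed
# back with strptime using the same timezone-aware format string.
from datetime import datetime, timezone

FMT = '%Y-%m-%d %H:%M:%S.%f%z'
expiry = datetime.now(timezone.utc)
serialized = expiry.strftime(FMT)             # e.g. '2020-06-01 12:00:00.000000+0000'
restored = datetime.strptime(serialized, FMT)
assert restored == expiry                     # aware datetimes compare by instant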