Example #1
    def __init__(self, config, executor_id, internal_storage, compute_handler):
        self.log_active = logger.getEffectiveLevel() != logging.WARNING
        self.config = config
        self.executor_id = executor_id
        self.storage_config = extract_storage_config(self.config)
        self.internal_storage = internal_storage
        self.compute_handler = compute_handler
        self.is_lithops_worker = is_lithops_worker()
        self.invokers = []

        self.remote_invoker = self.config['serverless'].get(
            'remote_invoker', False)
        self.workers = self.config['lithops'].get('workers')
        logger.debug('ExecutorID {} - Total available workers: {}'.format(
            self.executor_id, self.workers))

        if not is_lithops_worker() and is_unix_system():
            self.token_bucket_q = multiprocessing.Queue()
            self.pending_calls_q = multiprocessing.Queue()
            self.running_flag = multiprocessing.Value('i', 0)
        else:
            self.token_bucket_q = queue.Queue()
            self.pending_calls_q = queue.Queue()
            self.running_flag = SimpleNamespace(value=0)

        self.ongoing_activations = 0
        self.job_monitor = JobMonitor(self.config, self.internal_storage,
                                      self.token_bucket_q)

        logger.debug('ExecutorID {} - Serverless invoker created'.format(
            self.executor_id))
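
Every snippet on this page branches on is_lithops_worker(). For reference, a minimal sketch of such a helper, assuming the worker entry point exports a marker environment variable (the real helper lives in lithops.utils and may differ in detail):

import os

def is_lithops_worker():
    # Sketch: assume the worker entry point sets LITHOPS_WORKER in its
    # environment; on the client machine the variable is absent.
    return 'LITHOPS_WORKER' in os.environ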
Example #2
    def __init__(self, config, executor_id, internal_storage, compute_handler, job_monitor):
        super().__init__(config, executor_id, internal_storage, compute_handler, job_monitor)

        remote_invoker = self.config['serverless'].get('remote_invoker', False)
        self.remote_invoker = remote_invoker if not is_lithops_worker() else False

        self.invokers = []
        self.ongoing_activations = 0
        self.pending_calls_q = queue.Queue()
        self.should_run = False
        self.sync = is_lithops_worker()

        logger.debug('ExecutorID {} - Serverless invoker created'.format(self.executor_id))
Example #3
    def __init__(self, ow_config, internal_storage):
        logger.debug("Creating OpenWhisk client")
        self.name = 'openwhisk'
        self.type = 'faas'
        self.ow_config = ow_config
        self.is_lithops_worker = is_lithops_worker()

        self.user_agent = ow_config['user_agent']

        self.endpoint = ow_config['endpoint']
        self.namespace = ow_config['namespace']
        self.api_key = ow_config['api_key']
        self.insecure = ow_config.get('insecure', False)

        logger.debug("Set OpenWhisk Endpoint to {}".format(self.endpoint))
        logger.debug("Set OpenWhisk Namespace to {}".format(self.namespace))
        logger.debug("Set OpenWhisk Insecure to {}".format(self.insecure))

        self.user_key = self.api_key[:5]
        self.package = 'lithops_v{}_{}'.format(__version__, self.user_key)

        self.cf_client = OpenWhiskClient(endpoint=self.endpoint,
                                         namespace=self.namespace,
                                         api_key=self.api_key,
                                         insecure=self.insecure,
                                         user_agent=self.user_agent)

        msg = COMPUTE_CLI_MSG.format('OpenWhisk')
        logger.info("{} - Namespace: {}".format(msg, self.namespace))
Example #4
    def __init__(self, ow_config, storage_config):
        logger.debug("Creating OpenWhisk client")
        self.log_active = logger.getEffectiveLevel() != logging.WARNING
        self.name = 'openwhisk'
        self.ow_config = ow_config
        self.is_lithops_worker = is_lithops_worker()

        self.user_agent = ow_config['user_agent']

        self.endpoint = ow_config['endpoint']
        self.namespace = ow_config['namespace']
        self.api_key = ow_config['api_key']
        self.insecure = ow_config.get('insecure', False)

        logger.info("Set OpenWhisk Endpoint to {}".format(self.endpoint))
        logger.info("Set OpenWhisk Namespace to {}".format(self.namespace))
        logger.info("Set OpenWhisk Insecure to {}".format(self.insecure))

        self.user_key = self.api_key[:5]
        self.package = 'lithops_v{}_{}'.format(__version__, self.user_key)

        self.cf_client = OpenWhiskClient(endpoint=self.endpoint,
                                         namespace=self.namespace,
                                         api_key=self.api_key,
                                         insecure=self.insecure,
                                         user_agent=self.user_agent)

        log_msg = ('Lithops v{} init for OpenWhisk - Namespace: {}'
                   .format(__version__, self.namespace))
        if not self.log_active:
            print(log_msg)
        logger.info("OpenWhisk client created successfully")
Example #5
    def __init__(self, config, executor_id, internal_storage, compute_handler,
                 job_monitor):
        log_level = logger.getEffectiveLevel()
        self.log_active = log_level != logging.WARNING
        self.log_level = LOGGER_LEVEL if not self.log_active else log_level

        self.config = config
        self.executor_id = executor_id
        self.storage_config = extract_storage_config(self.config)
        self.internal_storage = internal_storage
        self.compute_handler = compute_handler
        self.is_lithops_worker = is_lithops_worker()
        self.job_monitor = job_monitor

        self.workers = self.config['lithops'].get('workers')
        if self.workers:
            logger.debug('ExecutorID {} - Total workers: {}'.format(
                self.executor_id, self.workers))

        prom_enabled = self.config['lithops'].get('telemetry', False)
        prom_config = self.config.get('prometheus', {})
        self.prometheus = PrometheusExporter(prom_enabled, prom_config)

        self.mode = self.config['lithops']['mode']
        self.backend = self.config['lithops']['backend']
        self.runtime_name = self.config[self.backend]['runtime']

        self.customized_runtime = self.config[self.mode].get(
            'customized_runtime', False)
Example #6
    def __init__(self, aliyun_fc_config, storage_config):
        logger.debug("Creating Aliyun Function Compute client")
        self.log_active = logger.getEffectiveLevel() != logging.WARNING
        self.name = 'aliyun_fc'
        self.config = aliyun_fc_config
        self.is_lithops_worker = is_lithops_worker()
        self.version = 'lithops_{}'.format(__version__)

        self.user_agent = aliyun_fc_config['user_agent']
        if 'service' in aliyun_fc_config:
            self.service_name = aliyun_fc_config['service']
        else:
            self.service_name = aliyunfc_config.SERVICE_NAME

        self.endpoint = aliyun_fc_config['public_endpoint']
        self.access_key_id = aliyun_fc_config['access_key_id']
        self.access_key_secret = aliyun_fc_config['access_key_secret']

        logger.debug("Set Aliyun FC Service to {}".format(self.service_name))
        logger.debug("Set Aliyun FC Endpoint to {}".format(self.endpoint))

        self.fc_client = fc2.Client(endpoint=self.endpoint,
                                    accessKeyID=self.access_key_id,
                                    accessKeySecret=self.access_key_secret)

        msg = COMPUTE_CLI_MSG.format('Aliyun Function Compute')
        logger.info("{}".format(msg))
Example #7
    def __init__(self, code_engine_config, storage_config):
        logger.debug("Creating Code Engine client")
        self.log_active = logger.getEffectiveLevel() != logging.WARNING
        self.name = 'code_engine'
        self.code_engine_config = code_engine_config
        self.is_lithops_worker = is_lithops_worker()
        self.storage_config = storage_config
        self.internal_storage = InternalStorage(storage_config)

        config.load_kube_config(
            config_file=code_engine_config.get('kubectl_config'))
        self.capi = client.CustomObjectsApi()

        self.user_agent = code_engine_config['user_agent']
        contexts = config.list_kube_config_contexts(
            config_file=code_engine_config.get('kubectl_config'))

        current_context = contexts[1].get('context')
        self.user = current_context.get('user')

        self.user_key = self.user
        self.package = 'lithops_v{}_{}'.format(__version__, self.user_key)
        self.namespace = current_context.get('namespace', 'default')
        self.cluster = current_context.get('cluster')

        log_msg = ('Lithops v{} init for Code Engine - Namespace: {} - '
                   'Cluster: {} - User {}'.format(__version__, self.namespace,
                                                  self.cluster, self.user))
        if not self.log_active:
            print(log_msg)
        self.job_def_ids = set()
        logger.info("Code Engine client created successfully")
Example #8
    def __init__(self, ibm_cos_config, **kwargs):
        logger.debug("Creating IBM COS client")
        self.ibm_cos_config = ibm_cos_config
        self.is_lithops_worker = is_lithops_worker()
        user_agent = self.ibm_cos_config['user_agent']

        api_key = None
        if 'api_key' in self.ibm_cos_config:
            api_key = self.ibm_cos_config.get('api_key')
            api_key_type = 'COS'
        elif 'iam_api_key' in self.ibm_cos_config:
            api_key = self.ibm_cos_config.get('iam_api_key')
            api_key_type = 'IAM'

        service_endpoint = self.ibm_cos_config.get('endpoint').replace('http:', 'https:')
        if self.is_lithops_worker and 'private_endpoint' in self.ibm_cos_config:
            service_endpoint = self.ibm_cos_config.get('private_endpoint')
            if api_key:
                service_endpoint = service_endpoint.replace('http:', 'https:')

        logger.debug("Set IBM COS Endpoint to {}".format(service_endpoint))

        if {'secret_key', 'access_key'} <= set(self.ibm_cos_config):
            logger.debug("Using access_key and secret_key")
            access_key = self.ibm_cos_config.get('access_key')
            secret_key = self.ibm_cos_config.get('secret_key')
            client_config = ibm_botocore.client.Config(max_pool_connections=128,
                                                       user_agent_extra=user_agent,
                                                       connect_timeout=CONN_READ_TIMEOUT,
                                                       read_timeout=CONN_READ_TIMEOUT,
                                                       retries={'max_attempts': OBJ_REQ_RETRIES})

            self.cos_client = ibm_boto3.client('s3',
                                               aws_access_key_id=access_key,
                                               aws_secret_access_key=secret_key,
                                               config=client_config,
                                               endpoint_url=service_endpoint)

        elif api_key is not None:
            client_config = ibm_botocore.client.Config(signature_version='oauth',
                                                       max_pool_connections=128,
                                                       user_agent_extra=user_agent,
                                                       connect_timeout=CONN_READ_TIMEOUT,
                                                       read_timeout=CONN_READ_TIMEOUT,
                                                       retries={'max_attempts': OBJ_REQ_RETRIES})

            token = self.ibm_cos_config.get('token', None)
            token_expiry_time = self.ibm_cos_config.get('token_expiry_time', None)

            iam_token_manager = IBMTokenManager(api_key, api_key_type, token, token_expiry_time)
            token, token_expiry_time = iam_token_manager.get_token()

            self.ibm_cos_config['token'] = token
            self.ibm_cos_config['token_expiry_time'] = token_expiry_time

            self.cos_client = ibm_boto3.client('s3', token_manager=iam_token_manager._token_manager,
                                               config=client_config,
                                               endpoint_url=service_endpoint)

        logger.info("IBM COS client created successfully")
Example #9
    def __init__(self, standalone_config):
        self.config = standalone_config
        self.backend_name = self.config['backend']
        self.runtime = self.config['runtime']
        self.is_lithops_worker = is_lithops_worker()

        self.start_timeout = self.config.get('start_timeout', 300)

        self.auto_dismantle = self.config.get('auto_dismantle')
        self.hard_dismantle_timeout = self.config.get('hard_dismantle_timeout')
        self.soft_dismantle_timeout = self.config.get('soft_dismantle_timeout')

        try:
            module_location = 'lithops.standalone.backends.{}'.format(
                self.backend_name)
            sb_module = importlib.import_module(module_location)
            StandaloneBackend = getattr(sb_module, 'StandaloneBackend')
            self.backend = StandaloneBackend(self.config[self.backend_name])

        except Exception as e:
            logger.error("There was an error trying to create the "
                         "{} standalone backend".format(self.backend_name))
            raise e

        self.log_monitors = {}

        self.ssh_credentials = self.backend.get_ssh_credentials()
        self.ip_address = self.backend.get_ip_address()

        from lithops.util.ssh_client import SSHClient
        self.ssh_client = SSHClient(self.ssh_credentials)

        logger.debug("Standalone handler created successfully")
Example #10
    def __init__(self, code_engine_config, internal_storage):
        logger.debug("Creating IBM Code Engine client")
        self.name = 'code_engine'
        self.type = 'batch'
        self.code_engine_config = code_engine_config
        self.internal_storage = internal_storage

        self.kubecfg_path = code_engine_config.get('kubecfg_path')
        self.user_agent = code_engine_config['user_agent']

        self.iam_api_key = code_engine_config.get('iam_api_key', None)
        self.namespace = code_engine_config.get('namespace', None)
        self.region = code_engine_config.get('region', None)

        self.ibm_token_manager = None
        self.is_lithops_worker = is_lithops_worker()

        if self.namespace and self.region:
            self.cluster = ce_config.CLUSTER_URL.format(self.region)

        if self.iam_api_key and not self.is_lithops_worker:
            self._get_iam_token()

        else:
            try:
                config.load_kube_config(config_file=self.kubecfg_path)
                logger.debug("Loading kubecfg file")
                contexts = config.list_kube_config_contexts(
                    config_file=self.kubecfg_path)
                current_context = contexts[1].get('context')
                self.namespace = current_context.get('namespace')
                self.cluster = current_context.get('cluster')

                if self.iam_api_key:
                    self._get_iam_token()

            except Exception:
                logger.debug('Loading incluster kubecfg')
                config.load_incluster_config()

        self.code_engine_config['namespace'] = self.namespace
        self.code_engine_config['cluster'] = self.cluster
        logger.debug("Set namespace to {}".format(self.namespace))
        logger.debug("Set cluster to {}".format(self.cluster))

        self.custom_api = client.CustomObjectsApi()
        self.core_api = client.CoreV1Api()

        try:
            self.region = self.cluster.split('//')[1].split('.')[1]
        except Exception:
            self.region = self.cluster.replace('http://',
                                               '').replace('https://', '')

        self.jobs = []  # list to store executed jobs (job_keys)

        msg = COMPUTE_CLI_MSG.format('IBM Code Engine')
        logger.info("{} - Region: {}".format(msg, self.region))
Example #11
def current_process():
    """
    Return process object representing the current process
    """
    if is_lithops_worker():
        p = CloudProcess(name=os.environ.get('LITHOPS_MP_WORKER_NAME'))
        p._pid = os.environ.get('__LITHOPS_SESSION_ID', '-1')
        return p
    else:
        return _mp.current_process()
Example #12
    def __init__(self, lithops_config, internal_storage, token_bucket_q):
        self.config = lithops_config
        self.internal_storage = internal_storage
        self.token_bucket_q = token_bucket_q
        self.is_lithops_worker = is_lithops_worker()
        self.monitors = {}

        self.rabbitmq_monitor = self.config['lithops'].get('rabbitmq_monitor', False)
        if self.rabbitmq_monitor:
            self.rabbit_amqp_url = self.config['rabbitmq'].get('amqp_url')
Example #13
    def __init__(self, ibm_cf_config, storage_config):
        logger.debug("Creating IBM Cloud Functions client")
        self.name = 'ibm_cf'
        self.config = ibm_cf_config
        self.is_lithops_worker = is_lithops_worker()

        self.user_agent = ibm_cf_config['user_agent']
        self.region = ibm_cf_config['region']
        self.endpoint = ibm_cf_config['regions'][self.region]['endpoint']
        self.namespace = ibm_cf_config['regions'][self.region]['namespace']
        self.namespace_id = ibm_cf_config['regions'][self.region].get(
            'namespace_id', None)
        self.api_key = ibm_cf_config['regions'][self.region].get(
            'api_key', None)
        self.iam_api_key = ibm_cf_config.get('iam_api_key', None)

        logger.debug("Set IBM CF Namespace to {}".format(self.namespace))
        logger.debug("Set IBM CF Endpoint to {}".format(self.endpoint))

        self.user_key = (self.api_key.split(':')[1][:4]
                         if self.api_key else self.iam_api_key[:4])
        self.package = 'lithops_v{}_{}'.format(__version__, self.user_key)

        if self.api_key:
            enc_api_key = str.encode(self.api_key)
            auth_token = base64.encodebytes(enc_api_key).replace(b'\n', b'')
            auth = 'Basic %s' % auth_token.decode('UTF-8')

            self.cf_client = OpenWhiskClient(endpoint=self.endpoint,
                                             namespace=self.namespace,
                                             auth=auth,
                                             user_agent=self.user_agent)

        elif self.iam_api_key:
            api_key_type = 'IAM'
            token = self.config.get('token', None)
            token_expiry_time = self.config.get('token_expiry_time', None)
            self.ibm_token_manager = IBMTokenManager(self.iam_api_key,
                                                     api_key_type, token,
                                                     token_expiry_time)
            token, token_expiry_time = self.ibm_token_manager.get_token()

            self.config['token'] = token
            self.config['token_expiry_time'] = token_expiry_time

            auth = 'Bearer ' + token

            self.cf_client = OpenWhiskClient(endpoint=self.endpoint,
                                             namespace=self.namespace_id,
                                             auth=auth,
                                             user_agent=self.user_agent)

        msg = COMPUTE_CLI_MSG.format('IBM CF')
        logger.info("{} - Region: {} - Namespace: {}".format(
            msg, self.region, self.namespace))
Example #14
    def get_runtime_meta(self, key):
        """
        Get the metadata given a runtime name.
        :param key: name of the runtime
        :return: runtime metadata
        """

        global RUNTIME_META_CACHE

        path = [RUNTIMES_PREFIX, __version__, key + ".meta.json"]
        filename_local_path = os.path.join(CACHE_DIR, *path)

        if '/'.join(path) in RUNTIME_META_CACHE:
            logger.debug("Runtime metadata found in local memory cache")
            return RUNTIME_META_CACHE['/'.join(path)]

        elif not is_lithops_worker() and os.path.exists(filename_local_path):
            logger.debug("Runtime metadata found in local disk cache")
            with open(filename_local_path, "r") as f:
                runtime_meta = json.loads(f.read())
            RUNTIME_META_CACHE['/'.join(path)] = runtime_meta
            return runtime_meta

        else:
            logger.debug("Runtime metadata not found in local cache. "
                         "Retrieving it from storage")
            try:
                obj_key = '/'.join(path).replace('\\', '/')
                logger.debug('Trying to download runtime metadata from: '
                             '{}://{}/{}'.format(self.backend, self.bucket,
                                                 obj_key))
                json_str = self.storage.get_object(self.bucket, obj_key)
                logger.debug('Runtime metadata found in storage')
                runtime_meta = json.loads(json_str.decode("ascii"))

                # Save runtime meta to cache
                try:
                    os.makedirs(os.path.dirname(filename_local_path),
                                exist_ok=True)
                    with open(filename_local_path, "w") as f:
                        f.write(json.dumps(runtime_meta))
                except Exception as e:
                    logger.error("Could not save runtime meta to local "
                                 "cache: {}".format(e))

                RUNTIME_META_CACHE['/'.join(path)] = runtime_meta
                return runtime_meta
            except StorageNoSuchKeyError:
                logger.debug('Runtime metadata not found in storage')
                return None
Example #15
    def __init__(self, config, bucket=None, executor_id=None):
        self.bucket = bucket
        self.config = config
        self.auth = oss2.Auth(self.config['access_key_id'],
                              self.config['access_key_secret'])

        if is_lithops_worker():
            self.endpoint = self.config['internal_endpoint']
        else:
            self.endpoint = self.config['public_endpoint']

        self.bucket = oss2.Bucket(self.auth, self.endpoint, self.bucket)
Example #16
    def _copy_lithops_to_tmp(self):
        if is_lithops_worker() and os.path.isfile(RUNNER):
            return
        os.makedirs(LITHOPS_TEMP_DIR, exist_ok=True)
        try:
            shutil.rmtree(os.path.join(LITHOPS_TEMP_DIR, 'lithops'))
        except FileNotFoundError:
            pass
        shutil.copytree(LITHOPS_LOCATION,
                        os.path.join(LITHOPS_TEMP_DIR, 'lithops'))
        src_handler = os.path.join(LITHOPS_LOCATION, 'localhost', 'runner.py')
        copyfile(src_handler, RUNNER)
Example #17
    def get_token(self):
        if (self._token_manager._is_expired()
                or self._get_token_minutes_diff() < 11
            ) and not is_lithops_worker():
            logger.debug("Using IBM {} API Key - Requesting new token".format(
                self.api_key_type))
            self._generate_new_token()

        token = self._token_manager._token
        token_expiry_time = self._token_manager._expiry_time.strftime(
            '%Y-%m-%d %H:%M:%S.%f%z')

        return token, token_expiry_time
Example #18
    def get_token(self):
        """
        Gets a new token within a mutex block to prevent multiple threads
        requesting new tokens at the same time.
        """
        if (self._token_manager._is_expired() or self._is_token_expired()) \
           and not is_lithops_worker():
            logger.debug("Token expired. Requesting new token".format(self.api_key_type))
            self._generate_new_token()

        token = self._token_manager._token
        token_expiry_time = self._token_manager._expiry_time.strftime('%Y-%m-%d %H:%M:%S.%f%z')

        return token, token_expiry_time
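
Examples #17 and #18 skip the refresh when running inside a worker; Example #8 shows why that is safe: the host caches the fresh token in the config that travels with the job. A runnable sketch of that host-side flow, with a stand-in token manager (FakeTokenManager is hypothetical):

from datetime import datetime, timedelta

class FakeTokenManager:
    # Stand-in for IBMTokenManager, only to make the flow runnable.
    def get_token(self):
        expiry = datetime.now() + timedelta(hours=1)
        return 'token-abc', expiry.strftime('%Y-%m-%d %H:%M:%S.%f%z')

config = {}
token, token_expiry_time = FakeTokenManager().get_token()
# Ship the cached token with the job config, as Example #8 does, so a
# worker's get_token() returns it without contacting IAM.
config['token'] = token
config['token_expiry_time'] = token_expiry_time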
Example #19
    def __init__(self, standalone_config):
        self.config = standalone_config
        self.backend_name = self.config['backend']
        self.start_timeout = self.config['start_timeout']
        self.exec_mode = self.config['exec_mode']
        self.workers_policy = self.config.get('workers_policy', 'permissive')  # by default not forcing the creation of all workers
        self.is_lithops_worker = is_lithops_worker()

        module_location = f'lithops.standalone.backends.{self.backend_name}'
        sb_module = importlib.import_module(module_location)
        StandaloneBackend = getattr(sb_module, 'StandaloneBackend')
        self.backend = StandaloneBackend(self.config[self.backend_name], self.exec_mode)

        self.jobs = []  # list to store executed jobs (job_keys)
        logger.debug("Standalone handler created successfully")
Example #20
    def __init__(self, config, executor_id, internal_storage, compute_handler):
        self.log_active = logger.getEffectiveLevel() != logging.WARNING
        self.config = config
        self.executor_id = executor_id
        self.storage_config = extract_storage_config(self.config)
        self.internal_storage = internal_storage
        self.compute_handler = compute_handler
        self.is_lithops_worker = is_lithops_worker()

        self.workers = self.config['lithops'].get('workers')
        logger.debug('ExecutorID {} - Total available workers: {}'.format(
            self.executor_id, self.workers))

        executor = self.config['lithops']['executor']
        self.runtime_name = self.config[executor]['runtime']
Example #21
    def __init__(self, config):
        logger.debug("Creating Alibaba Object Storage client")
        self.config = config
        self.auth = oss2.Auth(self.config['access_key_id'], self.config['access_key_secret'])


        if is_lithops_worker():
            self.endpoint = self.config['internal_endpoint']
        else:
            self.endpoint = self.config['public_endpoint']

        # Connection pool size in aliyun_oss must be updated to avoid "connection pool is full" type errors.
        oss2.defaults.connection_pool_size = CONNECTION_POOL_SIZE

        msg = STORAGE_CLI_MSG.format('Alibaba Object Storage')
        logger.info("{} - Endpoint: {}".format(msg, self.endpoint))
Example #22
    def __init__(self, standalone_config):
        self.config = standalone_config
        self.backend_name = self.config['backend']
        self.runtime = self.config['runtime']
        self.is_lithops_worker = is_lithops_worker()

        self.start_timeout = self.config.get('start_timeout', 300)
        self.auto_dismantle = self.config.get('auto_dismantle')
        self.hard_dismantle_timeout = self.config.get('hard_dismantle_timeout')
        self.soft_dismantle_timeout = self.config.get('soft_dismantle_timeout')
        self.pull_runtime = self.config.get('pull_runtime', True)
        self.exec_mode = self.config.get('exec_mode', 'consume')

        module_location = 'lithops.standalone.backends.{}'.format(self.backend_name)
        sb_module = importlib.import_module(module_location)
        StandaloneBackend = getattr(sb_module, 'StandaloneBackend')
        self.backend = StandaloneBackend(self.config[self.backend_name], self.exec_mode)

        self.jobs = []  # list to store executed jobs (job_keys)
        logger.debug("Standalone handler created successfully")
Example #23
File: storage.py, Project: kpavel/lithops
    def put_runtime_meta(self, key, runtime_meta):
        """
        Put the metadata given a runtime config.
        :param key: name of the runtime
        :param runtime_meta: runtime metadata
        """
        path = [RUNTIMES_PREFIX, __version__, key + ".meta.json"]
        obj_key = '/'.join(path).replace('\\', '/')
        logger.debug("Uploading runtime metadata to: {}://{}/{}"
                     .format(self.backend, self.bucket, obj_key))
        self.storage.put_object(self.bucket, obj_key, json.dumps(runtime_meta))

        if not is_lithops_worker():
            filename_local_path = os.path.join(CACHE_DIR, *path)
            logger.debug("Storing runtime metadata into local cache: {}".format(filename_local_path))

            if not os.path.exists(os.path.dirname(filename_local_path)):
                os.makedirs(os.path.dirname(filename_local_path))

            with open(filename_local_path, "w") as f:
                f.write(json.dumps(runtime_meta))
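
put_runtime_meta mirrors the lookup in Example #14: both derive the storage object key and the local cache path from the same three-part path. A runnable sketch of that layout, with illustrative values for the constants:

import os

RUNTIMES_PREFIX = 'runtimes'                        # illustrative value
CACHE_DIR = os.path.expanduser('~/.lithops/cache')  # illustrative value
version, key = '2.0.0', 'my-runtime_256MB'

path = [RUNTIMES_PREFIX, version, key + '.meta.json']
obj_key = '/'.join(path)                     # key in the storage backend
local_path = os.path.join(CACHE_DIR, *path)  # on-disk cache location
print(obj_key)
print(local_path)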
Example #24
    def __init__(self, config, executor_id, internal_storage, compute_handler):
        super().__init__(config, executor_id, internal_storage,
                         compute_handler)

        self.remote_invoker = self.config['serverless'].get(
            'remote_invoker', False)

        self.invokers = []
        self.ongoing_activations = 0

        if not is_lithops_worker() and is_unix_system():
            self.token_bucket_q = mp.Queue()
            self.pending_calls_q = mp.Queue()
            self.running_flag = mp.Value('i', 0)
        else:
            self.token_bucket_q = queue.Queue()
            self.pending_calls_q = queue.Queue()
            self.running_flag = SimpleNamespace(value=0)

        self.job_monitor = JobMonitor(self.config, self.internal_storage,
                                      self.token_bucket_q)

        logger.debug('ExecutorID {} - Serverless invoker created'.format(
            self.executor_id))
Example #25
    def __init__(self, type=None, mode=None, config=None, backend=None, storage=None,
                 runtime=None, runtime_memory=None, rabbitmq_monitor=None,
                 workers=None, remote_invoker=None, log_level=None):

        mode = mode or type

        if mode is None:
            config = default_config(copy.deepcopy(config))
            mode = config['lithops']['mode']

        if mode not in [LOCALHOST, SERVERLESS, STANDALONE]:
            raise Exception("Function executor mode must be one of '{}', '{}' "
                            "or '{}'".format(LOCALHOST, SERVERLESS, STANDALONE))

        if log_level:
            setup_logger(log_level)

        if type is not None:
            logger.warning("'type' parameter is deprecated and it will be removed"
                           "in future releases. Use 'mode' parameter instead")

        config_ow = {'lithops': {'mode': mode}, mode: {}}

        if runtime is not None:
            config_ow[mode]['runtime'] = runtime
        if backend is not None:
            config_ow[mode]['backend'] = backend
        if runtime_memory is not None:
            config_ow[mode]['runtime_memory'] = int(runtime_memory)
        if remote_invoker is not None:
            config_ow[mode]['remote_invoker'] = remote_invoker

        if storage is not None:
            config_ow['lithops']['storage'] = storage
        if workers is not None:
            config_ow['lithops']['workers'] = workers
        if rabbitmq_monitor is not None:
            config_ow['lithops']['rabbitmq_monitor'] = rabbitmq_monitor

        self.config = default_config(copy.deepcopy(config), config_ow)

        self.log_active = logger.getEffectiveLevel() != logging.WARNING
        self.is_lithops_worker = is_lithops_worker()
        self.executor_id = create_executor_id()

        self.data_cleaner = self.config['lithops'].get('data_cleaner', True)
        if self.data_cleaner and not self.is_lithops_worker:
            spawn_cleaner = int(self.executor_id.split('-')[1]) == 0
            atexit.register(self.clean, spawn_cleaner=spawn_cleaner,
                            clean_cloudobjects=False)

        self.rabbitmq_monitor = self.config['lithops'].get('rabbitmq_monitor', False)

        if self.rabbitmq_monitor:
            if 'rabbitmq' in self.config and 'amqp_url' in self.config['rabbitmq']:
                self.rabbit_amqp_url = self.config['rabbitmq'].get('amqp_url')
            else:
                raise Exception("You cannot use rabbitmq_mnonitor since "
                                "'amqp_url' is not present in configuration")

        storage_config = extract_storage_config(self.config)
        self.internal_storage = InternalStorage(storage_config)
        self.storage = self.internal_storage.storage

        self.futures = []
        self.cleaned_jobs = set()
        self.total_jobs = 0
        self.last_call = None

        if mode == LOCALHOST:
            localhost_config = extract_localhost_config(self.config)
            self.compute_handler = LocalhostHandler(localhost_config)

            self.invoker = StandaloneInvoker(self.config,
                                             self.executor_id,
                                             self.internal_storage,
                                             self.compute_handler)
        elif mode == SERVERLESS:
            serverless_config = extract_serverless_config(self.config)
            self.compute_handler = ServerlessHandler(serverless_config,
                                                     storage_config)

            self.invoker = ServerlessInvoker(self.config,
                                             self.executor_id,
                                             self.internal_storage,
                                             self.compute_handler)
        elif mode == STANDALONE:
            standalone_config = extract_standalone_config(self.config)
            self.compute_handler = StandaloneHandler(standalone_config)

            self.invoker = StandaloneInvoker(self.config,
                                             self.executor_id,
                                             self.internal_storage,
                                             self.compute_handler)

        logger.info('{} Executor created with ID: {}'
                    .format(mode.capitalize(), self.executor_id))
Example #26
    def __init__(self,
                 mode=None,
                 config=None,
                 backend=None,
                 storage=None,
                 runtime=None,
                 runtime_memory=None,
                 monitoring=None,
                 workers=None,
                 remote_invoker=None,
                 log_level=False):
        """ Create a FunctionExecutor Class """
        if mode and mode not in [LOCALHOST, SERVERLESS, STANDALONE]:
            raise Exception("Function executor mode must be one of '{}', '{}' "
                            "or '{}'".format(LOCALHOST, SERVERLESS,
                                             STANDALONE))

        self.is_lithops_worker = is_lithops_worker()

        # setup lithops logging
        if not self.is_lithops_worker:
            # if is lithops worker, logging has been set up in entry_point.py
            if log_level:
                setup_lithops_logger(log_level)
            elif (log_level is False
                  and logger.getEffectiveLevel() == logging.WARNING):
                # Set default logging from config
                setup_lithops_logger(*get_log_info(config))

        # load mode of execution
        mode = mode or get_mode(backend, config)
        config_ow = {'lithops': {'mode': mode}, mode: {}}

        # overwrite user-provided parameters
        if runtime is not None:
            config_ow[mode]['runtime'] = runtime
        if backend is not None:
            config_ow[mode]['backend'] = backend
        if runtime_memory is not None:
            config_ow[mode]['runtime_memory'] = int(runtime_memory)
        if remote_invoker is not None:
            config_ow[mode]['remote_invoker'] = remote_invoker

        if storage is not None:
            config_ow['lithops']['storage'] = storage
        if workers is not None:
            config_ow['lithops']['workers'] = workers
        if monitoring is not None:
            config_ow['lithops']['monitoring'] = monitoring

        self.config = default_config(copy.deepcopy(config), config_ow)

        self.executor_id = create_executor_id()

        self.data_cleaner = self.config['lithops'].get('data_cleaner', True)
        if self.data_cleaner and not self.is_lithops_worker:
            spawn_cleaner = int(self.executor_id.split('-')[1]) == 0
            atexit.register(self.clean,
                            spawn_cleaner=spawn_cleaner,
                            clean_cloudobjects=False)

        storage_config = extract_storage_config(self.config)
        self.internal_storage = InternalStorage(storage_config)
        self.storage = self.internal_storage.storage

        self.futures = []
        self.cleaned_jobs = set()
        self.total_jobs = 0
        self.last_call = None

        if mode == LOCALHOST:
            localhost_config = extract_localhost_config(self.config)
            self.compute_handler = LocalhostHandler(localhost_config)
        elif mode == SERVERLESS:
            serverless_config = extract_serverless_config(self.config)
            self.compute_handler = ServerlessHandler(serverless_config,
                                                     self.internal_storage)
        elif mode == STANDALONE:
            standalone_config = extract_standalone_config(self.config)
            self.compute_handler = StandaloneHandler(standalone_config)

        # Create the monitoring system
        monitoring_backend = self.config['lithops']['monitoring'].lower()
        monitoring_config = self.config.get(monitoring_backend)
        self.job_monitor = JobMonitor(monitoring_backend, monitoring_config)

        # Create the invoker
        self.invoker = create_invoker(self.config, self.executor_id,
                                      self.internal_storage,
                                      self.compute_handler, self.job_monitor)

        logger.info('{} Executor created with ID: {}'.format(
            mode.capitalize(), self.executor_id))

        self.log_path = None
Example #27
    def __init__(self,
                 mode: Optional[str] = None,
                 config: Optional[Dict[str, Any]] = None,
                 backend: Optional[str] = None,
                 storage: Optional[str] = None,
                 runtime: Optional[str] = None,
                 runtime_memory: Optional[int] = None,
                 monitoring: Optional[str] = None,
                 max_workers: Optional[int] = None,
                 worker_processes: Optional[int] = None,
                 remote_invoker: Optional[bool] = None,
                 log_level: Optional[str] = False):
        self.is_lithops_worker = is_lithops_worker()
        self.executor_id = create_executor_id()
        self.futures = []
        self.cleaned_jobs = set()
        self.total_jobs = 0
        self.last_call = None

        # setup lithops logging
        if not self.is_lithops_worker:
            # if is lithops worker, logging has been set up in entry_point.py
            if log_level:
                setup_lithops_logger(log_level)
            elif (log_level is False
                  and logger.getEffectiveLevel() == logging.WARNING):
                # Set default logging from config
                setup_lithops_logger(*get_log_info(config))

        # overwrite user-provided parameters
        config_ow = {'lithops': {}, 'backend': {}}
        if runtime is not None:
            config_ow['backend']['runtime'] = runtime
        if runtime_memory is not None:
            config_ow['backend']['runtime_memory'] = int(runtime_memory)
        if remote_invoker is not None:
            config_ow['backend']['remote_invoker'] = remote_invoker
        if worker_processes is not None:
            config_ow['backend']['worker_processes'] = worker_processes
        if max_workers is not None:
            config_ow['backend']['max_workers'] = max_workers

        if mode is not None:
            config_ow['lithops']['mode'] = mode
        if backend is not None:
            config_ow['lithops']['backend'] = backend
        if storage is not None:
            config_ow['lithops']['storage'] = storage
        if monitoring is not None:
            config_ow['lithops']['monitoring'] = monitoring

        # Load configuration
        self.config = default_config(copy.deepcopy(config), config_ow)

        self.data_cleaner = self.config['lithops'].get('data_cleaner', True)
        if self.data_cleaner and not self.is_lithops_worker:
            atexit.register(self.clean,
                            clean_cloudobjects=False,
                            clean_fn=True)

        storage_config = extract_storage_config(self.config)
        self.internal_storage = InternalStorage(storage_config)
        self.storage = self.internal_storage.storage

        self.backend = self.config['lithops']['backend']
        self.mode = self.config['lithops']['mode']

        if self.mode == LOCALHOST:
            localhost_config = extract_localhost_config(self.config)
            self.compute_handler = LocalhostHandler(localhost_config)
        elif self.mode == SERVERLESS:
            serverless_config = extract_serverless_config(self.config)
            self.compute_handler = ServerlessHandler(serverless_config,
                                                     self.internal_storage)
        elif self.mode == STANDALONE:
            standalone_config = extract_standalone_config(self.config)
            self.compute_handler = StandaloneHandler(standalone_config)

        # Create the monitoring system
        self.job_monitor = JobMonitor(executor_id=self.executor_id,
                                      internal_storage=self.internal_storage,
                                      config=self.config)

        # Create the invoker
        self.invoker = create_invoker(config=self.config,
                                      executor_id=self.executor_id,
                                      internal_storage=self.internal_storage,
                                      compute_handler=self.compute_handler,
                                      job_monitor=self.job_monitor)

        logger.debug(
            f'Function executor for {self.backend} created with ID: {self.executor_id}'
        )

        self.log_path = None
Example #28
def wait(fs,
         internal_storage=None,
         throw_except=True,
         timeout=None,
         return_when=ALL_COMPLETED,
         download_results=False,
         job_monitor=None,
         threadpool_size=THREADPOOL_SIZE,
         wait_dur_sec=WAIT_DUR_SEC):
    """
    Wait for the Future instances (possibly created by different Executor instances)
    given by fs to complete. Returns a named 2-tuple of sets. The first set, named done,
    contains the futures that completed (finished or cancelled futures) before the wait
    completed. The second set, named not_done, contains the futures that did not complete
    (pending or running futures). timeout can be used to control the maximum number of
    seconds to wait before returning.

    :param fs: Futures list. Default None
    :param throw_except: Re-raise exception if call raised. Default True.
    :param return_when: Percentage of done futures
    :param download_results: Download results. Default false (Only get statuses)
    :param timeout: Timeout of waiting for results.
    :param threadpool_size: Number of threads to use. Default 64
    :param wait_dur_sec: Time interval between each check.

    :return: `(fs_done, fs_notdone)`
        where `fs_done` is a list of futures that have completed
        and `fs_notdone` is a list of futures that have not completed.
    :rtype: 2-tuple of list
    """
    if not fs:
        return

    if type(fs) != list and type(fs) != FuturesList:
        fs = [fs]

    if download_results:
        msg = 'ExecutorID {} - Getting results from functions'.format(
            fs[0].executor_id)
        fs_done = [f for f in fs if f.done]
        fs_not_done = [f for f in fs if not f.done]

    else:
        msg = 'ExecutorID {} - Waiting for {}% of functions to complete'.format(
            fs[0].executor_id, return_when)
        fs_done = [f for f in fs if f.success or f.done]
        fs_not_done = [f for f in fs if not (f.success or f.done)]

    logger.info(msg)

    if not fs_not_done:
        return fs_done, fs_not_done

    if is_unix_system() and timeout is not None:
        logger.debug('Setting waiting timeout to {} seconds'.format(timeout))
        error_msg = 'Timeout of {} seconds exceeded waiting for function activations to finish'.format(
            timeout)
        signal.signal(signal.SIGALRM, partial(timeout_handler, error_msg))
        signal.alarm(timeout)

    # Setup progress bar
    pbar = None
    if not is_lithops_worker() and logger.getEffectiveLevel() == logging.INFO:
        from tqdm.auto import tqdm
        if not is_notebook():
            print()
        pbar = tqdm(bar_format='  {l_bar}{bar}| {n_fmt}/{total_fmt}  ',
                    total=len(fs),
                    disable=None)
        pbar.update(len(fs_done))

    try:
        executors_data = _create_executors_data_from_futures(
            fs, internal_storage)

        if not job_monitor:
            for executor_data in executors_data:
                job_monitor = JobMonitor(
                    executor_id=executor_data.executor_id,
                    internal_storage=executor_data.internal_storage)
                job_monitor.start(fs=executor_data.futures)

        sleep_sec = wait_dur_sec if job_monitor.backend == 'storage' else 0.3

        if return_when == ALWAYS:
            for executor_data in executors_data:
                _get_executor_data(fs,
                                   executor_data,
                                   pbar=pbar,
                                   throw_except=throw_except,
                                   download_results=download_results,
                                   threadpool_size=threadpool_size)
        else:
            while not _check_done(fs, return_when, download_results):
                for executor_data in executors_data:
                    new_data = _get_executor_data(
                        fs,
                        executor_data,
                        pbar=pbar,
                        throw_except=throw_except,
                        download_results=download_results,
                        threadpool_size=threadpool_size)
                time.sleep(0 if new_data else sleep_sec)

    except KeyboardInterrupt as e:
        if download_results:
            not_dones_call_ids = [(f.job_id, f.call_id) for f in fs
                                  if not f.done]
        else:
            not_dones_call_ids = [(f.job_id, f.call_id) for f in fs
                                  if not f.success and not f.done]
        msg = ('Cancelled - Total Activations not done: {}'.format(
            len(not_dones_call_ids)))
        if pbar:
            pbar.close()
            print()
        logger.info(msg)
        raise e

    except Exception as e:
        raise e

    finally:
        if is_unix_system():
            signal.alarm(0)
        if pbar and not pbar.disable:
            pbar.close()
            if not is_notebook():
                print()

    if download_results:
        fs_done = [f for f in fs if f.done]
        fs_notdone = [f for f in fs if not f.done]
    else:
        fs_done = [f for f in fs if f.success or f.done]
        fs_notdone = [f for f in fs if not f.success and not f.done]

    return fs_done, fs_notdone
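
A hedged usage sketch for this wait flow, going through the public FunctionExecutor API rather than calling the module function directly (fexec.wait is assumed to forward to it):

import lithops

def double(x):
    return x * 2

fexec = lithops.FunctionExecutor()
futures = fexec.map(double, range(10))
# Block until everything finishes and download the results as we go.
fs_done, fs_notdone = fexec.wait(futures, download_results=True)
print([f.result() for f in fs_done])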
Example #29
    if 'r' in mode:
        if 'b' in mode:
            # we could get_data(stream=True) but some streams are not seekable
            return io.BytesIO(storage.get_data(filename))
        else:
            return io.StringIO(storage.get_data(filename).decode())

    if 'w' in mode:
        action = partial(storage.put_data, filename)
        if 'b' in mode:
            return DelayedBytesBuffer(action)
        else:
            return DelayedStringBuffer(action)


if not is_lithops_worker():
    try:
        _storage = CloudStorage()
    except FileNotFoundError:
        # should never happen unless we are using
        # this module's classes for other purposes
        os = None
        open = None
    else:
        os = CloudFileProxy(_storage)
        open = partial(cloud_open, cloud_storage=_storage)
else:
    # should never be used unless we explicitly import
    # inside a function, which is not a good practice
    os = None
    open = None
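
A hedged usage sketch for the module-level proxies defined above, assuming they are importable from lithops.cloud_proxy: on the host, os and open mimic their stdlib counterparts against object storage.

from lithops.cloud_proxy import os, open

# Writes are buffered locally and uploaded when the file is closed.
with open('bars/data.txt', 'w') as f:
    f.write('hello from the proxy')

print(os.listdir('bars'))  # lists keys under the 'bars' prefix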
Example #30
    def __init__(self,
                 type=None,
                 config=None,
                 backend=None,
                 storage=None,
                 runtime=None,
                 runtime_memory=None,
                 rabbitmq_monitor=None,
                 workers=None,
                 remote_invoker=None,
                 log_level=None):

        if type is None:
            config = default_config(copy.deepcopy(config))
            type = config['lithops']['executor']

        if log_level:
            default_logging_config(log_level)

        config_ow = {'lithops': {'executor': type}, type: {}}

        if runtime is not None:
            config_ow[type]['runtime'] = runtime
        if backend is not None:
            config_ow[type]['backend'] = backend
        if runtime_memory is not None:
            config_ow[type]['runtime_memory'] = int(runtime_memory)
        if remote_invoker is not None:
            config_ow[type]['remote_invoker'] = remote_invoker

        if storage is not None:
            config_ow['lithops']['storage'] = storage
        if workers is not None:
            config_ow['lithops']['workers'] = workers
        if rabbitmq_monitor is not None:
            config_ow['lithops']['rabbitmq_monitor'] = rabbitmq_monitor

        self.config = default_config(copy.deepcopy(config), config_ow)

        self.log_active = logger.getEffectiveLevel() != logging.WARNING
        self.is_lithops_worker = is_lithops_worker()
        self.executor_id = create_executor_id()

        self.data_cleaner = self.config['lithops'].get('data_cleaner', True)
        self.rabbitmq_monitor = self.config['lithops'].get(
            'rabbitmq_monitor', False)

        if self.rabbitmq_monitor:
            if 'rabbitmq' in self.config and 'amqp_url' in self.config[
                    'rabbitmq']:
                self.rabbit_amqp_url = self.config['rabbitmq'].get('amqp_url')
            else:
                raise Exception("You cannot use rabbitmq_mnonitor since "
                                "'amqp_url' is not present in configuration")

        self.storage_config = extract_storage_config(self.config)
        self.internal_storage = InternalStorage(self.storage_config)
        self.storage = self.internal_storage.storage

        self.futures = []
        self.total_jobs = 0
        self.cleaned_jobs = set()
        self.last_call = None

        if type == 'localhost':
            localhost_config = extract_localhost_config(self.config)
            self.compute_handler = LocalhostHandler(localhost_config)

            self.invoker = StandaloneInvoker(self.config, self.executor_id,
                                             self.internal_storage,
                                             self.compute_handler)
        elif type == 'serverless':
            serverless_config = extract_serverless_config(self.config)
            self.compute_handler = ServerlessHandler(serverless_config,
                                                     self.storage_config)

            self.invoker = ServerlessInvoker(self.config, self.executor_id,
                                             self.internal_storage,
                                             self.compute_handler)
        elif type == 'standalone':
            standalone_config = extract_standalone_config(self.config)
            self.compute_handler = StandaloneHandler(standalone_config)

            self.invoker = StandaloneInvoker(self.config, self.executor_id,
                                             self.internal_storage,
                                             self.compute_handler)
        else:
            raise Exception("Function executor type must be one of "
                            "'localhost', 'serverless' or 'standalone'")

        logger.info('{} Executor created with ID: {}'.format(
            type.capitalize(), self.executor_id))
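
Finally, a short end-to-end sketch of how any of these executor variants is driven from user code; parameter names mirror the __init__ signatures above, and the values are illustrative:

import lithops

def double(x):
    return x * 2

# Mode and backend are resolved from configuration, as the constructors show.
fexec = lithops.FunctionExecutor(mode='localhost', log_level='DEBUG')
futures = fexec.map(double, range(4))
print(fexec.get_result(futures))  # -> [0, 2, 4, 6]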