Example #1
    def __init__(self, aws_lambda_config, storage_config):
        """
        Initialize AWS Lambda Backend
        """
        logger.debug('Creating AWS Lambda client')

        self.name = 'aws_lambda'
        self.aws_lambda_config = aws_lambda_config

        self.user_key = aws_lambda_config['access_key_id'][-4:]
        self.package = 'lithops_v{}_{}'.format(lithops.__version__,
                                               self.user_key)
        self.region_name = aws_lambda_config['region_name']
        self.role_arn = aws_lambda_config['execution_role']

        logger.debug('Creating Boto3 AWS Session and Lambda Client')
        self.aws_session = boto3.Session(
            aws_access_key_id=aws_lambda_config['access_key_id'],
            aws_secret_access_key=aws_lambda_config['secret_access_key'],
            region_name=self.region_name)
        self.lambda_client = self.aws_session.client(
            'lambda', region_name=self.region_name)

        self.internal_storage = InternalStorage(storage_config)

        msg = COMPUTE_CLI_MSG.format('AWS Lambda')
        logger.info("{} - Region: {}".format(msg, self.region_name))
Example #2
def create(name, mode, memory, timeout, config):
    setup_logger(logging.DEBUG)
    config = default_config(config)
    storage_config = extract_storage_config(config)
    internal_storage = InternalStorage(storage_config)

    mode = config['lithops']['mode'] if not mode else mode
    if mode == SERVERLESS:
        compute_config = extract_serverless_config(config)
        compute_handler = ServerlessHandler(compute_config, storage_config)
        mem = memory if memory else compute_config['runtime_memory']
        to = timeout if timeout else compute_config['runtime_timeout']
        runtime_key = compute_handler.get_runtime_key(name, mem)
        runtime_meta = compute_handler.create_runtime(name, mem, timeout=to)
    elif mode == STANDALONE:
        compute_config = extract_standalone_config(config)
        compute_handler = StandaloneHandler(compute_config)
        runtime_key = compute_handler.get_runtime_key(name)
        runtime_meta = compute_handler.create_runtime(name)
    elif mode == LOCALHOST:
        compute_config = extract_localhost_config(config)
        compute_handler = LocalhostHandler(compute_config)
        runtime_key = compute_handler.get_runtime_key(name)
        runtime_meta = compute_handler.create_runtime(name)
    else:
        raise Exception('Unknown execution mode {}'.format(mode))

    try:
        internal_storage.put_runtime_meta(runtime_key, runtime_meta)
    except Exception:
        raise("Unable to upload 'preinstalled-modules' file into {}".format(internal_storage.backend))
Example #3
def create(name, storage, backend, memory, timeout, config):
    """ Create a serverless runtime """
    if config:
        config = load_yaml_config(config)

    setup_lithops_logger(logging.DEBUG)

    mode = SERVERLESS
    config_ow = {'lithops': {'mode': mode}}
    if storage:
        config_ow['lithops']['storage'] = storage
    if backend:
        config_ow[mode] = {'backend': backend}
    config = default_config(config, config_ow)

    if name:
        verify_runtime_name(name)
    else:
        name = config[mode]['runtime']

    logger.info('Creating new lithops runtime: {}'.format(name))
    storage_config = extract_storage_config(config)
    internal_storage = InternalStorage(storage_config)

    compute_config = extract_serverless_config(config)
    compute_handler = ServerlessHandler(compute_config, storage_config)
    mem = memory if memory else compute_config['runtime_memory']
    to = timeout if timeout else compute_config['runtime_timeout']
    runtime_key = compute_handler.get_runtime_key(name, mem)
    runtime_meta = compute_handler.create_runtime(name, mem, timeout=to)

    try:
        internal_storage.put_runtime_meta(runtime_key, runtime_meta)
    except Exception:
        raise ("Unable to upload 'preinstalled-modules' file into {}".format(internal_storage.backend))
Example #4
def update(name, config, backend, storage):
    """ Update a serverless runtime """
    if config:
        config = load_yaml_config(config)

    verify_runtime_name(name)
    setup_lithops_logger(logging.DEBUG)

    mode = SERVERLESS
    config_ow = {'lithops': {'mode': mode}}
    if storage:
        config_ow['lithops']['storage'] = storage
    if backend:
        config_ow[mode] = {'backend': backend}
    config = default_config(config, config_ow)

    storage_config = extract_storage_config(config)
    internal_storage = InternalStorage(storage_config)
    compute_config = extract_serverless_config(config)
    compute_handler = ServerlessHandler(compute_config, storage_config)

    timeout = compute_config['runtime_timeout']
    logger.info('Updating runtime: {}'.format(name))

    runtimes = compute_handler.list_runtimes(name)

    for runtime in runtimes:
        runtime_key = compute_handler.get_runtime_key(runtime[0], runtime[1])
        runtime_meta = compute_handler.create_runtime(runtime[0], runtime[1], timeout)

        try:
            internal_storage.put_runtime_meta(runtime_key, runtime_meta)
        except Exception:
            raise("Unable to upload 'preinstalled-modules' file into {}".format(internal_storage.backend))
Example #5
def update(name, config, backend, storage, debug):
    """ Update a serverless runtime """
    setup_lithops_logger(logging.DEBUG)

    verify_runtime_name(name)

    if config:
        config = load_yaml_config(config)

    config_ow = set_config_ow(backend, storage, runtime_name=name)
    config = default_config(config, config_ow)

    if config['lithops']['mode'] != SERVERLESS:
        raise Exception('"lithops runtime update" command is only valid for serverless backends')

    storage_config = extract_storage_config(config)
    internal_storage = InternalStorage(storage_config)
    compute_config = extract_serverless_config(config)
    compute_handler = ServerlessHandler(compute_config, internal_storage)

    timeout = compute_config['runtime_timeout']
    logger.info('Updating runtime: {}'.format(name))

    runtimes = compute_handler.list_runtimes(name)

    for runtime in runtimes:
        runtime_key = compute_handler.get_runtime_key(runtime[0], runtime[1])
        runtime_meta = compute_handler.deploy_runtime(runtime[0], runtime[1], timeout)
        internal_storage.put_runtime_meta(runtime_key, runtime_meta)
Example #6
    def __init__(self, gcp_functions_config, storage_config):
        self.log_active = logger.getEffectiveLevel() != logging.WARNING
        self.name = 'gcp_functions'
        self.gcp_functions_config = gcp_functions_config
        self.package = 'lithops_v' + __version__

        self.region = gcp_functions_config['region']
        self.service_account = gcp_functions_config['service_account']
        self.project = gcp_functions_config['project_name']
        self.credentials_path = gcp_functions_config['credentials_path']
        self.num_retries = gcp_functions_config['retries']
        self.retry_sleeps = gcp_functions_config['retry_sleeps']

        # Instantiate storage client (used to upload function bin)
        self.internal_storage = InternalStorage(
            gcp_functions_config['storage'])

        # Set up Pub/Sub client
        try:  # Get credentials from JSON file
            with open(self.credentials_path) as cred_file:
                service_account_info = json.load(cred_file)
            credentials = jwt.Credentials.from_service_account_info(
                service_account_info, audience=AUDIENCE)
            credentials_pub = credentials.with_claims(audience=AUDIENCE)
        except Exception:  # Fall back to the GCP function environment credentials
            credentials_pub = None
        self.publisher_client = pubsub_v1.PublisherClient(
            credentials=credentials_pub)

        log_msg = 'lithops v{} init for GCP Functions - Project: {} - Region: {}'.format(
            __version__, self.project, self.region)
        logger.info(log_msg)

        if not self.log_active:
            print(log_msg)
Example #7
def delete(name, config, backend, storage, debug):
    """ delete a serverless runtime """
    setup_lithops_logger(logging.DEBUG)

    verify_runtime_name(name)

    if config:
        config = load_yaml_config(config)

    config_ow = set_config_ow(backend, storage, runtime_name=name)
    config = default_config(config, config_ow)

    if config['lithops']['mode'] != SERVERLESS:
        raise Exception('"lithops runtime delete" command is only valid for serverless backends')

    storage_config = extract_storage_config(config)
    internal_storage = InternalStorage(storage_config)
    compute_config = extract_serverless_config(config)
    compute_handler = ServerlessHandler(compute_config, internal_storage)

    runtimes = compute_handler.list_runtimes(name)
    for runtime in runtimes:
        compute_handler.delete_runtime(runtime[0], runtime[1])
        runtime_key = compute_handler.get_runtime_key(runtime[0], runtime[1])
        internal_storage.delete_runtime_meta(runtime_key)
Example #8
def delete(name, config, backend, storage):
    """ delete a serverless runtime """
    if config:
        config = load_yaml_config(config)

    setup_lithops_logger(logging.DEBUG)

    mode = SERVERLESS
    config_ow = {'lithops': {'mode': mode}}
    if storage:
        config_ow['lithops']['storage'] = storage
    if backend:
        config_ow[mode] = {'backend': backend}
    config = default_config(config, config_ow)

    if name:
        verify_runtime_name(name)
    else:
        name = config[mode]['runtime']

    storage_config = extract_storage_config(config)
    internal_storage = InternalStorage(storage_config)
    compute_config = extract_serverless_config(config)
    compute_handler = ServerlessHandler(compute_config, storage_config)

    runtimes = compute_handler.list_runtimes(name)
    for runtime in runtimes:
        compute_handler.delete_runtime(runtime[0], runtime[1])
        runtime_key = compute_handler.get_runtime_key(runtime[0], runtime[1])
        internal_storage.delete_runtime_meta(runtime_key)
Example #9
def deploy(name, storage, backend, memory, timeout, config, debug):
    """ deploy a serverless runtime """
    setup_lithops_logger(logging.DEBUG)

    verify_runtime_name(name)

    if config:
        config = load_yaml_config(config)

    config_ow = set_config_ow(backend, storage, runtime_name=name)
    config = default_config(config, config_ow)

    if config['lithops']['mode'] != SERVERLESS:
        raise Exception('"lithops runtime create" command is only valid for serverless backends')

    logger.info('Creating new lithops runtime: {}'.format(name))
    storage_config = extract_storage_config(config)
    internal_storage = InternalStorage(storage_config)

    compute_config = extract_serverless_config(config)
    compute_handler = ServerlessHandler(compute_config, internal_storage)
    mem = memory if memory else compute_config['runtime_memory']
    to = timeout if timeout else compute_config['runtime_timeout']
    runtime_key = compute_handler.get_runtime_key(name, mem)
    runtime_meta = compute_handler.deploy_runtime(name, mem, timeout=to)
    internal_storage.put_runtime_meta(runtime_key, runtime_meta)
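A deployed runtime is then selected by name from the executor. A minimal usage sketch, assuming a working Lithops configuration; 'my-runtime', the memory value and the double function are illustrative:

import lithops

def double(x):
    return x * 2

# runtime / runtime_memory select a runtime deployed as above
fexec = lithops.FunctionExecutor(runtime='my-runtime', runtime_memory=1024)
future = fexec.call_async(double, 21)
print(future.result())  # 42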
Example #10
    def __init__(self, gcp_functions_config, storage_config):
        self.name = 'gcp_functions'
        self.gcp_functions_config = gcp_functions_config
        self.package = 'lithops_v' + __version__

        self.region = gcp_functions_config['region']
        self.service_account = gcp_functions_config['service_account']
        self.project = gcp_functions_config['project_name']
        self.credentials_path = gcp_functions_config['credentials_path']
        self.num_retries = gcp_functions_config['retries']
        self.retry_sleep = gcp_functions_config['retry_sleep']

        # Instantiate storage client (used to upload function bin)
        self.internal_storage = InternalStorage(storage_config)

        # Set up Pub/Sub client
        try:  # Get credentials from JSON file
            with open(self.credentials_path) as cred_file:
                service_account_info = json.load(cred_file)
            credentials = jwt.Credentials.from_service_account_info(
                service_account_info, audience=AUDIENCE)
            credentials_pub = credentials.with_claims(audience=AUDIENCE)
        except Exception:  # Fall back to the GCP function environment credentials
            credentials_pub = None
        self.publisher_client = pubsub_v1.PublisherClient(
            credentials=credentials_pub)

        msg = COMPUTE_CLI_MSG.format('GCP Functions')
        logger.info("{} - Region: {} - Project: {}".format(
            msg, self.region, self.project))
Example #11
    def result(self, throw_except=True, internal_storage=None):
        """
        Return the value returned by the call.
        If the call raised an exception, this method will raise the same exception
        If the future is cancelled before completing then CancelledError will be raised.

        :param throw_except: Reraise exception if call raised. Default true.
        :param internal_storage: Storage handler to poll cloud storage. Default None.
        :return: Result of the call.
        :raises CancelledError: If the job is cancelled before completed.
        :raises TimeoutError: If job is not complete after `timeout` seconds.
        """
        if not self._produce_output:
            self._set_state(ResponseFuture.State.Done)

        if self._state == ResponseFuture.State.New:
            raise ValueError("task not yet invoked")

        if self._state == ResponseFuture.State.Done:
            return self._return_val

        if self._state == ResponseFuture.State.Futures:
            return self._new_futures

        if internal_storage is None:
            internal_storage = InternalStorage(storage_config=self._storage_config)

        self.status(throw_except=throw_except, internal_storage=internal_storage)

        if self._state == ResponseFuture.State.Done:
            return self._return_val

        if not self._call_output:
            call_output = internal_storage.get_call_output(self.executor_id, self.job_id, self.call_id)
            self._output_query_count += 1

            while call_output is None and self._output_query_count < self.GET_RESULT_MAX_RETRIES:
                time.sleep(self.GET_RESULT_SLEEP_SECS)
                call_output = internal_storage.get_call_output(self.executor_id, self.job_id, self.call_id)
                self._output_query_count += 1

            if call_output is None:
                if throw_except:
                    raise Exception('Unable to get the result from call {} - '
                                    'Activation ID: {}'.format(self.call_id, self.activation_id))
                else:
                    self._set_state(ResponseFuture.State.Error)
                    return None

            self._call_output = pickle.loads(call_output)

            self.stats['host_result_done_tstamp'] = time.time()
            self.stats['host_result_query_count'] = self._output_query_count
            logger.debug('ExecutorID {} | JobID {} - Got output from call {} - Activation '
                         'ID: {}'.format(self.executor_id, self.job_id, self.call_id, self.activation_id))

        self._return_val = self._call_output['result'] if not self._return_val else self._return_val
        self._set_state(ResponseFuture.State.Done)
        return self._return_val
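A minimal sketch of how result() is typically reached, assuming a working Lithops configuration; the increment function is illustrative:

import lithops

def increment(x):
    return x + 1

fexec = lithops.FunctionExecutor()
future = fexec.call_async(increment, 41)
# result() blocks, polling the storage backend as implemented above;
# with throw_except=False a failed call yields None instead of raising
print(future.result())  # 42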
Example #12
def delete_runtime(name, config=None):
    config = default_config(config)
    storage_config = extract_storage_config(config)
    internal_storage = InternalStorage(storage_config)
    compute_config = extract_compute_config(config)
    compute_handler = Compute(compute_config)

    runtimes = compute_handler.list_runtimes(name)
    for runtime in runtimes:
        compute_handler.delete_runtime(runtime[0], runtime[1])
        runtime_key = compute_handler.get_runtime_key(runtime[0], runtime[1])
        internal_storage.delete_runtime_meta(runtime_key)
Example #13
def delete(name, config):
    verify_runtime_name(name)
    setup_logger(logging.DEBUG)
    config = default_config(config)
    storage_config = extract_storage_config(config)
    internal_storage = InternalStorage(storage_config)
    compute_config = extract_serverless_config(config)
    compute_handler = ServerlessHandler(compute_config, storage_config)

    runtimes = compute_handler.list_runtimes(name)
    for runtime in runtimes:
        compute_handler.delete_runtime(runtime[0], runtime[1])
        runtime_key = compute_handler.get_runtime_key(runtime[0], runtime[1])
        internal_storage.delete_runtime_meta(runtime_key)
Example #14
    def __init__(self, config, num_invokers, log_level):
        self.config = config
        self.num_invokers = num_invokers
        self.log_level = log_level
        storage_config = extract_storage_config(self.config)
        self.internal_storage = InternalStorage(storage_config)

        self.remote_invoker = self.config['lithops'].get(
            'remote_invoker', False)
        self.rabbitmq_monitor = self.config['lithops'].get(
            'rabbitmq_monitor', False)
        if self.rabbitmq_monitor:
            self.rabbit_amqp_url = self.config['rabbitmq'].get('amqp_url')

        self.num_workers = self.config['lithops'].get('workers')
        logger.info('Total workers: {}'.format(self.num_workers))

        serverless_config = extract_serverless_config(self.config)
        self.serverless_handler = ServerlessHandler(serverless_config,
                                                    storage_config)

        self.token_bucket_q = mp.Queue()
        self.pending_calls_q = mp.Queue()

        self.job_monitor = JobMonitor(self.config, self.internal_storage,
                                      self.token_bucket_q)
Example #15
def clean_all(config=None):
    logger.info('Cleaning all Lithops information')
    config = default_config(config)
    storage_config = extract_storage_config(config)
    internal_storage = InternalStorage(storage_config)

    default_executor = config['lithops']['executor']
    if default_executor == 'localhost':
        compute_config = extract_localhost_config(config)
        compute_handler = LocalhostHandler(compute_config)
    elif default_executor == 'serverless':
        compute_config = extract_serverless_config(config)
        compute_handler = ServerlessHandler(compute_config, storage_config)
    elif default_executor == 'standalone':
        compute_config = extract_standalone_config(config)
        compute_handler = StandaloneHandler(compute_config)
    else:
        raise Exception('Unknown executor {}'.format(default_executor))

    compute_handler.clean()

    # Clean object storage temp dirs
    storage = internal_storage.storage
    clean_bucket(storage, storage_config['bucket'], RUNTIMES_PREFIX, sleep=1)
    clean_bucket(storage, storage_config['bucket'], JOBS_PREFIX, sleep=1)

    # Clean localhost executor temp dirs
    shutil.rmtree(STORAGE_DIR, ignore_errors=True)
    # Clean local lithops cache
    shutil.rmtree(CACHE_DIR, ignore_errors=True)
Example #16
def function_invoker(job_payload):
    """
    Method used as a remote invoker
    """
    config = job_payload['config']
    job = SimpleNamespace(**job_payload['job'])

    env = {
        'LITHOPS_WORKER': 'True',
        'PYTHONUNBUFFERED': 'True',
        '__LITHOPS_SESSION_ID': job.job_key
    }
    os.environ.update(env)

    # Create the monitoring system
    monitoring_backend = config['lithops']['monitoring'].lower()
    monitoring_config = config.get(monitoring_backend)
    job_monitor = JobMonitor(monitoring_backend, monitoring_config)

    storage_config = extract_storage_config(config)
    internal_storage = InternalStorage(storage_config)

    serverless_config = extract_serverless_config(config)
    compute_handler = ServerlessHandler(serverless_config, storage_config)

    # Create the invoker
    invoker = FaaSRemoteInvoker(config, job.executor_id, internal_storage,
                                compute_handler, job_monitor)
    invoker.run_job(job)
Example #17
def _create_jobs_from_futures(fs, internal_storage):
    """
    Creates a dummy job necessary for the job monitor
    """
    jobs = []
    present_jobs = {f.job_key for f in fs}

    for job_key in present_jobs:
        job_data = {}
        job = SimpleNamespace()
        job.futures = [f for f in fs if f.job_key == job_key]
        job.total_calls = len(job.futures)
        f = job.futures[0]
        job.executor_id = f.executor_id
        job.job_id = f.job_id
        job.job_key = f.job_key
        job_data['job'] = job

        if internal_storage and internal_storage.backend == f._storage_config[
                'backend']:
            job_data['internal_storage'] = internal_storage
        else:
            job_data['internal_storage'] = InternalStorage(f._storage_config)

        jobs.append(job_data)

    return jobs
Example #18
    def __init__(self, code_engine_config, storage_config):
        logger.debug("Creating Code Engine client")
        self.log_active = logger.getEffectiveLevel() != logging.WARNING
        self.name = 'code_engine'
        self.code_engine_config = code_engine_config
        self.is_lithops_worker = is_lithops_worker()
        self.storage_config = storage_config
        self.internal_storage = InternalStorage(storage_config)

        config.load_kube_config(
            config_file=code_engine_config.get('kubectl_config'))
        self.capi = client.CustomObjectsApi()

        self.user_agent = code_engine_config['user_agent']
        contexts = config.list_kube_config_contexts(
            config_file=code_engine_config.get('kubectl_config'))

        current_context = contexts[1].get('context')
        self.user = current_context.get('user')

        self.user_key = self.user
        self.package = 'lithops_v{}_{}'.format(__version__, self.user_key)
        self.namespace = current_context.get('namespace', 'default')
        self.cluster = current_context.get('cluster')

        log_msg = ('Lithops v{} init for Code Engine - Namespace: {} - '
                   'Cluster: {} - User: {}'.format(__version__, self.namespace,
                                                   self.cluster, self.user))
        if not self.log_active:
            print(log_msg)
        self.job_def_ids = set()
        logger.info("Code Engine client created successfully")
Example #19
def clean(mode, config, debug):
    log_level = 'INFO' if not debug else 'DEBUG'
    setup_logger(log_level)
    logger.info('Cleaning all Lithops information')
    config = default_config(config)
    storage_config = extract_storage_config(config)
    internal_storage = InternalStorage(storage_config)

    mode = config['lithops']['mode'] if not mode else mode
    if mode == LOCALHOST:
        compute_config = extract_localhost_config(config)
        compute_handler = LocalhostHandler(compute_config)
    elif mode == SERVERLESS:
        compute_config = extract_serverless_config(config)
        compute_handler = ServerlessHandler(compute_config, storage_config)
    elif mode == STANDALONE:
        compute_config = extract_standalone_config(config)
        compute_handler = StandaloneHandler(compute_config)
    else:
        raise Exception('Unknown execution mode {}'.format(mode))

    compute_handler.clean()

    # Clean object storage temp dirs
    storage = internal_storage.storage
    clean_bucket(storage, storage_config['bucket'], RUNTIMES_PREFIX, sleep=1)
    clean_bucket(storage, storage_config['bucket'], JOBS_PREFIX, sleep=1)

    # Clean localhost executor temp dirs
    shutil.rmtree(LITHOPS_TEMP_DIR, ignore_errors=True)
    # Clean local lithops cache
    shutil.rmtree(CACHE_DIR, ignore_errors=True)
Example #20
def function_handler(payload):
    job = SimpleNamespace(**payload)
    setup_lithops_logger(job.log_level)

    processes = min(job.worker_processes, len(job.call_ids))
    logger.info('Tasks received: {} - Concurrent processes: {}'.format(
        len(job.call_ids), processes))

    env = job.extra_env
    env['LITHOPS_WORKER'] = 'True'
    env['PYTHONUNBUFFERED'] = 'True'
    os.environ.update(env)

    storage_config = extract_storage_config(job.config)
    internal_storage = InternalStorage(storage_config)
    job.func = get_function_and_modules(job, internal_storage)
    job_data = get_function_data(job, internal_storage)

    if processes == 1:
        job_queue = queue.Queue()
        for call_id in job.call_ids:
            data = job_data.pop(0)
            job_queue.put((job, call_id, data))
        job_queue.put(ShutdownSentinel())
        process_runner(job_queue)
    else:
        manager = SyncManager()
        manager.start()
        job_queue = manager.Queue()
        job_runners = []

        for call_id in job.call_ids:
            data = job_data.pop(0)
            job_queue.put((job, call_id, data))

        for i in range(processes):
            job_queue.put(ShutdownSentinel())

        for runner_id in range(processes):
            p = mp.Process(target=process_runner, args=(job_queue, ))
            job_runners.append(p)
            p.start()
            logger.info('Worker process {} started'.format(runner_id))

        for runner in job_runners:
            runner.join()

        manager.shutdown()

    # Delete modules path from syspath
    module_path = os.path.join(MODULES_DIR, job.job_key)
    if module_path in sys.path:
        sys.path.remove(module_path)

    # Unset specific job env vars
    for key in job.extra_env:
        os.environ.pop(key, None)
    os.environ.pop('__LITHOPS_TOTAL_EXECUTORS', None)
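The queue-plus-sentinel fan-out above is a generic pattern. A self-contained sketch with a stand-in ShutdownSentinel class (not the Lithops one):

import multiprocessing as mp

class ShutdownSentinel:
    pass

def worker(q):
    while True:
        item = q.get()
        if isinstance(item, ShutdownSentinel):
            break  # one sentinel stops one worker
        print('processed', item)

if __name__ == '__main__':
    q = mp.Queue()
    workers = [mp.Process(target=worker, args=(q,)) for _ in range(2)]
    for p in workers:
        p.start()
    for task in range(4):
        q.put(task)
    for _ in workers:
        q.put(ShutdownSentinel())  # one sentinel per worker
    for p in workers:
        p.join()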
Example #21
def runtime_packages(storage_config):
    logger.info("Extracting preinstalled Python modules...")
    internal_storage = InternalStorage(storage_config)

    runtime_meta = dict()
    mods = list(pkgutil.iter_modules())
    runtime_meta['preinstalls'] = sorted([mod, is_pkg] for _, mod, is_pkg in mods)
    python_version = sys.version_info
    runtime_meta['python_ver'] = str(python_version[0])+"."+str(python_version[1])
    
    activation_id = storage_config['activation_id']

    status_key = create_runtime_meta_key(JOBS_PREFIX, activation_id)
    logger.debug("Runtime metadata key {}".format(status_key))
    dmpd_response_status = json.dumps(runtime_meta)
    drs = sizeof_fmt(len(dmpd_response_status))
    logger.info("Storing execution stats - Size: {}".format(drs))
    internal_storage.put_data(status_key, dmpd_response_status)
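The preinstalled-modules metadata built above can be reproduced locally; a short sketch inspecting the same pkgutil output:

import pkgutil
import sys

preinstalls = sorted([mod, is_pkg] for _, mod, is_pkg in pkgutil.iter_modules())
python_ver = '{}.{}'.format(*sys.version_info[:2])
print(python_ver, preinstalls[:5])  # e.g. '3.8' and the first five [module, is_pkg] pairs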
Example #22
def delete(name, config, backend):
    """ delete a serverless runtime """
    verify_runtime_name(name)
    setup_logger(logging.DEBUG)

    mode = SERVERLESS
    config_ow = {'lithops': {'mode': mode}}
    if backend:
        config_ow[mode] = {'backend': backend}
    config = default_config(config, config_ow)

    storage_config = extract_storage_config(config)
    internal_storage = InternalStorage(storage_config)
    compute_config = extract_serverless_config(config)
    compute_handler = ServerlessHandler(compute_config, storage_config)

    runtimes = compute_handler.list_runtimes(name)
    for runtime in runtimes:
        compute_handler.delete_runtime(runtime[0], runtime[1])
        runtime_key = compute_handler.get_runtime_key(runtime[0], runtime[1])
        internal_storage.delete_runtime_meta(runtime_key)
Example #23
    def __init__(self, config, num_invokers, log_level):
        self.config = config
        self.num_invokers = num_invokers
        self.log_level = log_level
        storage_config = extract_storage_config(self.config)
        self.internal_storage = InternalStorage(storage_config)
        compute_config = extract_compute_config(self.config)

        self.remote_invoker = self.config['lithops'].get(
            'remote_invoker', False)
        self.rabbitmq_monitor = self.config['lithops'].get(
            'rabbitmq_monitor', False)
        if self.rabbitmq_monitor:
            self.rabbit_amqp_url = self.config['rabbitmq'].get('amqp_url')

        self.num_workers = self.config['lithops'].get('workers')
        logger.debug('Total workers: {}'.format(self.num_workers))

        self.compute_handlers = []
        cb = compute_config['backend']
        regions = compute_config[cb].get('region')
        if regions and isinstance(regions, list):
            for region in regions:
                new_compute_config = compute_config.copy()
                new_compute_config[cb]['region'] = region
                compute_handler = Compute(new_compute_config)
                self.compute_handlers.append(compute_handler)
        else:
            if cb == 'localhost':
                global CBH
                if cb in CBH and CBH[cb].compute_handler.num_workers != self.num_workers:
                    del CBH[cb]
                if cb in CBH:
                    logger.info('{} compute handler already started'.format(cb))
                    compute_handler = CBH[cb]
                    self.compute_handlers.append(compute_handler)
                else:
                    logger.info('Starting {} compute handler'.format(cb))
                    compute_handler = Compute(compute_config)
                    CBH[cb] = compute_handler
                    self.compute_handlers.append(compute_handler)
            else:
                compute_handler = Compute(compute_config)
                self.compute_handlers.append(compute_handler)

        self.token_bucket_q = Queue()
        self.pending_calls_q = Queue()

        self.job_monitor = JobMonitor(self.config, self.internal_storage,
                                      self.token_bucket_q)
Example #24
def update(name, config):
    verify_runtime_name(name)
    setup_logger(logging.DEBUG)
    config = default_config(config)
    storage_config = extract_storage_config(config)
    internal_storage = InternalStorage(storage_config)
    compute_config = extract_serverless_config(config)
    compute_handler = ServerlessHandler(compute_config, storage_config)

    timeout = config['lithops']['runtime_timeout']
    logger.info('Updating runtime: {}'.format(name))

    runtimes = compute_handler.list_runtimes(name)

    for runtime in runtimes:
        runtime_key = compute_handler.get_runtime_key(runtime[0], runtime[1])
        runtime_meta = compute_handler.create_runtime(runtime[0], runtime[1], timeout)

        try:
            internal_storage.put_runtime_meta(runtime_key, runtime_meta)
        except Exception:
            raise("Unable to upload 'preinstalled-modules' file into {}".format(internal_storage.backend))
Example #25
def _init_keeper(config):
    global keeper
    compute_config = extract_compute_config(config)
    client = get_remote_client(compute_config)
    storage_config = extract_storage_config(config)
    internal_storage = InternalStorage(storage_config)

    keeper = threading.Thread(target=budget_keeper,
                              args=(
                                  client,
                                  internal_storage,
                              ))
    keeper.start()
Example #26
def create_runtime(name, memory=None, config=None):
    config = default_config(config)
    storage_config = extract_storage_config(config)
    internal_storage = InternalStorage(storage_config)
    compute_config = extract_compute_config(config)
    compute_handler = Compute(compute_config)

    memory = config['lithops']['runtime_memory'] if not memory else memory
    timeout = config['lithops']['runtime_timeout']

    logger.info('Creating runtime: {}, memory: {}'.format(name, memory))

    runtime_key = compute_handler.get_runtime_key(name, memory)
    runtime_meta = compute_handler.create_runtime(name,
                                                  memory,
                                                  timeout=timeout)

    try:
        internal_storage.put_runtime_meta(runtime_key, runtime_meta)
    except Exception:
        raise ("Unable to upload 'preinstalled-modules' file into {}".format(
            internal_storage.backend))
Example #27
def function_handler(payload):
    job = SimpleNamespace(**payload)
    processes = min(job.worker_processes, len(job.call_ids))

    logger.info('Tasks received: {} - Concurrent workers: {}'.format(
        len(job.call_ids), processes))

    storage_config = extract_storage_config(job.config)
    internal_storage = InternalStorage(storage_config)
    job.func = get_function_and_modules(job, internal_storage)
    job_data = get_function_data(job, internal_storage)

    if processes == 1:
        job_queue = queue.Queue()
        for task_id in job.call_ids:
            data = job_data.pop(0)
            job_queue.put((job, task_id, data))
        job_queue.put(ShutdownSentinel())
        process_runner(job_queue, internal_storage)
    else:
        manager = SyncManager()
        manager.start()
        job_queue = manager.Queue()
        job_runners = []

        for runner_id in range(processes):
            p = mp.Process(target=process_runner,
                           args=(job_queue, internal_storage))
            job_runners.append(p)
            p.start()
            logger.info('Worker process {} started'.format(runner_id))

        for call_id in job.call_ids:
            data = job_data.pop(0)
            job_queue.put((job, call_id, data))

        for i in range(processes):
            job_queue.put(ShutdownSentinel())

        for runner in job_runners:
            runner.join()

        manager.shutdown()

    # Delete modules path from syspath
    module_path = os.path.join(MODULES_DIR, job.job_key)
    if module_path in sys.path:
        sys.path.remove(module_path)
Example #28
def get_result(fs,
               throw_except=True,
               timeout=None,
               THREADPOOL_SIZE=128,
               WAIT_DUR_SEC=1,
               internal_storage=None):
    """
    For getting the results from all function activations

    :param fs: Futures list. Default None
    :param throw_except: Reraise exception if call raised. Default True.
    :param verbose: Shows some information prints. Default False
    :param timeout: Timeout for waiting for results.
    :param THREADPOOL_SIZE: Number of threads to use. Default 128
    :param WAIT_DUR_SEC: Time interval between each check.
    :return: The result of the future/s
    """
    if not isinstance(fs, list):
        fs = [fs]

    if not internal_storage:
        internal_storage = InternalStorage(fs[0]._storage_config)

    fs_done, _ = wait(fs=fs,
                      throw_except=throw_except,
                      timeout=timeout,
                      download_results=True,
                      internal_storage=internal_storage,
                      THREADPOOL_SIZE=THREADPOOL_SIZE,
                      WAIT_DUR_SEC=WAIT_DUR_SEC)
    result = []
    fs_done = [f for f in fs_done if not f.futures and f._produce_output]
    for f in fs_done:
        result.append(
            f.result(throw_except=throw_except,
                     internal_storage=internal_storage))

    logger.debug("Finished getting results")

    if len(result) == 1:
        return result[0]

    return result
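In practice this helper is reached through the executor. A minimal usage sketch, assuming a working Lithops configuration; the square function is illustrative:

import lithops

def square(x):
    return x * x

fexec = lithops.FunctionExecutor()
futures = fexec.map(square, [1, 2, 3, 4])
# get_result() waits on all futures and collects their outputs, as implemented above
print(fexec.get_result(fs=futures))  # [1, 4, 9, 16]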
Example #29
def run_tests(test_to_run, config=None):
    global CONFIG, STORAGE_CONFIG, STORAGE

    CONFIG = json.load(config) if config else default_config()  # 'config' is an open JSON file object
    STORAGE_CONFIG = extract_storage_config(CONFIG)
    STORAGE = InternalStorage(STORAGE_CONFIG).storage

    suite = unittest.TestSuite()
    if test_to_run == 'all':
        suite.addTest(unittest.makeSuite(TestLithops))
    else:
        try:
            suite.addTest(TestLithops(test_to_run))
        except ValueError:
            print("unknown test, use: --help")
            sys.exit()

    runner = unittest.TextTestRunner()
    runner.run(suite)
Example #30
def clean_all(config=None):
    logger.info('Cleaning all Lithops information')
    config = default_config(config)
    storage_config = extract_storage_config(config)
    internal_storage = InternalStorage(storage_config)
    compute_config = extract_compute_config(config)
    compute_handler = Compute(compute_config)

    # Clean localhost executor temp dirs
    shutil.rmtree(STORAGE_FOLDER, ignore_errors=True)
    shutil.rmtree(DOCKER_FOLDER, ignore_errors=True)

    # Delete all runtimes
    compute_handler.delete_all_runtimes()

    # Clean object storage temp dirs
    storage = internal_storage.storage
    clean_bucket(storage, storage_config['bucket'], RUNTIMES_PREFIX, sleep=1)
    clean_bucket(storage, storage_config['bucket'], JOBS_PREFIX, sleep=1)

    # Clean local lithops cache
    shutil.rmtree(CACHE_DIR, ignore_errors=True)