def update(name, config, backend, storage, debug):
    """
    Update a serverless runtime.

    Re-deploys every deployed runtime whose name matches *name* and
    refreshes its metadata in the internal storage.

    :param name: runtime name to update
    :param config: path to a YAML config file, or None
    :param backend: serverless backend name override, or None
    :param storage: storage backend name override, or None
    :param debug: unused here; logging is forced to DEBUG
    """
    setup_lithops_logger(logging.DEBUG)
    verify_runtime_name(name)

    if config:
        config = load_yaml_config(config)

    config_ow = set_config_ow(backend, storage, runtime_name=name)
    config = default_config(config, config_ow)

    if config['lithops']['mode'] != SERVERLESS:
        raise Exception('"lithops runtime update" command is only valid for serverless backends')

    storage_config = extract_storage_config(config)
    internal_storage = InternalStorage(storage_config)
    compute_config = extract_serverless_config(config)
    compute_handler = ServerlessHandler(compute_config, internal_storage)

    # BUG FIX: timeout was previously read from 'runtime_memory',
    # which set the function timeout to the memory size in MB.
    timeout = compute_config['runtime_timeout']

    logger.info('Updating runtime: {}'.format(name))

    runtimes = compute_handler.list_runtimes(name)
    for runtime in runtimes:
        # runtime is a (name, memory) pair
        runtime_key = compute_handler.get_runtime_key(runtime[0], runtime[1])
        runtime_meta = compute_handler.deploy_runtime(runtime[0], runtime[1], timeout)
        internal_storage.put_runtime_meta(runtime_key, runtime_meta)
def create(name, storage, backend, memory, timeout, config):
    """
    Create a serverless runtime.

    :param name: runtime name; falls back to the configured default runtime
    :param storage: storage backend override, or None
    :param backend: serverless backend override, or None
    :param memory: runtime memory in MB, or None to use the configured value
    :param timeout: runtime timeout in seconds, or None to use the configured value
    :param config: path to a YAML config file, or None
    """
    if config:
        config = load_yaml_config(config)

    setup_lithops_logger(logging.DEBUG)

    mode = SERVERLESS
    config_ow = {'lithops': {'mode': mode}}
    if storage:
        config_ow['lithops']['storage'] = storage
    if backend:
        config_ow[mode] = {'backend': backend}
    config = default_config(config, config_ow)

    if name:
        verify_runtime_name(name)
    else:
        name = config[mode]['runtime']

    logger.info('Creating new lithops runtime: {}'.format(name))

    storage_config = extract_storage_config(config)
    internal_storage = InternalStorage(storage_config)
    compute_config = extract_serverless_config(config)
    compute_handler = ServerlessHandler(compute_config, storage_config)

    mem = memory if memory else compute_config['runtime_memory']
    to = timeout if timeout else compute_config['runtime_timeout']
    runtime_key = compute_handler.get_runtime_key(name, mem)
    runtime_meta = compute_handler.create_runtime(name, mem, timeout=to)

    try:
        internal_storage.put_runtime_meta(runtime_key, runtime_meta)
    except Exception as e:
        # BUG FIX: previously raised a bare string (TypeError at runtime)
        raise Exception("Unable to upload 'preinstalled-modules' file into {}"
                        .format(internal_storage.backend)) from e
def __init__(self, config, num_invokers, log_level):
    """
    Initialize the invoker.

    :param config: full Lithops configuration dictionary
    :param num_invokers: number of invoker processes to spawn
    :param log_level: log level forwarded to the remote workers
    """
    self.config = config
    self.num_invokers = num_invokers
    self.log_level = log_level
    storage_config = extract_storage_config(self.config)
    self.internal_storage = InternalStorage(storage_config)
    # Whether invocations are fanned out from a remote function
    self.remote_invoker = self.config['lithops'].get(
        'remote_invoker', False)
    self.rabbitmq_monitor = self.config['lithops'].get(
        'rabbitmq_monitor', False)
    if self.rabbitmq_monitor:
        # AMQP URL only needed when rabbitmq monitoring is enabled
        self.rabbit_amqp_url = self.config['rabbitmq'].get('amqp_url')
    self.num_workers = self.config['lithops'].get('workers')
    logger.info('Total workers: {}'.format(self.num_workers))
    serverless_config = extract_serverless_config(self.config)
    self.serverless_handler = ServerlessHandler(serverless_config, storage_config)
    # Token bucket limits concurrency; pending queue feeds invoker processes
    self.token_bucket_q = mp.Queue()
    self.pending_calls_q = mp.Queue()
    self.job_monitor = JobMonitor(self.config, self.internal_storage, self.token_bucket_q)
def list_runtimes(config, backend, debug):
    """ list all deployed serverless runtime. """
    log_level = logging.DEBUG if debug else logging.INFO
    setup_lithops_logger(log_level)

    if config:
        config = load_yaml_config(config)

    config_ow = set_config_ow(backend, runtime_name='None')
    config = default_config(config, config_ow, load_storage_config=False)

    if config['lithops']['mode'] != SERVERLESS:
        raise Exception('"lithops runtime list" command is only valid for serverless backends')

    handler = ServerlessHandler(extract_serverless_config(config), None)
    deployed = handler.list_runtimes()

    if not deployed:
        # Nothing deployed: print an empty table with a fixed-width header
        width = 10
        print('\n{:{width}} \t {}'.format('Runtime Name', 'Memory Size (MB)', width=width))
        print('-' * width, '\t', '-' * 20)
        print('\nNo runtimes deployed')
        return

    width = max(len(entry[0]) for entry in deployed)
    print('\n{:{width}} \t {}'.format('Runtime Name', 'Memory Size (MB)', width=width))
    print('-' * width, '\t', '-' * 20)
    for entry in deployed:
        print('{:{width}} \t {}'.format(entry[0], entry[1], width=width))
    print()
    print('Total runtimes: {}'.format(len(deployed)))
def update(name, config, backend, storage):
    """
    Update a serverless runtime.

    Re-creates every deployed runtime matching *name* and refreshes its
    metadata in the internal storage.

    :param name: runtime name to update
    :param config: path to a YAML config file, or None
    :param backend: serverless backend override, or None
    :param storage: storage backend override, or None
    """
    if config:
        config = load_yaml_config(config)

    verify_runtime_name(name)
    setup_lithops_logger(logging.DEBUG)

    mode = SERVERLESS
    config_ow = {'lithops': {'mode': mode}}
    if storage:
        config_ow['lithops']['storage'] = storage
    if backend:
        config_ow[mode] = {'backend': backend}
    config = default_config(config, config_ow)

    storage_config = extract_storage_config(config)
    internal_storage = InternalStorage(storage_config)
    compute_config = extract_serverless_config(config)
    compute_handler = ServerlessHandler(compute_config, storage_config)

    # BUG FIX: timeout was previously read from 'runtime_memory'
    timeout = compute_config['runtime_timeout']

    logger.info('Updating runtime: {}'.format(name))

    runtimes = compute_handler.list_runtimes(name)
    for runtime in runtimes:
        runtime_key = compute_handler.get_runtime_key(runtime[0], runtime[1])
        runtime_meta = compute_handler.create_runtime(runtime[0], runtime[1], timeout)
        try:
            internal_storage.put_runtime_meta(runtime_key, runtime_meta)
        except Exception as e:
            # BUG FIX: previously raised a bare string (TypeError at runtime)
            raise Exception("Unable to upload 'preinstalled-modules' file into {}"
                            .format(internal_storage.backend)) from e
def deploy(name, storage, backend, memory, timeout, config, debug):
    """
    Deploy a serverless runtime.

    :param name: runtime name to deploy
    :param storage: storage backend override, or None
    :param backend: serverless backend override, or None
    :param memory: runtime memory in MB, or None for the configured default
    :param timeout: runtime timeout in seconds, or None for the configured default
    :param config: path to a YAML config file, or None
    :param debug: unused here; logging is forced to DEBUG
    """
    setup_lithops_logger(logging.DEBUG)
    verify_runtime_name(name)

    if config:
        config = load_yaml_config(config)

    config_ow = set_config_ow(backend, storage, runtime_name=name)
    config = default_config(config, config_ow)

    if config['lithops']['mode'] != SERVERLESS:
        # FIX: message previously referred to the old "create" command name
        raise Exception('"lithops runtime deploy" command is only valid for serverless backends')

    logger.info('Creating new lithops runtime: {}'.format(name))

    storage_config = extract_storage_config(config)
    internal_storage = InternalStorage(storage_config)
    compute_config = extract_serverless_config(config)
    compute_handler = ServerlessHandler(compute_config, internal_storage)

    mem = memory if memory else compute_config['runtime_memory']
    to = timeout if timeout else compute_config['runtime_timeout']
    runtime_key = compute_handler.get_runtime_key(name, mem)
    runtime_meta = compute_handler.deploy_runtime(name, mem, timeout=to)
    internal_storage.put_runtime_meta(runtime_key, runtime_meta)
def create(name, mode, memory, timeout, config):
    """
    Create a runtime for the given execution mode.

    :param name: runtime name
    :param mode: execution mode (SERVERLESS, STANDALONE or LOCALHOST);
                 falls back to the configured mode when None
    :param memory: runtime memory in MB (serverless only), or None
    :param timeout: runtime timeout in seconds (serverless only), or None
    :param config: Lithops configuration, or None for defaults
    :raises Exception: on an unknown execution mode or metadata upload failure
    """
    setup_logger(logging.DEBUG)
    config = default_config(config)
    storage_config = extract_storage_config(config)
    internal_storage = InternalStorage(storage_config)

    mode = config['lithops']['mode'] if not mode else mode

    if mode == SERVERLESS:
        compute_config = extract_serverless_config(config)
        compute_handler = ServerlessHandler(compute_config, storage_config)
        mem = memory if memory else compute_config['runtime_memory']
        to = timeout if timeout else compute_config['runtime_timeout']
        runtime_key = compute_handler.get_runtime_key(name, mem)
        runtime_meta = compute_handler.create_runtime(name, mem, timeout=to)
    elif mode == STANDALONE:
        compute_config = extract_standalone_config(config)
        compute_handler = StandaloneHandler(compute_config)
        runtime_key = compute_handler.get_runtime_key(name)
        runtime_meta = compute_handler.create_runtime(name)
    elif mode == LOCALHOST:
        compute_config = extract_localhost_config(config)
        compute_handler = LocalhostHandler(compute_config)
        runtime_key = compute_handler.get_runtime_key(name)
        runtime_meta = compute_handler.create_runtime(name)
    else:
        raise Exception('Unknown execution mode {}'.format(mode))

    try:
        internal_storage.put_runtime_meta(runtime_key, runtime_meta)
    except Exception as e:
        # BUG FIX: previously raised a bare string (TypeError at runtime)
        raise Exception("Unable to upload 'preinstalled-modules' file into {}"
                        .format(internal_storage.backend)) from e
def build(name, file, config):
    """Build the serverless runtime *name* from the given build *file*."""
    verify_runtime_name(name)
    setup_logger(logging.DEBUG)

    cfg = default_config(config)
    sless_config = extract_serverless_config(cfg)
    st_config = extract_storage_config(cfg)

    handler = ServerlessHandler(sless_config, st_config)
    handler.build_runtime(name, file)
def build(name, file, config, backend):
    """ build a serverless runtime. """
    verify_runtime_name(name)
    setup_lithops_logger(logging.DEBUG)

    # Force serverless mode, optionally overriding the backend
    mode = SERVERLESS
    overrides = {'lithops': {'mode': mode}}
    if backend:
        overrides[mode] = {'backend': backend}

    cfg = default_config(config, overrides)
    sless_config = extract_serverless_config(cfg)
    st_config = extract_storage_config(cfg)

    handler = ServerlessHandler(sless_config, st_config)
    handler.build_runtime(name, file)
def clean(mode, config, debug):
    """
    Delete all Lithops deployed resources: runtimes, temp object-storage
    data, localhost temp dirs and the local cache.

    :param mode: execution mode; falls back to the configured mode when None
    :param config: Lithops configuration, or None for defaults
    :param debug: when truthy, enables DEBUG logging
    :raises Exception: on an unknown execution mode
    """
    log_level = 'INFO' if not debug else 'DEBUG'
    setup_logger(log_level)
    logger.info('Cleaning all Lithops information')

    config = default_config(config)
    storage_config = extract_storage_config(config)
    internal_storage = InternalStorage(storage_config)

    mode = config['lithops']['mode'] if not mode else mode

    if mode == LOCALHOST:
        compute_config = extract_localhost_config(config)
        compute_handler = LocalhostHandler(compute_config)
    elif mode == SERVERLESS:
        compute_config = extract_serverless_config(config)
        compute_handler = ServerlessHandler(compute_config, storage_config)
    elif mode == STANDALONE:
        compute_config = extract_standalone_config(config)
        compute_handler = StandaloneHandler(compute_config)
    else:
        # BUG FIX: an unknown mode previously fell through to a NameError
        raise Exception('Unknown execution mode {}'.format(mode))

    compute_handler.clean()

    # Clean object storage temp dirs
    storage = internal_storage.storage
    clean_bucket(storage, storage_config['bucket'], RUNTIMES_PREFIX, sleep=1)
    clean_bucket(storage, storage_config['bucket'], JOBS_PREFIX, sleep=1)

    # Clean localhost executor temp dirs
    shutil.rmtree(LITHOPS_TEMP_DIR, ignore_errors=True)
    # Clean local lithops cache
    shutil.rmtree(CACHE_DIR, ignore_errors=True)
def clean_all(config=None):
    """
    Delete all Lithops deployed resources for the configured default
    executor: runtimes, temp object-storage data and local caches.

    :param config: Lithops configuration, or None for defaults
    :raises Exception: on an unknown default executor
    """
    logger.info('Cleaning all Lithops information')

    config = default_config(config)
    storage_config = extract_storage_config(config)
    internal_storage = InternalStorage(storage_config)

    default_executor = config['lithops']['executor']
    if default_executor == 'localhost':
        compute_config = extract_localhost_config(config)
        compute_handler = LocalhostHandler(compute_config)
    elif default_executor == 'serverless':
        compute_config = extract_serverless_config(config)
        compute_handler = ServerlessHandler(compute_config, storage_config)
    elif default_executor == 'standalone':
        compute_config = extract_standalone_config(config)
        compute_handler = StandaloneHandler(compute_config)
    else:
        # BUG FIX: an unknown executor previously fell through to a NameError
        raise Exception('Unknown executor {}'.format(default_executor))

    compute_handler.clean()

    # Clean object storage temp dirs
    storage = internal_storage.storage
    clean_bucket(storage, storage_config['bucket'], RUNTIMES_PREFIX, sleep=1)
    clean_bucket(storage, storage_config['bucket'], JOBS_PREFIX, sleep=1)

    # Clean localhost executor temp dirs
    shutil.rmtree(STORAGE_DIR, ignore_errors=True)
    # Clean local lithops cache
    shutil.rmtree(CACHE_DIR, ignore_errors=True)
def function_invoker(job_payload):
    """
    Method used as a remote invoker

    Runs inside a serverless function: unpacks the job payload, sets up
    worker environment variables, builds the monitoring/storage/compute
    plumbing and fans out the job's invocations.
    """
    config = job_payload['config']
    job = SimpleNamespace(**job_payload['job'])

    # Mark this process as a Lithops worker and tag it with the job key
    env = {
        'LITHOPS_WORKER': 'True',
        'PYTHONUNBUFFERED': 'True',
        '__LITHOPS_SESSION_ID': job.job_key
    }
    os.environ.update(env)

    # Create the monitoring system
    monitoring_backend = config['lithops']['monitoring'].lower()
    monitoring_config = config.get(monitoring_backend)
    job_monitor = JobMonitor(monitoring_backend, monitoring_config)

    storage_config = extract_storage_config(config)
    internal_storage = InternalStorage(storage_config)

    serverless_config = extract_serverless_config(config)
    compute_handler = ServerlessHandler(serverless_config, storage_config)

    # Create the invoker
    invoker = FaaSRemoteInvoker(config, job.executor_id, internal_storage, compute_handler, job_monitor)
    invoker.run_job(job)
def build(ctx, name, file, config, backend):
    """ build a serverless runtime. """
    setup_lithops_logger(logging.DEBUG)
    verify_runtime_name(name)

    if config:
        config = load_yaml_config(config)

    overrides = set_config_ow(backend, runtime_name=name)
    cfg = default_config(config, overrides, load_storage_config=False)

    if cfg['lithops']['mode'] != SERVERLESS:
        raise Exception('"lithops build" command is only valid for serverless backends')

    # Storage is not needed for building, hence the None handler argument
    handler = ServerlessHandler(extract_serverless_config(cfg), None)
    handler.build_runtime(name, file, ctx.args)
def delete(name, config, backend, storage):
    """ delete a serverless runtime """
    if config:
        config = load_yaml_config(config)

    setup_lithops_logger(logging.DEBUG)

    # Force serverless mode, optionally overriding backend/storage
    mode = SERVERLESS
    overrides = {'lithops': {'mode': mode}}
    if storage:
        overrides['lithops']['storage'] = storage
    if backend:
        overrides[mode] = {'backend': backend}
    cfg = default_config(config, overrides)

    if name:
        verify_runtime_name(name)
    else:
        # Fall back to the configured default runtime name
        name = cfg[mode]['runtime']

    st_config = extract_storage_config(cfg)
    int_storage = InternalStorage(st_config)
    handler = ServerlessHandler(extract_serverless_config(cfg), st_config)

    # Remove every deployed (name, memory) variant plus its metadata
    for rt_name, rt_mem in ((rt[0], rt[1]) for rt in handler.list_runtimes(name)):
        handler.delete_runtime(rt_name, rt_mem)
        int_storage.delete_runtime_meta(handler.get_runtime_key(rt_name, rt_mem))
def delete(name, config, backend, storage, debug):
    """
    Delete a serverless runtime.

    Removes every deployed (name, memory) variant of the runtime and its
    metadata from the internal storage.

    :param name: runtime name to delete
    :param config: path to a YAML config file, or None
    :param backend: serverless backend override, or None
    :param storage: storage backend override, or None
    :param debug: unused here; logging is forced to DEBUG
    """
    # FIX: setup_lithops_logger(logging.DEBUG) was previously called twice
    setup_lithops_logger(logging.DEBUG)
    verify_runtime_name(name)

    if config:
        config = load_yaml_config(config)

    config_ow = set_config_ow(backend, storage, runtime_name=name)
    config = default_config(config, config_ow)

    if config['lithops']['mode'] != SERVERLESS:
        raise Exception('"lithops runtime delete" command is only valid for serverless backends')

    storage_config = extract_storage_config(config)
    internal_storage = InternalStorage(storage_config)
    compute_config = extract_serverless_config(config)
    compute_handler = ServerlessHandler(compute_config, internal_storage)

    runtimes = compute_handler.list_runtimes(name)
    for runtime in runtimes:
        compute_handler.delete_runtime(runtime[0], runtime[1])
        runtime_key = compute_handler.get_runtime_key(runtime[0], runtime[1])
        internal_storage.delete_runtime_meta(runtime_key)
def update(name, config):
    """
    Update a serverless runtime.

    Re-creates every deployed runtime matching *name* with the configured
    timeout and refreshes its metadata in the internal storage.

    :param name: runtime name to update
    :param config: Lithops configuration, or None for defaults
    :raises Exception: when the runtime metadata upload fails
    """
    verify_runtime_name(name)
    setup_logger(logging.DEBUG)
    config = default_config(config)

    storage_config = extract_storage_config(config)
    internal_storage = InternalStorage(storage_config)
    compute_config = extract_serverless_config(config)
    compute_handler = ServerlessHandler(compute_config, storage_config)

    timeout = config['lithops']['runtime_timeout']

    logger.info('Updating runtime: {}'.format(name))

    runtimes = compute_handler.list_runtimes(name)
    for runtime in runtimes:
        runtime_key = compute_handler.get_runtime_key(runtime[0], runtime[1])
        runtime_meta = compute_handler.create_runtime(runtime[0], runtime[1], timeout)
        try:
            internal_storage.put_runtime_meta(runtime_key, runtime_meta)
        except Exception as e:
            # BUG FIX: previously raised a bare string (TypeError at runtime)
            raise Exception("Unable to upload 'preinstalled-modules' file into {}"
                            .format(internal_storage.backend)) from e
def create_runtime(name, memory=None, config=None):
    """
    Create a serverless runtime and store its metadata.

    :param name: runtime name
    :param memory: runtime memory in MB, or None for the configured default
    :param config: Lithops configuration, or None for defaults
    :raises Exception: when the runtime metadata upload fails
    """
    config = default_config(config)
    storage_config = extract_storage_config(config)
    internal_storage = InternalStorage(storage_config)
    compute_config = extract_serverless_config(config)
    compute_handler = ServerlessHandler(compute_config, storage_config)

    memory = config['lithops']['runtime_memory'] if not memory else memory
    timeout = config['lithops']['runtime_timeout']
    logger.info('Creating runtime: {}, memory: {}'.format(name, memory))

    runtime_key = compute_handler.get_runtime_key(name, memory)
    runtime_meta = compute_handler.create_runtime(name, memory, timeout=timeout)

    try:
        internal_storage.put_runtime_meta(runtime_key, runtime_meta)
    except Exception as e:
        # BUG FIX: previously raised a bare string (TypeError at runtime)
        raise Exception("Unable to upload 'preinstalled-modules' file into {}"
                        .format(internal_storage.backend)) from e
def __init__(self, config, num_invokers, log_level):
    """
    Initialize the invoker, creating one serverless handler per
    configured region (multi-region fan-out) or a single handler.

    :param config: full Lithops configuration dictionary
    :param num_invokers: number of invoker processes to spawn
    :param log_level: log level forwarded to the remote workers
    """
    self.config = config
    self.num_invokers = num_invokers
    self.log_level = log_level
    storage_config = extract_storage_config(self.config)
    self.internal_storage = InternalStorage(storage_config)
    compute_config = extract_serverless_config(self.config)

    self.remote_invoker = self.config['lithops'].get(
        'remote_invoker', False)
    self.rabbitmq_monitor = self.config['lithops'].get(
        'rabbitmq_monitor', False)
    if self.rabbitmq_monitor:
        self.rabbit_amqp_url = self.config['rabbitmq'].get('amqp_url')

    self.num_workers = self.config['lithops'].get('workers')
    logger.debug('Total workers: {}'.format(self.num_workers))

    self.serverless_handlers = []
    cb = compute_config['backend']
    regions = compute_config[cb].get('region')
    if regions and type(regions) == list:
        for region in regions:
            # BUG FIX: dict.copy() is shallow, so mutating the nested
            # backend dict leaked the region into every handler's config.
            # Copy the backend sub-dict as well before overriding 'region'.
            new_compute_config = compute_config.copy()
            new_compute_config[cb] = compute_config[cb].copy()
            new_compute_config[cb]['region'] = region
            serverless_handler = ServerlessHandler(new_compute_config, storage_config)
            self.serverless_handlers.append(serverless_handler)
    else:
        serverless_handler = ServerlessHandler(compute_config, storage_config)
        self.serverless_handlers.append(serverless_handler)

    # Token bucket limits concurrency; pending queue feeds invoker processes
    self.token_bucket_q = Queue()
    self.pending_calls_q = Queue()
    self.job_monitor = JobMonitor(self.config, self.internal_storage, self.token_bucket_q)
def build(name, file, config, backend):
    """ build a serverless runtime. """
    if config:
        config = load_yaml_config(config)

    setup_lithops_logger(logging.DEBUG)

    # Force serverless mode, optionally overriding the backend
    mode = SERVERLESS
    overrides = {'lithops': {'mode': mode}}
    if backend:
        overrides[mode] = {'backend': backend}
    cfg = default_config(config, overrides)

    if name:
        verify_runtime_name(name)
    else:
        # Fall back to the configured default runtime name
        name = cfg[mode]['runtime']

    st_config = extract_storage_config(cfg)
    int_storage = InternalStorage(st_config)
    handler = ServerlessHandler(extract_serverless_config(cfg), int_storage)
    handler.build_runtime(name, file)
def delete_runtime(name, config=None):
    """Delete every deployed variant of runtime *name* and its metadata."""
    cfg = default_config(config)
    st_config = extract_storage_config(cfg)
    int_storage = InternalStorage(st_config)
    handler = ServerlessHandler(extract_serverless_config(cfg), st_config)

    for entry in handler.list_runtimes(name):
        rt_name, rt_mem = entry[0], entry[1]
        handler.delete_runtime(rt_name, rt_mem)
        int_storage.delete_runtime_meta(handler.get_runtime_key(rt_name, rt_mem))
def clean(config, mode, backend, storage, debug):
    """
    Delete all Lithops deployed resources: runtimes, temp object-storage
    data, localhost temp dirs and the local cache.

    :param config: path to a YAML config file, or None
    :param mode: execution mode; inferred from backend/config when None
    :param backend: compute backend override, or None
    :param storage: storage backend override, or None
    :param debug: when truthy, enables DEBUG logging
    :raises Exception: on an unknown execution mode
    """
    if config:
        config = load_yaml_config(config)

    log_level = logging.INFO if not debug else logging.DEBUG
    setup_lithops_logger(log_level)
    logger.info('Cleaning all Lithops information')

    mode = mode or get_mode(backend, config)
    config_ow = {'lithops': {'mode': mode}}
    if storage:
        config_ow['lithops']['storage'] = storage
    if backend:
        config_ow[mode] = {'backend': backend}
    config = default_config(config, config_ow)

    storage_config = extract_storage_config(config)
    internal_storage = InternalStorage(storage_config)

    # NOTE: 'mode' is always set at this point, so the old
    # "mode = config['lithops']['mode'] if not mode else mode" was dead code.
    if mode == LOCALHOST:
        compute_config = extract_localhost_config(config)
        compute_handler = LocalhostHandler(compute_config)
    elif mode == SERVERLESS:
        compute_config = extract_serverless_config(config)
        compute_handler = ServerlessHandler(compute_config, storage_config)
    elif mode == STANDALONE:
        compute_config = extract_standalone_config(config)
        compute_handler = StandaloneHandler(compute_config)
    else:
        # BUG FIX: an unknown mode previously fell through to a NameError
        raise Exception('Unknown execution mode {}'.format(mode))

    compute_handler.clean()

    # Clean object storage temp dirs
    storage = internal_storage.storage
    clean_bucket(storage, storage_config['bucket'], RUNTIMES_PREFIX, sleep=1)
    clean_bucket(storage, storage_config['bucket'], JOBS_PREFIX, sleep=1)

    # Clean localhost executor temp dirs
    shutil.rmtree(LITHOPS_TEMP_DIR, ignore_errors=True)
    # Clean local lithops cache
    shutil.rmtree(CACHE_DIR, ignore_errors=True)
def delete(name, config, backend):
    """ delete a serverless runtime """
    verify_runtime_name(name)
    setup_logger(logging.DEBUG)

    # Force serverless mode, optionally overriding the backend
    mode = SERVERLESS
    overrides = {'lithops': {'mode': mode}}
    if backend:
        overrides[mode] = {'backend': backend}
    cfg = default_config(config, overrides)

    st_config = extract_storage_config(cfg)
    int_storage = InternalStorage(st_config)
    handler = ServerlessHandler(extract_serverless_config(cfg), st_config)

    # Remove every deployed (name, memory) variant plus its metadata
    for entry in handler.list_runtimes(name):
        rt_name, rt_mem = entry[0], entry[1]
        handler.delete_runtime(rt_name, rt_mem)
        int_storage.delete_runtime_meta(handler.get_runtime_key(rt_name, rt_mem))
class ServerlessInvoker:
    """
    Module responsible to perform the invocations against the serverless
    compute backend
    """

    def __init__(self, config, num_invokers, log_level):
        # Full Lithops config; num_invokers controls the process fan-out
        self.config = config
        self.num_invokers = num_invokers
        self.log_level = log_level
        storage_config = extract_storage_config(self.config)
        self.internal_storage = InternalStorage(storage_config)
        self.remote_invoker = self.config['lithops'].get(
            'remote_invoker', False)
        self.rabbitmq_monitor = self.config['lithops'].get(
            'rabbitmq_monitor', False)
        if self.rabbitmq_monitor:
            # AMQP URL only needed when rabbitmq monitoring is enabled
            self.rabbit_amqp_url = self.config['rabbitmq'].get('amqp_url')
        self.num_workers = self.config['lithops'].get('workers')
        logger.info('Total workers: {}'.format(self.num_workers))
        serverless_config = extract_serverless_config(self.config)
        self.serverless_handler = ServerlessHandler(serverless_config,
                                                    storage_config)
        # Token bucket limits concurrency; pending queue feeds invoker processes
        self.token_bucket_q = mp.Queue()
        self.pending_calls_q = mp.Queue()
        self.job_monitor = JobMonitor(self.config, self.internal_storage, self.token_bucket_q)

    def _invoke(self, job, call_id):
        """
        Method used to perform the actual invocation against the
        Compute Backend
        """
        payload = {'config': self.config,
                   'log_level': self.log_level,
                   'func_key': job.func_key,
                   'data_key': job.data_key,
                   'extra_env': job.extra_env,
                   'execution_timeout': job.execution_timeout,
                   'data_byte_range': job.data_ranges[int(call_id)],
                   'executor_id': job.executor_id,
                   'job_id': job.job_id,
                   'call_id': call_id,
                   'host_submit_tstamp': time.time(),
                   'lithops_version': __version__,
                   'runtime_name': job.runtime_name,
                   'runtime_memory': job.runtime_memory}

        # do the invocation
        start = time.time()
        activation_id = self.serverless_handler.invoke(job.runtime_name, job.runtime_memory, payload)
        roundtrip = time.time() - start
        resp_time = format(round(roundtrip, 3), '.3f')

        if not activation_id:
            # Invocation failed: requeue the call for a retry by another worker
            self.pending_calls_q.put((job, call_id))
            return

        logger.info('ExecutorID {} | JobID {} - Function invocation '
                    '{} done! ({}s) - Activation ID: {}'.format(
                        job.executor_id, job.job_id, call_id, resp_time, activation_id))
        return call_id

    def run(self, job_description):
        """
        Run a job described in job_description
        """
        job = SimpleNamespace(**job_description)

        log_msg = ('ExecutorID {} | JobID {} - Starting function '
                   'invocation: {}() - Total: {} activations'.format(
                       job.executor_id, job.job_id, job.function_name, job.total_calls))
        logger.info(log_msg)

        self.total_calls = job.total_calls

        if self.num_invokers == 0:
            # Localhost execution using processes: invoke sequentially inline
            for i in range(job.total_calls):
                call_id = "{:05d}".format(i)
                self._invoke(job, call_id)
        else:
            # Seed the token bucket with one token per worker, queue all
            # calls, then spawn the invoker processes to drain the queue
            for i in range(self.num_workers):
                self.token_bucket_q.put('#')
            for i in range(job.total_calls):
                call_id = "{:05d}".format(i)
                self.pending_calls_q.put((job, call_id))

            self.job_monitor.start_job_monitoring(job)

            invokers = []
            for inv_id in range(self.num_invokers):
                p = mp.Process(target=self._run_process, args=(inv_id, ))
                p.daemon = True
                p.start()
                invokers.append(p)

            for p in invokers:
                p.join()

    def _run_process(self, inv_id):
        """
        Run process that implements token bucket scheduling approach
        """
        logger.info('Invoker process {} started'.format(inv_id))
        call_futures = []
        with ThreadPoolExecutor(max_workers=250) as executor:
            # TODO: Change pending_calls_q check
            # NOTE(review): qsize() is approximate on multiprocessing queues;
            # a sentinel-based shutdown would be more reliable — confirm intent.
            while self.pending_calls_q.qsize() > 0:
                # Block until a token is available (a worker slot freed up)
                self.token_bucket_q.get()
                job, call_id = self.pending_calls_q.get()
                future = executor.submit(self._invoke, job, call_id)
                call_futures.append(future)
        logger.info('Invoker process {} finished'.format(inv_id))
def build_runtime(name, file, config=None):
    """Build the serverless runtime image *name* from build file *file*."""
    cfg = default_config(config)
    handler = ServerlessHandler(extract_serverless_config(cfg),
                                extract_storage_config(cfg))
    handler.build_runtime(name, file)