def clean_runtimes(config=None):
    """Delete every deployed runtime plus all cached runtime information.

    Wipes the local runtime_meta cache, the localhost job/runtime temp
    directories, the runtime metadata stored in the object-storage bucket,
    and finally every runtime registered in the compute backend.
    """
    logger.info('Cleaning all runtimes and cache information')
    config = default_config(config)
    storage_config = extract_storage_config(config)
    internal_storage = InternalStorage(storage_config)
    compute_handler = Compute(extract_compute_config(config))

    # Local runtime_meta cache and localhost temp dirs.
    for path in (CACHE_DIR,
                 os.path.join(TEMP, STORAGE_PREFIX_DEFAULT),
                 os.path.join(TEMP, RUNTIMES_PREFIX_DEFAULT)):
        if os.path.exists(path):
            shutil.rmtree(path)

    # Runtime metadata kept in the object-storage bucket.
    handler = internal_storage.storage_handler
    bucket = storage_config['bucket']
    keys = handler.list_keys(bucket, RUNTIMES_PREFIX_DEFAULT)
    if keys:
        handler.delete_objects(bucket, keys)

    compute_handler.delete_all_runtimes()
def clean_all(config=None):
    """Remove every PyWren artifact: runtimes, job data and local caches."""
    logger.info('Cleaning all PyWren information')
    config = default_config(config)
    storage_config = extract_storage_config(config)
    internal_storage = InternalStorage(storage_config)
    compute_handler = Compute(extract_compute_config(config))

    # Object storage: runtime metadata, deployed runtimes, job temp data.
    bucket = storage_config['bucket']
    handler = internal_storage.storage_handler
    stored_runtimes = handler.list_keys(bucket, RUNTIMES_PREFIX)
    if stored_runtimes:
        handler.delete_objects(bucket, stored_runtimes)
    compute_handler.delete_all_runtimes()
    clean_bucket(bucket, JOBS_PREFIX, internal_storage, sleep=1)

    # Local runtime_meta cache and localhost temp dirs.
    for path in (CACHE_DIR,
                 os.path.join(TEMP, JOBS_PREFIX),
                 os.path.join(TEMP, RUNTIMES_PREFIX)):
        if os.path.exists(path):
            shutil.rmtree(path)
def __init__(self, config, executor_id):
    """Store executor configuration and build the compute backend handler.

    :param config: full configuration dict
    :param executor_id: id of the owning executor
    """
    self.log_level = os.getenv('CB_LOG_LEVEL')
    self.config = config
    self.executor_id = executor_id
    self.storage_config = extract_storage_config(self.config)
    self.internal_compute = Compute(extract_compute_config(config))
def __init__(self, config, executor_id, internal_storage):
    """Create the function invoker: one Compute handler per configured region.

    :param config: full pywren configuration dict
    :param executor_id: id of the owning executor (used in log messages)
    :param internal_storage: InternalStorage instance shared with the executor
    """
    self.log_level = os.getenv('PYWREN_LOGLEVEL')
    self.config = config
    self.executor_id = executor_id
    self.storage_config = extract_storage_config(self.config)
    self.internal_storage = internal_storage
    self.compute_config = extract_compute_config(self.config)
    self.is_pywren_function = is_pywren_function()
    self.invokers = []
    self.remote_invoker = self.config['pywren'].get('remote_invoker', False)
    self.workers = self.config['pywren'].get('workers')
    logger.debug('ExecutorID {} - Total available workers: {}'
                 .format(self.executor_id, self.workers))

    self.compute_handlers = []
    cb = self.compute_config['backend']
    regions = self.compute_config[cb].get('region')
    if regions and isinstance(regions, list):
        # BUGFIX: dict.copy() is shallow, so mutating the nested backend
        # dict leaked the last region into every handler's config. Copy
        # the nested backend dict as well before overriding 'region'.
        for region in regions:
            compute_config = dict(self.compute_config)
            compute_config[cb] = dict(compute_config[cb])
            compute_config[cb]['region'] = region
            self.compute_handlers.append(Compute(compute_config))
    else:
        self.compute_handlers.append(Compute(self.compute_config))

    logger.debug('ExecutorID {} - Creating function invoker'.format(self.executor_id))
    self.token_bucket_q = Queue()
    self.pending_calls_q = Queue()
    self.running_flag = Value('i', 0)
    self.ongoing_activations = 0
    self.job_monitor = JobMonitor(self.config, self.internal_storage, self.token_bucket_q)
def __init__(self, config, log_level):
    """Set up storage, monitoring and one Compute handler per region.

    :param config: full pywren configuration dict
    :param log_level: logging level requested by the caller
    """
    self.config = config
    self.log_level = log_level
    storage_config = extract_storage_config(self.config)
    self.internal_storage = InternalStorage(storage_config)
    compute_config = extract_compute_config(self.config)
    self.remote_invoker = self.config['pywren'].get(
        'remote_invoker', False)
    self.rabbitmq_monitor = self.config['pywren'].get(
        'rabbitmq_monitor', False)
    if self.rabbitmq_monitor:
        self.rabbit_amqp_url = self.config['rabbitmq'].get('amqp_url')
    self.workers = self.config['pywren'].get('workers')
    logger.debug('Total workers: {}'.format(self.workers))

    self.compute_handlers = []
    cb = compute_config['backend']
    regions = compute_config[cb].get('region')
    if regions and isinstance(regions, list):
        # BUGFIX: dict.copy() is shallow, so every handler previously
        # shared the same nested backend dict and ended up configured
        # with the last region. Copy the nested dict before mutating.
        for region in regions:
            new_compute_config = dict(compute_config)
            new_compute_config[cb] = dict(new_compute_config[cb])
            new_compute_config[cb]['region'] = region
            self.compute_handlers.append(Compute(new_compute_config))
    else:
        self.compute_handlers.append(Compute(compute_config))

    self.token_bucket_q = Queue()
    self.pending_calls_q = Queue()
    self.job_monitor = JobMonitor(self.config, self.internal_storage, self.token_bucket_q)
def build_runtime(name, file, config=None):
    """Build the runtime image *name* from *file*, then deploy and update it."""
    config = default_config(config)
    handler = Compute(extract_compute_config(config))
    handler.build_runtime(name, file)
    create_runtime(name, config=config)
    update_runtime(name, config=config)
def build_runtime(name, file, config=None):
    """Build a runtime image from *file* and register it with the backend."""
    config = default_config(config)
    backend = Compute(extract_compute_config(config))
    backend.build_runtime(name, file)
    # Deploy the freshly built image and refresh its stored metadata.
    create_runtime(name, config=config)
    update_runtime(name, config=config)
def __init__(self, config, num_invokers, log_level):
    """Set up storage, job monitoring and the compute handler pool.

    :param config: full pywren configuration dict
    :param num_invokers: number of parallel invoker workers to use
    :param log_level: logging level requested by the caller
    """
    self.config = config
    self.num_invokers = num_invokers
    self.log_level = log_level
    storage_config = extract_storage_config(self.config)
    self.internal_storage = InternalStorage(storage_config)
    compute_config = extract_compute_config(self.config)
    self.remote_invoker = self.config['pywren'].get(
        'remote_invoker', False)
    self.rabbitmq_monitor = self.config['pywren'].get(
        'rabbitmq_monitor', False)
    if self.rabbitmq_monitor:
        self.rabbit_amqp_url = self.config['rabbitmq'].get('amqp_url')
    self.num_workers = self.config['pywren'].get('workers')
    logger.debug('Total workers: {}'.format(self.num_workers))

    self.compute_handlers = []
    cb = compute_config['backend']
    regions = compute_config[cb].get('region')
    if regions and isinstance(regions, list):
        # BUGFIX: dict.copy() is shallow, so mutating the nested backend
        # dict leaked the last region into every handler's config. Copy
        # the nested dict as well before overriding 'region'.
        for region in regions:
            new_compute_config = dict(compute_config)
            new_compute_config[cb] = dict(new_compute_config[cb])
            new_compute_config[cb]['region'] = region
            compute_handler = Compute(new_compute_config)
            self.compute_handlers.append(compute_handler)
    else:
        if cb == 'localhost':
            global CBH
            # Drop a cached localhost handler whose worker count no
            # longer matches the requested one.
            if cb in CBH and CBH[cb].compute_handler.num_workers != self.num_workers:
                del CBH[cb]
            if cb in CBH:
                logger.info('{} compute handler already started'.format(cb))
                compute_handler = CBH[cb]
                self.compute_handlers.append(compute_handler)
            else:
                logger.info('Starting {} compute handler'.format(cb))
                compute_handler = Compute(compute_config)
                CBH[cb] = compute_handler
                self.compute_handlers.append(compute_handler)
        else:
            compute_handler = Compute(compute_config)
            self.compute_handlers.append(compute_handler)

    self.token_bucket_q = Queue()
    self.pending_calls_q = Queue()
    self.job_monitor = JobMonitor(self.config, self.internal_storage, self.token_bucket_q)
def update_runtime(name, config=None):
    """Re-deploy runtime(s) named *name* and refresh their cached metadata.

    :param name: runtime name, or 'all' to update every deployed runtime
    :param config: pywren config dict, or None to load the default one
    :raises Exception: if the runtime metadata cannot be uploaded to storage
    """
    config = default_config(config)
    storage_config = extract_storage_config(config)
    internal_storage = InternalStorage(storage_config)
    compute_handler = Compute(extract_compute_config(config))
    timeout = config['pywren']['runtime_timeout']
    logger.info('Updating runtime: {}'.format(name))

    # For a single runtime regenerate its metadata once and reuse it for
    # every (name, memory) deployment below; 'all' skips regeneration.
    if name != 'all':
        runtime_meta = compute_handler.generate_runtime_meta(name)
    else:
        runtime_meta = None

    runtimes = compute_handler.list_runtimes(name)
    for runtime in runtimes:
        compute_handler.create_runtime(runtime[0], runtime[1], timeout)
        if runtime_meta:
            try:
                runtime_key = compute_handler.get_runtime_key(
                    runtime[0], runtime[1])
                internal_storage.put_runtime_meta(runtime_key, runtime_meta)
            except Exception as e:
                # BUGFIX: the original did ``raise ("...")`` which attempts
                # to raise a plain string -- a TypeError in Python 3. Raise
                # a real exception and chain the original cause.
                raise Exception(
                    "Unable to upload 'preinstalled modules' file into {}"
                    .format(internal_storage.backend)) from e
def __init__(self, config, executor_id, internal_storage):
    """Create the invoker and start its background invocation worker.

    :param config: full pywren configuration dict
    :param executor_id: id of the owning executor (used in log messages)
    :param internal_storage: InternalStorage instance shared with the executor
    """
    self.log_level = os.getenv('PYWREN_LOGLEVEL')
    self.config = config
    self.executor_id = executor_id
    self.storage_config = extract_storage_config(self.config)
    self.internal_storage = internal_storage
    self.compute_config = extract_compute_config(self.config)
    self.remote_invoker = self.config['pywren'].get(
        'remote_invoker', False)
    self.rabbitmq_monitor = self.config['pywren'].get(
        'rabbitmq_monitor', False)
    if self.rabbitmq_monitor:
        self.rabbit_amqp_url = self.config['rabbitmq'].get('amqp_url')
    self.workers = self.config['pywren'].get('workers')
    # BUGFIX: the original message had one placeholder ('ExecutorID {} -
    # Total workers:') filled with the worker count, logging it as the
    # executor id and omitting the count. Log both values.
    logger.debug('ExecutorID {} - Total workers: {}'
                 .format(self.executor_id, self.workers))

    self.compute_handlers = []
    cb = self.compute_config['backend']
    regions = self.compute_config[cb].get('region')
    if regions and isinstance(regions, list):
        # BUGFIX: dict.copy() is shallow, so mutating the nested backend
        # dict leaked the last region into every handler's config. Copy
        # the nested dict as well before overriding 'region'.
        for region in regions:
            compute_config = dict(self.compute_config)
            compute_config[cb] = dict(compute_config[cb])
            compute_config[cb]['region'] = region
            self.compute_handlers.append(Compute(compute_config))
    else:
        self.compute_handlers.append(Compute(self.compute_config))

    logger.debug('ExecutorID {} - Creating invoker process'.format(
        self.executor_id))
    self.token_bucket_q = Queue()
    self.pending_calls_q = Queue()
    self.invoker_process_stop_flag = Value('i', 0)
    self.is_pywren_function = is_pywren_function()
    # NOTE(review): a Thread is used inside a pywren function or on
    # non-Unix systems, a Process elsewhere -- presumably because
    # fork-based daemonic processes are unavailable there; confirm.
    if self.is_pywren_function or not is_unix_system():
        self.invoker_process = Thread(target=self.run_process, args=())
    else:
        self.invoker_process = Process(target=self.run_process, args=())
    self.invoker_process.daemon = True
    self.invoker_process.start()
    self.ongoing_activations = 0
def __init__(self, pywren_config, executor_id, internal_storage):
    """Build one Compute handler per configured region.

    :param pywren_config: full pywren configuration dict
    :param executor_id: id of the owning executor
    :param internal_storage: InternalStorage instance shared with the executor
    """
    self.log_level = os.getenv('PYWREN_LOGLEVEL')
    self.pywren_config = pywren_config
    self.executor_id = executor_id
    self.storage_config = extract_storage_config(self.pywren_config)
    self.internal_storage = internal_storage
    self.compute_config = extract_compute_config(self.pywren_config)

    self.compute_handlers = []
    cb = self.compute_config['backend']
    regions = self.compute_config[cb].get('region')
    if isinstance(regions, list):
        # BUGFIX: dict.copy() is shallow, so mutating the nested backend
        # dict leaked the last region into every handler's config. Copy
        # the nested dict as well before overriding 'region'.
        for region in regions:
            compute_config = dict(self.compute_config)
            compute_config[cb] = dict(compute_config[cb])
            compute_config[cb]['region'] = region
            self.compute_handlers.append(Compute(compute_config))
    else:
        self.compute_handlers.append(Compute(self.compute_config))
def create_runtime(name, memory=None, config=None):
    """Deploy runtime *name* and store its metadata in the object storage.

    :param name: runtime (image) name
    :param memory: runtime memory in MB; defaults to the configured value
    :param config: pywren config dict, or None to load the default one
    :raises Exception: if the runtime metadata cannot be uploaded to storage
    """
    config = default_config(config)
    storage_config = extract_storage_config(config)
    internal_storage = InternalStorage(storage_config)
    compute_handler = Compute(extract_compute_config(config))

    memory = config['pywren']['runtime_memory'] if not memory else memory
    timeout = config['pywren']['runtime_timeout']
    logger.info('Creating runtime: {}, memory: {}'.format(name, memory))

    runtime_key = compute_handler.get_runtime_key(name, memory)
    runtime_meta = compute_handler.create_runtime(name, memory, timeout=timeout)
    try:
        internal_storage.put_runtime_meta(runtime_key, runtime_meta)
    except Exception as e:
        # BUGFIX: ``raise("...")`` attempts to raise a plain string, which
        # is a TypeError in Python 3. Raise a real exception and chain it.
        raise Exception("Unable to upload 'preinstalled-modules' file into {}"
                        .format(internal_storage.backend)) from e
def clean_runtimes(config=None):
    """Delete all runtime metadata (cache + bucket) and every runtime."""
    logger.info('Cleaning all runtimes and cache information')
    config = default_config(config)
    storage_config = extract_storage_config(config)
    internal_storage = InternalStorage(storage_config)
    compute_handler = Compute(extract_compute_config(config))

    # Local runtime_meta cache.
    if os.path.exists(CACHE_DIR):
        shutil.rmtree(CACHE_DIR)

    # Runtime metadata stored in the object-storage bucket.
    bucket = storage_config['bucket']
    handler = internal_storage.storage_handler
    keys = handler.list_keys(bucket, 'runtime')
    if keys:
        handler.delete_objects(bucket, keys)

    compute_handler.delete_all_runtimes()
def clean_runtimes(config=None):
    """Delete every deployed runtime and all cached runtime metadata."""
    logger.info('Cleaning all runtimes')
    config = default_config(config)
    storage_config = extract_storage_config(config)
    internal_storage = InternalStorage(storage_config)
    compute_handler = Compute(extract_compute_config(config))

    # The local runtime_meta cache lives under ~/.cloudbutton.
    meta_cache = os.path.join(os.path.expanduser('~'), '.cloudbutton')
    if os.path.exists(meta_cache):
        shutil.rmtree(meta_cache)

    # Runtime metadata stored in the object-storage bucket.
    bucket = storage_config['bucket']
    handler = internal_storage.storage_handler
    keys = handler.list_keys(bucket, 'runtime')
    if keys:
        handler.delete_objects(bucket, keys)

    compute_handler.delete_all_runtimes()
def clean_all(config=None):
    """Remove all PyWren data: local temp dirs, caches and bucket objects."""
    logger.info('Cleaning all PyWren information')
    config = default_config(config)
    storage_config = extract_storage_config(config)
    internal_storage = InternalStorage(storage_config)
    compute_handler = Compute(extract_compute_config(config))

    # Localhost executor temp dirs (missing dirs are fine).
    for folder in (STORAGE_FOLDER, DOCKER_FOLDER):
        shutil.rmtree(folder, ignore_errors=True)

    # Deployed runtimes and object-storage temp data.
    compute_handler.delete_all_runtimes()
    storage = internal_storage.storage
    bucket = storage_config['bucket']
    for prefix in (RUNTIMES_PREFIX, JOBS_PREFIX):
        clean_bucket(storage, bucket, prefix, sleep=1)

    # Local pywren cache.
    shutil.rmtree(CACHE_DIR, ignore_errors=True)
def delete_runtime(name, config=None):
    """Delete every deployed runtime matching *name* plus its stored metadata."""
    config = default_config(config)
    storage_config = extract_storage_config(config)
    internal_storage = InternalStorage(storage_config)
    compute_handler = Compute(extract_compute_config(config))

    for rt in compute_handler.list_runtimes(name):
        # rt[0] is the runtime name, rt[1] its memory size.
        compute_handler.delete_runtime(rt[0], rt[1])
        key = compute_handler.get_runtime_key(rt[0], rt[1])
        internal_storage.delete_runtime_meta(key)
class Invoker:
    """Invokes the calls of a job against the compute backend and returns
    one ResponseFuture per invoked call."""

    def __init__(self, config, executor_id):
        # Cache config/ids and build the compute backend handler used by run().
        self.log_level = os.getenv('CB_LOG_LEVEL')
        self.config = config
        self.executor_id = executor_id
        self.storage_config = extract_storage_config(self.config)
        compute_config = extract_compute_config(config)
        self.internal_compute = Compute(compute_config)

    def run(self, job_description):
        """Invoke every call of the job described by *job_description*.

        :param job_description: dict with the job attributes (func_key,
            data_key, data_ranges, total_calls, runtime_name, ...) --
            exposed as attributes through a SimpleNamespace below.
        :return: list of ResponseFuture, one per invoked call.
        """
        job = SimpleNamespace(**job_description)

        if job.remote_invocation:
            log_msg = ('ExecutorID {} | JobID {} - Starting {} remote invocation function: Spawning {}() '
                       '- Total: {} activations'.format(self.executor_id, job.job_id, job.total_calls,
                                                        job.func_name, job.original_total_calls))
        else:
            log_msg = ('ExecutorID {} | JobID {} - Starting function invocation: {}() - Total: {} '
                       'activations'.format(self.executor_id, job.job_id, job.func_name, job.total_calls))
        logger.info(log_msg)
        # Echo to stdout when no explicit log level was configured.
        if not self.log_level:
            print(log_msg)

        ########################

        def invoke(executor_id, job_id, call_id, func_key, invoke_metadata,
                   data_key, data_byte_range):
            # Build the per-call payload, invoke it on the compute backend
            # and wrap the activation in a ResponseFuture.
            output_key = create_output_key(self.storage_config['prefix'], executor_id, job_id, call_id)
            status_key = create_status_key(self.storage_config['prefix'], executor_id, job_id, call_id)

            payload = {'config': self.config,
                       'log_level': self.log_level,
                       'func_key': func_key,
                       'data_key': data_key,
                       'output_key': output_key,
                       'status_key': status_key,
                       'task_execution_timeout': job.task_execution_timeout,
                       'data_byte_range': data_byte_range,
                       'executor_id': executor_id,
                       'job_id': job_id,
                       'call_id': call_id,
                       'pywren_version': __version__}

            if job.extra_env is not None:
                logger.debug("Extra environment vars {}".format(job.extra_env))
                payload['extra_env'] = job.extra_env

            if job.extra_meta is not None:
                # sanity
                for k, v in job.extra_meta.items():
                    if k in payload:
                        raise ValueError("Key {} already in dict".format(k))
                    payload[k] = v

            # overwrite explicit args, mostly used for testing via injection
            if job.overwrite_invoke_args is not None:
                payload.update(job.overwrite_invoke_args)

            host_submit_time = time.time()
            payload['host_submit_time'] = host_submit_time

            # do the invocation
            activation_id = self.internal_compute.invoke(
                job.runtime_name, job.runtime_memory, payload)

            if not activation_id:
                raise Exception(
                    "ExecutorID {} - Activation {} failed, therefore job is failed"
                    .format(executor_id, call_id))

            invoke_metadata['activation_id'] = activation_id
            invoke_metadata['invoke_time'] = time.time() - host_submit_time

            # Record the full payload in the call metadata, minus the config
            # (too large / sensitive to keep in per-call metadata).
            invoke_metadata.update(payload)
            del invoke_metadata['config']

            fut = ResponseFuture(call_id, job_id, executor_id, activation_id,
                                 self.storage_config, invoke_metadata)
            fut._set_state(JobState.invoked)

            return fut

        ########################

        # Fan out one invocation per call through a thread pool; each call
        # gets a zero-padded id and its own copy of the host job metadata.
        call_futures = []
        with ThreadPoolExecutor(max_workers=job.invoke_pool_threads) as executor:
            for i in range(job.total_calls):
                call_id = "{:05d}".format(i)
                data_byte_range = job.data_ranges[i]
                future = executor.submit(invoke, self.executor_id, job.job_id,
                                         call_id, job.func_key,
                                         job.host_job_meta.copy(),
                                         job.data_key, data_byte_range)
                call_futures.append(future)

        # Propagates any exception raised inside invoke().
        res = [ft.result() for ft in call_futures]

        return res