def select_runtime(self, job_id, runtime_memory):
    """ Return the runtime metadata """
    # Fall back to the configured default memory size when none is given.
    if not runtime_memory:
        runtime_memory = self.config['serverless']['runtime_memory']
    timeout = self.config['serverless']['runtime_timeout']

    logger.info('ExecutorID {} | JobID {} - Selected Runtime: {} - {}MB '
                .format(self.executor_id, job_id, self.runtime_name, runtime_memory))

    # Runtime metadata (preinstalled modules, python version) is cached in
    # the internal storage under a per-runtime/memory key.
    key = self.compute_handler.get_runtime_key(self.runtime_name, runtime_memory)
    meta = self.internal_storage.get_runtime_meta(key)

    if not meta:
        # First use of this runtime/memory combination: deploy it now.
        logger.info('Runtime {} with {}MB is not yet installed'.format(self.runtime_name, runtime_memory))
        meta = self.compute_handler.create_runtime(self.runtime_name, runtime_memory, timeout)
        self.internal_storage.put_runtime_meta(key, meta)

    # The remote python version must match the local one, otherwise the
    # serialized function cannot be safely deserialized remotely.
    local_py = version_str(sys.version_info)
    remote_py = meta['python_ver']
    if local_py != remote_py:
        raise Exception(("The indicated runtime '{}' is running Python {} and it "
                         "is not compatible with the local Python version {}")
                        .format(self.runtime_name, remote_py, local_py))

    return meta
def load_config(config_data):
    """Fill in defaults for the 'serverless' section and, when present,
    the 'code_engine' section of the configuration (mutates in place)."""
    sls = config_data['serverless']

    if 'runtime_memory' not in sls:
        sls['runtime_memory'] = RUNTIME_MEMORY_DEFAULT
    if 'runtime_timeout' not in sls:
        sls['runtime_timeout'] = RUNTIME_TIMEOUT_DEFAULT

    if 'runtime' not in sls:
        # Default runtime image depends on the local python version.
        py_ver = version_str(sys.version_info)
        try:
            sls['runtime'] = RUNTIME_DEFAULT[py_ver]
        except KeyError:
            raise Exception('Unsupported Python version: {}'.format(py_ver))

    sls['remote_invoker'] = True

    # Cap the worker count at the backend's concurrency limit.
    lithops_cfg = config_data['lithops']
    if 'workers' not in lithops_cfg or lithops_cfg['workers'] > MAX_CONCURRENT_WORKERS:
        lithops_cfg['workers'] = MAX_CONCURRENT_WORKERS

    if 'code_engine' in config_data:
        ce = config_data['code_engine']
        ce.setdefault('runtime_cpu', CPU_DEFAULT)
        ce.setdefault('api_version', DEFAULT_API_VERSION)
        ce.setdefault('group', DEFAULT_GROUP)
        ce.setdefault('version', DEFAULT_VERSION)
def load_config(config_data):
    """Validate the 'ibm_cf' config section and fill in default values.

    :param config_data: full lithops configuration dict (mutated in place)
    :raises Exception: if the section, a required parameter, or a complete
        set of optional credential parameters is missing
    """
    if 'ibm_cf' not in config_data:
        raise Exception("'ibm_cf' section is mandatory in the configuration")

    for param in REQ_PARAMS:
        if param not in config_data['ibm_cf']:
            # BUG FIX: report the specific missing parameter; the original
            # formatted the whole REQ_PARAMS collection into the message.
            msg = "{} is mandatory in 'ibm_cf' section of the configuration".format(param)
            raise Exception(msg)

    # Generic 'ibm' credentials (if present) are merged into ibm_cf.
    if 'ibm' in config_data and config_data['ibm'] is not None:
        config_data['ibm_cf'].update(config_data['ibm'])

    # One of the two alternative credential sets must be fully present.
    if not all(elem in config_data['ibm_cf'] for elem in OPT_PARAMS_1) and \
       not all(elem in config_data['ibm_cf'] for elem in OPT_PARAMS_2):
        raise Exception('You must provide either {}, or {} in {} section of the configuration'
                        .format(OPT_PARAMS_1, OPT_PARAMS_2, 'ibm_cf'))

    if 'runtime_memory' not in config_data['ibm_cf']:
        config_data['ibm_cf']['runtime_memory'] = RUNTIME_MEMORY_DEFAULT
    if 'runtime_timeout' not in config_data['ibm_cf']:
        config_data['ibm_cf']['runtime_timeout'] = RUNTIME_TIMEOUT_DEFAULT

    if 'runtime' not in config_data['ibm_cf']:
        # Default runtime image depends on the local python version.
        python_version = version_str(sys.version_info)
        try:
            config_data['ibm_cf']['runtime'] = RUNTIME_DEFAULT[python_version]
        except KeyError:
            raise Exception('Unsupported Python version: {}'.format(python_version))

    if 'invoke_pool_threads' not in config_data['ibm_cf']:
        config_data['ibm_cf']['invoke_pool_threads'] = INVOKE_POOL_THREADS_DEFAULT

    # Cap the worker count at the backend's concurrency limit.
    if 'workers' not in config_data['lithops'] or \
            config_data['lithops']['workers'] > MAX_CONCURRENT_WORKERS:
        config_data['lithops']['workers'] = MAX_CONCURRENT_WORKERS
def _get_default_runtime_image_name(self):
    """Compose the default docker image name for the Knative runtime."""
    user = self.knative_config['docker_user']
    py_ver = version_str(sys.version_info).replace('.', '')
    # Snapshot builds always map to the 'latest' tag; releases use their
    # dot-less version string.
    tag = 'latest' if 'SNAPSHOT' in __version__ else __version__.replace('.', '')
    return '{}/{}-v{}:{}'.format(user, kconfig.RUNTIME_NAME_DEFAULT, py_ver, tag)
def _get_default_runtime_image_name(self):
    """Compose the default docker image name for the k8s runtime."""
    user = self.k8s_config.get('docker_user')
    py_ver = version_str(sys.version_info).replace('.', '')
    # Development builds are tagged 'latest'; releases use the dot-less version.
    tag = 'latest' if 'dev' in __version__ else __version__.replace('.', '')
    return '{}/{}-v{}:{}'.format(user, k8s_config.RUNTIME_NAME, py_ver, tag)
def select_runtime(self, job_id, runtime_memory):
    """ Return the runtime metadata """
    msg = ('ExecutorID {} | JobID {} - Selected Runtime: {} '
           .format(self.executor_id, job_id, self.runtime_name))
    logger.info(msg)
    if not self.log_active:
        # Progress line on stdout when logging is not shown.
        print(msg, end='')

    key = self.compute_handler.get_runtime_key(self.runtime_name)
    meta = self.internal_storage.get_runtime_meta(key)

    if meta:
        # Runtime already deployed: just finish the progress line.
        if not self.log_active:
            print()
    else:
        # First use of this runtime: deploy it and cache its metadata.
        logger.debug('Runtime {} is not yet installed'.format(self.runtime_name))
        if not self.log_active:
            print('(Installing...)')
        meta = self.compute_handler.create_runtime(self.runtime_name)
        self.internal_storage.put_runtime_meta(key, meta)

    # Local and remote python versions must agree for serialization to work.
    local_py = version_str(sys.version_info)
    remote_py = meta['python_ver']
    if local_py != remote_py:
        raise Exception(("The indicated runtime '{}' is running Python {} and it "
                         "is not compatible with the local Python version {}")
                        .format(self.runtime_name, remote_py, local_py))

    return meta
def load_config(config_data):
    """Fill in defaults for the 'code_engine' backend configuration.

    Mutates config_data in place: migrates deprecated keys, sets CPU /
    registry / memory / timeout defaults, derives the runtime image name
    (building one from the local docker user when none is configured) and
    caps the worker count.
    """
    if 'code_engine' not in config_data:
        config_data['code_engine'] = {}

    # Backwards compatibility: old key name is copied to the new one.
    if 'kubectl_config' in config_data['code_engine']:
        print('"kubectl_config" variable in config is deprecated, use "kubecfg_path" instead')
        config_data['code_engine']['kubecfg_path'] = config_data['code_engine']['kubectl_config']

    if 'cpu' not in config_data['code_engine']:
        config_data['code_engine']['cpu'] = RUNTIME_CPU
    if 'container_registry' not in config_data['code_engine']:
        config_data['code_engine']['container_registry'] = CONTAINER_REGISTRY

    if 'runtime_memory' not in config_data['serverless']:
        config_data['serverless']['runtime_memory'] = RUNTIME_MEMORY
    if 'runtime_timeout' not in config_data['serverless']:
        config_data['serverless']['runtime_timeout'] = RUNTIME_TIMEOUT

    # A runtime set in the code_engine section overrides the serverless one.
    if 'runtime' in config_data['code_engine']:
        config_data['serverless']['runtime'] = config_data['code_engine']['runtime']
    if 'runtime' not in config_data['serverless']:
        # No runtime given: compose the default image name (needs docker).
        if not DOCKER_PATH:
            raise Exception('docker command not found. Install docker or use '
                            'an already built runtime')
        if 'docker_user' not in config_data['code_engine']:
            config_data['code_engine']['docker_user'] = get_docker_username()
        if not config_data['code_engine']['docker_user']:
            raise Exception('You must provide "docker_user" param in config '
                            'or execute "docker login"')
        docker_user = config_data['code_engine']['docker_user']
        python_version = version_str(sys.version_info).replace('.', '')
        revision = 'latest' if 'dev' in __version__ else __version__.replace('.', '')
        runtime_name = '{}/{}-v{}:{}'.format(docker_user, RUNTIME_NAME, python_version, revision)
        config_data['serverless']['runtime'] = runtime_name
    else:
        if config_data['serverless']['runtime'].count('/') > 1:
            # container registry is in the provided runtime name
            cr, rn = config_data['serverless']['runtime'].split('/', 1)
            config_data['code_engine']['container_registry'] = cr
            config_data['serverless']['runtime'] = rn

    config_data['serverless']['remote_invoker'] = True

    # Cap the worker count at the backend's concurrency limit.
    if 'workers' not in config_data['lithops'] or \
            config_data['lithops']['workers'] > MAX_CONCURRENT_WORKERS:
        config_data['lithops']['workers'] = MAX_CONCURRENT_WORKERS

    if 'invoke_pool_threads' not in config_data['code_engine']:
        config_data['code_engine']['invoke_pool_threads'] = INVOKE_POOL_THREADS_DEFAULT
    config_data['serverless']['invoke_pool_threads'] = config_data['code_engine']['invoke_pool_threads']
def load_config(config_data):
    """Validate the 'ibm_cf' config section and fill in default values.

    :param config_data: full lithops configuration dict (mutated in place)
    :raises Exception: if a required parameter or a complete set of
        optional credential parameters is missing
    """
    for param in REQ_PARAMS:
        if param not in config_data['ibm_cf']:
            # BUG FIX: report the specific missing parameter; the original
            # formatted the whole REQ_PARAMS collection into the message.
            msg = "{} is mandatory in 'ibm_cf' section of the configuration".format(param)
            raise Exception(msg)

    # Generic 'ibm' credentials (if present) are merged into ibm_cf.
    if 'ibm' in config_data and config_data['ibm'] is not None:
        config_data['ibm_cf'].update(config_data['ibm'])

    # One of the two alternative credential sets must be fully present.
    if not all(elem in config_data['ibm_cf'] for elem in OPT_PARAMS_1) and \
       not all(elem in config_data['ibm_cf'] for elem in OPT_PARAMS_2):
        raise Exception(
            'You must provide either {}, or {} in {} section of the configuration'
            .format(OPT_PARAMS_1, OPT_PARAMS_2, 'ibm_cf'))

    # Apply the catalogue of default values for any key not set explicitly.
    for key in DEFAULT_CONFIG_KEYS:
        if key not in config_data['ibm_cf']:
            config_data['ibm_cf'][key] = DEFAULT_CONFIG_KEYS[key]

    if 'runtime' not in config_data['ibm_cf']:
        # Default runtime image depends on the local python version.
        python_version = version_str(sys.version_info)
        try:
            config_data['ibm_cf']['runtime'] = RUNTIME_DEFAULT[python_version]
        except KeyError:
            raise Exception(
                'Unsupported Python version: {}'.format(python_version))
def _build_default_runtime(self, default_runtime_img_name):
    """
    Builds the default runtime.

    If a local docker installation is available, the Dockerfile matching
    the local python version is fetched from the lithops repository and
    the image is built locally; otherwise the build is delegated to the
    Tekton/git pipeline.

    :param default_runtime_img_name: docker image name to build
    :raises Exception: if the default Dockerfile cannot be fetched
    """
    if os.system('{} --version >{} 2>&1'.format(kconfig.DOCKER_PATH, os.devnull)) == 0:
        # Build default runtime using local docker
        python_version = version_str(sys.version_info).replace('.', '')
        location = 'https://raw.githubusercontent.com/lithops-cloud/lithops/master/runtime/knative'
        resp = requests.get('{}/Dockerfile.python{}'.format(location, python_version))
        # Scratch file written to the CWD; typos fixed ("Dockefile"/"kantive").
        dockerfile = "Dockerfile.default-knative-runtime"
        if resp.status_code == 200:
            with open(dockerfile, 'w') as f:
                f.write(resp.text)
            self.build_runtime(default_runtime_img_name, dockerfile)
            os.remove(dockerfile)
        else:
            # Typo fixed in the message ("runitme") and the bare exit()
            # replaced with an exception so callers can handle the failure.
            msg = 'There was an error fetching the default runtime Dockerfile: {}'.format(resp.text)
            logger.error(msg)
            raise Exception(msg)
    else:
        # Build default runtime using Tekton
        self._build_default_runtime_from_git(default_runtime_img_name)
def load_config(config_data):
    """Populate defaults for the 'k8s' backend configuration section."""
    k8s = config_data.setdefault('k8s', {})

    k8s.setdefault('runtime_cpu', RUNTIME_CPU)
    k8s.setdefault('runtime_memory', RUNTIME_MEMORY)
    k8s.setdefault('runtime_timeout', RUNTIME_TIMEOUT)

    if 'runtime' not in k8s:
        # Building the default runtime image requires a local docker install.
        if not DOCKER_PATH:
            raise Exception('docker command not found. Install docker or use '
                            'an already built runtime')
        if 'docker_user' not in k8s:
            k8s['docker_user'] = get_docker_username()
        if not k8s['docker_user']:
            raise Exception(
                'You must execute "docker login" or provide "docker_user" '
                'param in config under "k8s" section')
        py_ver = version_str(sys.version_info).replace('.', '')
        # Development builds are tagged 'latest'; releases use their version.
        tag = 'latest' if 'dev' in __version__ else __version__.replace('.', '')
        k8s['runtime'] = '{}/{}-v{}:{}'.format(k8s['docker_user'], RUNTIME_NAME,
                                               py_ver, tag)

    # Cap the worker count at the backend's concurrency limit.
    lithops_cfg = config_data['lithops']
    if 'workers' not in lithops_cfg or lithops_cfg['workers'] > MAX_CONCURRENT_WORKERS:
        lithops_cfg['workers'] = MAX_CONCURRENT_WORKERS
def select_runtime(self, job_id, runtime_memory):
    """ Return the runtime metadata """
    logger.info('ExecutorID {} | JobID {} - Selected Runtime: {} '.format(
        self.executor_id, job_id, self.runtime_name))

    key = self.compute_handler.get_runtime_key(self.runtime_name)
    meta = self.internal_storage.get_runtime_meta(key)

    if not meta:
        # First use of this runtime: deploy it and cache its metadata.
        logger.info('Runtime {} is not yet installed'.format(
            self.runtime_name))
        meta = self.compute_handler.create_runtime(self.runtime_name)
        self.internal_storage.put_runtime_meta(key, meta)

    # The remote runtime must match both the lithops and python versions
    # of the client, otherwise deserialization may fail remotely.
    if lithops_version != meta['lithops_version']:
        raise Exception(
            "Lithops version mismatch. Host version: {} - Runtime version: {}"
            .format(lithops_version, meta['lithops_version']))

    local_py = version_str(sys.version_info)
    remote_py = meta['python_version']
    if local_py != remote_py:
        raise Exception(
            ("The indicated runtime '{}' is running Python {} and it "
             "is not compatible with the local Python version {}").format(
                self.runtime_name, remote_py, local_py))

    return meta
def create_runtime(self, runtime_name, memory=3008, timeout=900):
    """
    Create a Lithops runtime as an AWS Lambda function

    :param runtime_name: name of the runtime to deploy
    :param memory: lambda memory size in MB (also encoded in the name)
    :param timeout: lambda execution timeout in seconds
    :return: runtime metadata generated for the deployed runtime
    :raises Exception: if the Lambda API does not answer 201 (Created)
    """
    function_name = self._format_action_name(runtime_name, memory)
    logger.debug(
        'Creating new Lithops lambda runtime: {}'.format(function_name))

    # Metadata (preinstalled modules / python version) of the new runtime.
    runtime_meta = self._generate_runtime_meta(runtime_name)

    # Reuse the dependencies layer when it exists; create it otherwise.
    runtime_layer_arn = self._check_runtime_layer(runtime_name)
    if runtime_layer_arn is None:
        runtime_layer_arn = self._create_layer(runtime_name)

    # Zip package containing the Lithops handler entry point.
    code = self._create_handler_bin()
    # e.g. 'python3.8' — must be a Lambda-supported runtime identifier.
    python_runtime_ver = 'python{}'.format(version_str(sys.version_info))

    response = self.lambda_client.create_function(
        FunctionName=function_name,
        Runtime=python_runtime_ver,
        Role=self.role_arn,
        Handler='__main__.lambda_handler',
        Code={'ZipFile': code},
        Description=self.package,
        Timeout=timeout,
        MemorySize=memory,
        Layers=[runtime_layer_arn, self._numerics_layer_arn])

    if response['ResponseMetadata']['HTTPStatusCode'] == 201:
        logger.debug('OK --> Created action {}'.format(runtime_name))
    else:
        msg = 'An error occurred creating/updating action {}: {}'.format(
            runtime_name, response)
        raise Exception(msg)

    return runtime_meta
def extract_runtime_meta():
    """Print, as JSON on stdout, the runtime metadata of this interpreter:
    the sorted list of importable modules and the python version."""
    runtime_meta = {
        # [module_name, is_package] pairs for every discoverable module.
        # The redundant identity comprehension and intermediate list()
        # of the original were removed; output is unchanged.
        "preinstalls": sorted([mod, is_pkg] for _, mod, is_pkg in pkgutil.iter_modules()),
        "python_ver": version_str(sys.version_info),
    }
    print(json.dumps(runtime_meta))
def load_config(config_data):
    """Fill in defaults for the 'knative' configuration section."""
    kn = config_data['knative']

    # Apply the catalogue of default values for unset keys.
    for key, value in DEFAULT_CONFIG_KEYS.items():
        kn.setdefault(key, value)

    kn['invoke_pool_threads'] = kn['max_workers']

    kn.setdefault('git_url', BUILD_GIT_URL)
    if 'git_rev' not in kn:
        # Development builds track 'master'; releases track their own tag.
        kn['git_rev'] = 'master' if 'dev' in __version__ else __version__

    if 'runtime' not in kn:
        # Building the default runtime image requires a local docker install.
        if not DOCKER_PATH:
            raise Exception('docker command not found. Install docker or use '
                            'an already built runtime')
        if 'docker_user' not in kn:
            kn['docker_user'] = get_docker_username()
        if not kn['docker_user']:
            raise Exception('You must provide "docker_user" param in config '
                            'or execute "docker login"')
        py_ver = version_str(sys.version_info).replace('.', '')
        tag = 'latest' if 'dev' in __version__ else __version__.replace('.', '')
        kn['runtime'] = '{}/{}-v{}:{}'.format(kn['docker_user'], RUNTIME_NAME,
                                              py_ver, tag)
def load_config(config_data):
    """Validate the 'openwhisk' config section and fill in defaults.

    :param config_data: full lithops configuration dict (mutated in place)
    :raises Exception: if the section or a required parameter is missing
    """
    if 'openwhisk' not in config_data:
        raise Exception("'openwhisk' section is mandatory in the configuration")

    for param in REQ_PARAMS:
        if param not in config_data['openwhisk']:
            # BUG FIX: report the specific missing parameter; the original
            # formatted the whole REQ_PARAMS collection into the message.
            msg = "{} is mandatory in 'openwhisk' section of the configuration".format(param)
            raise Exception(msg)

    if 'runtime_memory' not in config_data['openwhisk']:
        config_data['openwhisk']['runtime_memory'] = RUNTIME_MEMORY_DEFAULT
    if 'runtime_timeout' not in config_data['openwhisk']:
        config_data['openwhisk']['runtime_timeout'] = RUNTIME_TIMEOUT_DEFAULT

    if 'runtime' not in config_data['openwhisk']:
        # Default runtime image depends on the local python version.
        python_version = version_str(sys.version_info)
        try:
            config_data['openwhisk']['runtime'] = RUNTIME_DEFAULT[python_version]
        except KeyError:
            raise Exception('Unsupported Python version: {}'.format(python_version))

    if 'invoke_pool_threads' not in config_data['openwhisk']:
        config_data['openwhisk']['invoke_pool_threads'] = INVOKE_POOL_THREADS_DEFAULT

    # Cap the worker count at the backend's concurrency limit.
    if 'workers' not in config_data['lithops'] or \
            config_data['lithops']['workers'] > MAX_CONCURRENT_WORKERS:
        config_data['lithops']['workers'] = MAX_CONCURRENT_WORKERS
def _create_function(self, docker_image_name, memory, timeout):
    """
    Create and publish an Azure Functions

    :param docker_image_name: runtime image name (encoded in the app name)
    :param memory: memory size, used only to format the action name
    :param timeout: unused here — presumably enforced by the app config;
        TODO confirm against caller
    :raises Exception: if the 'az functionapp create' command fails
    """
    action_name = self._format_action_name(docker_image_name, memory)
    logger.info(
        'Creating new Lithops runtime for Azure Function: {}'.format(
            action_name))

    if self.invocation_type == 'event':
        # BUG FIX: queue names are now computed outside the try blocks.
        # Previously, if _format_queue_name() raised, the except handler
        # referenced an unbound variable and produced a NameError.
        in_q_name = self._format_queue_name(action_name, az_config.IN_QUEUE)
        try:
            logger.debug('Creating queue {}'.format(in_q_name))
            self.queue_service.create_queue(in_q_name)
        except Exception:
            # Queue already exists: reuse it but drop any stale messages.
            in_queue = self.queue_service.get_queue_client(in_q_name)
            in_queue.clear_messages()
        out_q_name = self._format_queue_name(action_name, az_config.OUT_QUEUE)
        try:
            logger.debug('Creating queue {}'.format(out_q_name))
            self.queue_service.create_queue(out_q_name)
        except Exception:
            out_queue = self.queue_service.get_queue_client(out_q_name)
            out_queue.clear_messages()

    python_version = version_str(sys.version_info)
    cmd = (
        'az functionapp create --name {} --storage-account {} '
        '--resource-group {} --os-type Linux --runtime python '
        '--runtime-version {} --functions-version {} --consumption-plan-location {}'
        .format(action_name, self.storage_account_name, self.resource_group,
                python_version, self.functions_version, self.location))
    if logger.getEffectiveLevel() != logging.DEBUG:
        # Silence the CLI output unless debug logging is enabled.
        cmd = cmd + " >{} 2>&1".format(os.devnull)
    res = os.system(cmd)
    if res != 0:
        raise Exception(
            'There was an error creating the function in Azure. cmd: {}'.
            format(cmd))

    logger.debug('Publishing function: {}'.format(action_name))
    build_dir = os.path.join(az_config.BUILD_DIR, action_name)
    os.chdir(build_dir)
    # 'func publish' fails until the function app is fully provisioned, so
    # retry until it succeeds. NOTE(review): there is no retry limit — this
    # loops forever if publishing can never succeed.
    res = 1
    while res != 0:
        time.sleep(5)
        if is_unix_system():
            cmd = 'func azure functionapp publish {} --python --no-build'.format(
                action_name)
        else:
            cmd = 'func azure functionapp publish {} --python'.format(
                action_name)
        if logger.getEffectiveLevel() != logging.DEBUG:
            cmd = cmd + " >{} 2>&1".format(os.devnull)
        res = os.system(cmd)
    time.sleep(10)
def load_config(config_data):
    """Validate the 'openwhisk' section and fill in serverless defaults."""
    if 'openwhisk' not in config_data:
        raise Exception("openwhisk section is mandatory in configuration")

    ow = config_data['openwhisk']
    sls = config_data['serverless']

    required_keys = ('endpoint', 'namespace', 'api_key')
    if not set(required_keys) <= set(ow):
        raise Exception(
            'You must provide {} to access to openwhisk'.format(required_keys))

    sls.setdefault('runtime_memory', RUNTIME_MEMORY_DEFAULT)
    sls.setdefault('runtime_timeout', RUNTIME_TIMEOUT_DEFAULT)

    # A runtime set in the openwhisk section overrides the serverless one.
    if 'runtime' in ow:
        sls['runtime'] = ow['runtime']
    if 'runtime' not in sls:
        # Default runtime image depends on the local python version.
        python_version = version_str(sys.version_info)
        try:
            sls['runtime'] = RUNTIME_DEFAULT[python_version]
        except KeyError:
            raise Exception(
                'Unsupported Python version: {}'.format(python_version))

    config_data['lithops'].setdefault('workers', CONCURRENT_WORKERS_DEFAULT)

    ow.setdefault('invoke_pool_threads', INVOKE_POOL_THREADS_DEFAULT)
    sls['invoke_pool_threads'] = ow['invoke_pool_threads']
def _get_default_runtime_image_name(self):
    """Compose the default runtime image name for the batch backend."""
    py_ver = version_str(sys.version_info).replace('.', '')
    # Development builds are tagged 'latest'; releases use their version.
    tag = 'latest' if 'dev' in lithops.__version__ else lithops.__version__.replace('.', '')
    return '{}-v{}:{}'.format(batch_config.DEFAULT_RUNTIME_NAME, py_ver, tag)
def load_config(config_data):
    """Validate the 'cloudrun' section and fill in serverless defaults."""
    if 'cloudrun' not in config_data:
        raise Exception("cloudrun section is mandatory in configuration")

    required_keys = ('project_id', 'region')
    if not set(required_keys) <= set(config_data['cloudrun']):
        raise Exception(
            'You must provide {} to access to Cloud Run'.format(required_keys))

    sls = config_data['serverless']
    sls.setdefault('runtime_memory', RUNTIME_MEMORY_DEFAULT)
    sls.setdefault('runtime_timeout', RUNTIME_TIMEOUT_DEFAULT)

    if 'runtime' not in sls:
        # Default image name lives under the project's container registry.
        project_id = config_data['cloudrun']['project_id']
        py_ver = version_str(sys.version_info).replace('.', '')
        tag = 'latest' if 'SNAPSHOT' in __version__ else __version__.replace('.', '')
        sls['runtime'] = '{}/{}-v{}:{}'.format(project_id, RUNTIME_NAME_DEFAULT,
                                               py_ver, tag)

    if 'workers' not in config_data['lithops']:
        config_data['cloudrun']['workers'] = CONCURRENT_WORKERS_DEFAULT
        config_data['lithops']['workers'] = CONCURRENT_WORKERS_DEFAULT
def _get_default_runtime_image_name(self):
    """Compose the default Azure Functions runtime name."""
    py_ver = version_str(sys.version_info).replace('.', '')
    # Development builds are tagged 'latest'; releases use their version.
    tag = 'latest' if 'dev' in __version__ else __version__.replace('.', '')
    return '{}-{}-v{}-{}'.format(self.storage_account, az_config.RUNTIME_NAME,
                                 py_ver, tag)
def _get_default_runtime_image_name(self):
    """Compose the default Cloud Run runtime image name."""
    py_ver = version_str(sys.version_info).replace('.', '')
    # Snapshot builds map to the 'latest' tag; releases use their version.
    tag = 'latest' if 'SNAPSHOT' in __version__ else __version__.replace('.', '')
    return '{}/{}-v{}:{}'.format(self.cloudrun_config['project_id'],
                                 cr_config.RUNTIME_NAME_DEFAULT, py_ver, tag)
def load_config(config_data=None):
    """Validate and normalize the GCP / gcp_functions configuration.

    Mutates config_data in place: fills in serverless defaults, validates
    the runtime memory/timeout against Cloud Functions limits, checks the
    mandatory 'gcp' credentials and derives the 'gcp_functions' section
    from the 'gcp' one.
    """
    if config_data is None:
        config_data = {}
    if 'gcp' not in config_data:
        raise Exception("'gcp' section is mandatory in the configuration")

    if 'runtime_memory' not in config_data['serverless']:
        config_data['serverless']['runtime_memory'] = RUNTIME_MEMORY_DEFAULT
    if 'runtime_timeout' not in config_data['serverless']:
        config_data['serverless']['runtime_timeout'] = RUNTIME_TIMEOUT_DEFAULT

    # A runtime set in the gcp section overrides the serverless one.
    if 'runtime' in config_data['gcp']:
        config_data['serverless']['runtime'] = config_data['gcp']['runtime']
    if 'runtime' not in config_data['serverless']:
        config_data['serverless']['runtime'] = 'python' + version_str(
            sys.version_info)

    if 'workers' not in config_data['lithops']:
        config_data['lithops']['workers'] = MAX_CONCURRENT_WORKERS

    # Cloud Functions only accepts a fixed set of memory sizes.
    if config_data['serverless'][
            'runtime_memory'] not in RUNTIME_MEMORY_OPTIONS:
        raise Exception(
            '{} MB runtime is not available (Only one of {} MB is available)'.
            format(config_data['serverless']['runtime_memory'],
                   RUNTIME_MEMORY_OPTIONS))

    # Clamp memory and timeout to the service maxima.
    if config_data['serverless']['runtime_memory'] > RUNTIME_MEMORY_MAX:
        config_data['serverless']['runtime_memory'] = RUNTIME_MEMORY_MAX
    if config_data['serverless']['runtime_timeout'] > RUNTIME_TIMEOUT_DEFAULT:
        config_data['serverless']['runtime_timeout'] = RUNTIME_TIMEOUT_DEFAULT

    config_data['gcp']['retries'] = RETRIES
    config_data['gcp']['retry_sleep'] = RETRY_SLEEP

    required_parameters = ('project_name', 'service_account',
                           'credentials_path')
    if not set(required_parameters) <= set(config_data['gcp']):
        raise Exception(
            "'project_name', 'service_account' and 'credentials_path' are mandatory under 'gcp' section"
        )

    if not exists(config_data['gcp']['credentials_path']) or not isfile(
            config_data['gcp']['credentials_path']):
        raise Exception("Path {} must be credentials JSON file.".format(
            config_data['gcp']['credentials_path']))

    # gcp_functions is a shallow copy of the gcp section plus a region.
    config_data['gcp_functions'] = config_data['gcp'].copy()
    if 'region' not in config_data['gcp_functions']:
        # NOTE(review): falls back to the legacy 'pywren' section; this
        # raises KeyError if that section is absent — confirm intended.
        config_data['gcp_functions']['region'] = config_data['pywren'][
            'compute_backend_region']

    if 'invoke_pool_threads' not in config_data['gcp']:
        config_data['gcp']['invoke_pool_threads'] = config_data['lithops'][
            'workers']
    config_data['serverless']['invoke_pool_threads'] = config_data['gcp'][
        'invoke_pool_threads']
def load_config(config_data):
    """Fill in defaults for the 'knative' section and derive the runtime,
    worker count and invoker pool settings (mutates config_data in place)."""
    if 'knative' not in config_data or not config_data['knative']:
        config_data['knative'] = {}

    if 'git_url' not in config_data['knative']:
        config_data['knative']['git_url'] = BUILD_GIT_URL
    if 'git_rev' not in config_data['knative']:
        # Development builds track 'master'; releases track their own tag.
        revision = 'master' if 'dev' in __version__ else __version__
        config_data['knative']['git_rev'] = revision

    if 'container_registry' not in config_data['knative']:
        config_data['knative']['container_registry'] = CONTAINER_REGISTRY
    if 'cpu' not in config_data['knative']:
        config_data['knative']['cpu'] = RUNTIME_CPU
    if 'concurrency' not in config_data['knative']:
        config_data['knative']['concurrency'] = RUNTIME_CONCURRENCY
    if 'min_instances' not in config_data['knative']:
        config_data['knative']['min_instances'] = RUNTIME_MIN_INSTANCES
    if 'max_instances' not in config_data['knative']:
        config_data['knative']['max_instances'] = RUNTIME_MAX_INSTANCES

    if 'runtime_memory' not in config_data['serverless']:
        config_data['serverless']['runtime_memory'] = RUNTIME_MEMORY
    if 'runtime_timeout' not in config_data['serverless']:
        config_data['serverless']['runtime_timeout'] = RUNTIME_TIMEOUT

    # A runtime set in the knative section overrides the serverless one.
    if 'runtime' in config_data['knative']:
        config_data['serverless']['runtime'] = config_data['knative']['runtime']
    if 'runtime' not in config_data['serverless']:
        # No runtime given: compose the default image name (needs docker).
        if not DOCKER_PATH:
            raise Exception('docker command not found. Install docker or use '
                            'an already built runtime')
        if 'docker_user' not in config_data['knative']:
            config_data['knative']['docker_user'] = get_docker_username()
        if not config_data['knative']['docker_user']:
            raise Exception('You must provide "docker_user" param in config '
                            'or execute "docker login"')
        docker_user = config_data['knative']['docker_user']
        python_version = version_str(sys.version_info).replace('.', '')
        revision = 'latest' if 'dev' in __version__ else __version__.replace('.', '')
        runtime_name = '{}/{}-v{}:{}'.format(docker_user, RUNTIME_NAME, python_version, revision)
        config_data['serverless']['runtime'] = runtime_name
    else:
        if config_data['serverless']['runtime'].count('/') > 1:
            # container registry is in the provided runtime name
            cr, rn = config_data['serverless']['runtime'].split('/', 1)
            config_data['knative']['container_registry'] = cr
            config_data['serverless']['runtime'] = rn

    if 'workers' not in config_data['lithops']:
        # Total workers = requests handled per instance * max instances.
        max_instances = config_data['knative']['max_instances']
        concurrency = config_data['knative']['concurrency']
        config_data['lithops']['workers'] = int(max_instances * concurrency)

    if 'invoke_pool_threads' not in config_data['knative']:
        config_data['knative']['invoke_pool_threads'] = config_data['lithops']['workers']
    config_data['serverless']['invoke_pool_threads'] = config_data['knative']['invoke_pool_threads']
def select_runtime(self, job_id, runtime_memory):
    """
    Auxiliary method that selects the runtime to use. To do so it gets the
    runtime metadata from the storage. This metadata contains the
    preinstalled python modules needed to serialize the local function.
    If the .metadata file does not exists in the storage, this means that
    the runtime is not installed, so this method will proceed to install it.

    :param job_id: job identifier, used only for logging
    :param runtime_memory: requested memory in MB, or None/0 for default
    :return: runtime metadata dict of the last compute handler processed
    """
    runtime_name = self.config['lithops']['runtime']
    if runtime_memory is None:
        runtime_memory = self.config['lithops']['runtime_memory']

    if runtime_memory:
        runtime_memory = int(runtime_memory)
        log_msg = ('ExecutorID {} | JobID {} - Selected Runtime: {} - {}MB'
                   .format(self.executor_id, job_id, runtime_name, runtime_memory))
    else:
        log_msg = ('ExecutorID {} | JobID {} - Selected Runtime: {}'
                   .format(self.executor_id, job_id, runtime_name))
    logger.info(log_msg)
    if not self.log_active:
        # Progress line on stdout when logging is not shown.
        print(log_msg, end=' ')

    installing = False  # ensures '(Installing...)' is printed only once

    # The runtime must be available on every compute backend; metadata is
    # cached in the internal storage under a per-backend key.
    for compute_handler in self.compute_handlers:
        runtime_key = compute_handler.get_runtime_key(runtime_name, runtime_memory)
        runtime_deployed = True
        try:
            runtime_meta = self.internal_storage.get_runtime_meta(runtime_key)
        except Exception:
            # Missing metadata in storage: runtime not yet installed.
            runtime_deployed = False

        if not runtime_deployed:
            logger.debug('ExecutorID {} | JobID {} - Runtime {} with {}MB is not yet '
                         'installed'.format(self.executor_id, job_id, runtime_name, runtime_memory))
            if not self.log_active and not installing:
                installing = True
                print('(Installing...)')

            timeout = self.config['lithops']['runtime_timeout']
            logger.debug('Creating runtime: {}, memory: {}MB'.format(runtime_name, runtime_memory))
            runtime_meta = compute_handler.create_runtime(runtime_name, runtime_memory, timeout=timeout)
            self.internal_storage.put_runtime_meta(runtime_key, runtime_meta)

        # The remote python version must match the local one, otherwise
        # the serialized function cannot be deserialized remotely.
        py_local_version = version_str(sys.version_info)
        py_remote_version = runtime_meta['python_ver']
        if py_local_version != py_remote_version:
            raise Exception(("The indicated runtime '{}' is running Python {} and it "
                             "is not compatible with the local Python version {}")
                            .format(runtime_name, py_remote_version, py_local_version))

    # Close the stdout progress line only when nothing was installed.
    if not self.log_active and runtime_deployed:
        print()

    # NOTE(review): returns the metadata of the last handler iterated;
    # behavior is undefined if self.compute_handlers is empty.
    return runtime_meta
def extend_runtime(job, compute_handler, internal_storage):
    """
    This method is used when customized_runtime is active

    Builds (if not cached) a docker image that extends the job's base
    runtime with the job's local modules, renames the job's runtime to the
    extended one and verifies lithops/python version compatibility.

    :param job: job descriptor; job.runtime_name is updated in place
    :param compute_handler: backend used to build/create the runtime
    :param internal_storage: storage used to cache runtime metadata
    :raises Exception: on lithops or python version mismatch
    """
    base_docker_image = job.runtime_name
    uuid = job.ext_runtime_uuid
    # Tag the extended image with the function/dependencies hash so each
    # distinct function gets its own cached runtime image.
    ext_runtime_name = "{}:{}".format(base_docker_image.split(":")[0], uuid)

    # update job with new extended runtime name
    job.runtime_name = ext_runtime_name

    runtime_key = compute_handler.get_runtime_key(job.runtime_name, job.runtime_memory)
    runtime_meta = internal_storage.get_runtime_meta(runtime_key)

    if not runtime_meta:
        logger.info('Creating runtime: {}, memory: {}MB'.format(
            ext_runtime_name, job.runtime_memory))

        ext_docker_file = '/'.join([job.local_tmp_dir, "Dockerfile"])

        # Generate Dockerfile extended with function dependencies and function
        with open(ext_docker_file, 'w') as df:
            df.write('\n'.join([
                'FROM {}'.format(base_docker_image),
                'ENV PYTHONPATH=/tmp/lithops/modules:$PYTHONPATH',
                # set python path to point to dependencies folder
                'COPY . /tmp/lithops'
            ]))

        # Build new extended runtime tagged by function hash
        cwd = os.getcwd()
        os.chdir(job.local_tmp_dir)
        compute_handler.build_runtime(ext_runtime_name, ext_docker_file)
        os.chdir(cwd)
        shutil.rmtree(job.local_tmp_dir, ignore_errors=True)

        runtime_meta = compute_handler.create_runtime(ext_runtime_name,
                                                      job.runtime_memory,
                                                      job.runtime_timeout)
        runtime_meta['runtime_timeout'] = job.runtime_timeout
        internal_storage.put_runtime_meta(runtime_key, runtime_meta)

    # Verify python version and lithops version
    if lithops_version != runtime_meta['lithops_version']:
        raise Exception(
            "Lithops version mismatch. Host version: {} - Runtime version: {}".
            format(lithops_version, runtime_meta['lithops_version']))

    py_local_version = version_str(sys.version_info)
    py_remote_version = runtime_meta['python_version']
    if py_local_version != py_remote_version:
        raise Exception(
            ("The indicated runtime '{}' is running Python {} and it "
             "is not compatible with the local Python version {}").format(
                job.runtime_name, py_remote_version, py_local_version))
def _create_function(self, runtime_name, memory, code, timeout=60, trigger='HTTP'):
    """
    Deploy a Google Cloud Function for the given runtime.

    Uploads the zipped function code to the storage bucket, submits a
    Cloud Functions create request, polls until deployment finishes, and
    finally deletes the uploaded archive from storage.

    :param runtime_name: runtime identifier used to derive function/topic names
    :param memory: memory (MB) assigned to the function
    :param code: zipped function archive (bytes) to upload
    :param timeout: function execution timeout in seconds (default 60)
    :param trigger: 'HTTP' or 'Pub/Sub' invocation trigger
    :raises Exception: if deployment goes OFFLINE or reports an unknown status
    """
    logger.debug("Creating function {} - Memory: {} Timeout: {} Trigger: {}".format(runtime_name, memory, timeout, trigger))
    default_location = self._full_default_location()
    function_location = self._full_function_location(self._format_function_name(runtime_name, memory))
    bin_name = self._format_function_name(runtime_name, memory) + '_bin.zip'
    # Stage the code archive in the bucket; GCF pulls it via sourceArchiveUrl
    self.internal_storage.put_data(bin_name, code)

    # e.g. 'python38' — GCF runtime ids have no dot and are lowercase
    python_runtime_ver = 'python{}'.format(version_str(sys.version_info))

    cloud_function = {
        'name': function_location,
        'description': self.package,
        'entryPoint': 'main',
        'runtime': python_runtime_ver.lower().replace('.', ''),
        'timeout': str(timeout) + 's',
        'availableMemoryMb': memory,
        'serviceAccountEmail': self.service_account,
        # NOTE(review): 0 presumably means "no instance cap" — confirm
        # against the Cloud Functions API semantics.
        'maxInstances': 0,
        'sourceArchiveUrl': 'gs://{}/{}'.format(self.internal_storage.bucket, bin_name)
    }

    if trigger == 'HTTP':
        cloud_function['httpsTrigger'] = {}
    elif trigger == 'Pub/Sub':
        # Invocations arrive through a Pub/Sub topic instead of HTTPS
        topic_location = self._full_topic_location(self._format_topic_name(runtime_name, memory))
        cloud_function['eventTrigger'] = {
            'eventType': 'providers/cloud.pubsub/eventTypes/topic.publish',
            'resource': topic_location,
            'failurePolicy': {}
        }

    response = self._get_funct_conn().projects().locations().functions().create(
        location=default_location,
        body=cloud_function
    ).execute(num_retries=self.num_retries)

    # Wait until function is completely deployed
    while True:
        response = self._get_funct_conn().projects().locations().functions().get(
            name=function_location
        ).execute(num_retries=self.num_retries)
        logger.debug('Function status is {}'.format(response['status']))
        if response['status'] == 'ACTIVE':
            break
        elif response['status'] == 'OFFLINE':
            raise Exception('Error while deploying Cloud Function')
        elif response['status'] == 'DEPLOY_IN_PROGRESS':
            time.sleep(self.retry_sleep)
            logger.info('Waiting for function to be deployed...')
        else:
            raise Exception('Unknown status {}'.format(response['status']))

    # Delete runtime bin archive from storage
    self.internal_storage.storage.delete_object(self.internal_storage.bucket, bin_name)
def load_config(config_data):
    """
    Apply defaults to and validate the knative backend configuration.

    Mutates ``config_data`` in place: fills the 'knative' section with
    build/runtime defaults, sets serverless memory/timeout, derives a
    default runtime image name (discovering the docker user from
    ``docker info`` if needed) and computes the worker count.

    :param config_data: full lithops configuration dict
    :raises Exception: when no docker user can be determined and none is
        configured
    """
    if 'knative' not in config_data or not config_data['knative']:
        config_data['knative'] = {}

    if 'git_url' not in config_data['knative']:
        config_data['knative']['git_url'] = BUILD_GIT_URL
    if 'git_rev' not in config_data['knative']:
        # dev builds track master; releases pin their own version tag
        revision = 'master' if 'dev' in __version__ else __version__
        config_data['knative']['git_rev'] = revision
    if 'docker_repo' not in config_data['knative']:
        config_data['knative']['docker_repo'] = DOCKER_REPO
    if 'cpu' not in config_data['knative']:
        config_data['knative']['cpu'] = RUNTIME_CPU
    if 'concurrency' not in config_data['knative']:
        config_data['knative']['concurrency'] = RUNTIME_CONCURRENCY
    if 'min_instances' not in config_data['knative']:
        config_data['knative']['min_instances'] = RUNTIME_MIN_INSTANCES
    if 'max_instances' not in config_data['knative']:
        config_data['knative']['max_instances'] = RUNTIME_MAX_INSTANCES

    if 'runtime_memory' not in config_data['serverless']:
        config_data['serverless']['runtime_memory'] = RUNTIME_MEMORY
    if 'runtime_timeout' not in config_data['serverless']:
        config_data['serverless']['runtime_timeout'] = RUNTIME_TIMEOUT

    if 'runtime' not in config_data['serverless']:
        if 'docker_user' not in config_data['knative']:
            # Discover the logged-in docker user from `docker info` output
            cmd = "{} info".format(DOCKER_PATH)
            docker_user_info = sp.check_output(cmd, shell=True,
                                               encoding='UTF-8',
                                               stderr=sp.STDOUT)
            for line in docker_user_info.splitlines():
                if 'Username' in line:
                    # Fix: local variable was misspelled 'useranme'; split at
                    # the first ':' only so a value containing a colon is
                    # preserved intact
                    _, username = line.strip().split(':', 1)
                    config_data['knative']['docker_user'] = username.strip()
                    break

        if 'docker_user' not in config_data['knative']:
            raise Exception('You must provide "docker_user" param in config '
                            'or execute "docker login"')

        docker_user = config_data['knative']['docker_user']
        python_version = version_str(sys.version_info).replace('.', '')
        revision = 'latest' if 'dev' in __version__ else __version__.replace(
            '.', '')
        runtime_name = '{}/{}-v{}:{}'.format(docker_user, RUNTIME_NAME,
                                             python_version, revision)
        config_data['serverless']['runtime'] = runtime_name

    if 'workers' not in config_data['lithops']:
        # Each instance can serve `concurrency` parallel requests
        max_instances = config_data['knative']['max_instances']
        concurrency = config_data['knative']['concurrency']
        config_data['lithops']['workers'] = int(max_instances * concurrency)
def load_config(config_data=None):
    """
    Validate and apply defaults to the GCP Functions backend configuration.

    Mutates ``config_data`` in place: fills serverless runtime defaults,
    caps memory/timeout at the backend maximums, copies storage settings
    into the compute section and checks the mandatory 'gcp' credentials.

    :param config_data: full lithops configuration dict (may be None/empty)
    :raises Exception: on a missing 'gcp' section, unsupported memory
        values, missing credentials parameters or an invalid credentials
        file path
    """
    if config_data is None:
        config_data = {}

    # Guard the sections read below so that calling with an empty/partial
    # dict reaches the meaningful "'gcp' section is mandatory" error instead
    # of crashing immediately with KeyError: 'serverless' — previously the
    # config_data=None default was unusable.
    config_data.setdefault('serverless', {})
    config_data.setdefault('lithops', {})

    if 'runtime_memory' not in config_data['serverless']:
        config_data['serverless']['runtime_memory'] = RUNTIME_MEMORY_DEFAULT
    if 'runtime_timeout' not in config_data['serverless']:
        config_data['serverless']['runtime_timeout'] = RUNTIME_TIMEOUT_DEFAULT
    if 'runtime' not in config_data['serverless']:
        # e.g. 'python3.8', derived from the local interpreter
        config_data['serverless']['runtime'] = 'python' + \
            version_str(sys.version_info)

    if 'workers' not in config_data['lithops']:
        config_data['lithops']['workers'] = MAX_CONCURRENT_WORKERS

    if config_data['serverless'][
            'runtime_memory'] not in RUNTIME_MEMORY_OPTIONS:
        raise Exception('{} MB runtime is not available (Only {} MB)'.format(
            config_data['serverless']['runtime_memory'],
            RUNTIME_MEMORY_OPTIONS))

    # Cap memory and timeout at the backend maximums
    if config_data['serverless']['runtime_memory'] > RUNTIME_MEMORY_MAX:
        config_data['serverless']['runtime_memory'] = RUNTIME_MEMORY_MAX
    if config_data['serverless']['runtime_timeout'] > RUNTIME_TIMEOUT_DEFAULT:
        config_data['serverless']['runtime_timeout'] = RUNTIME_TIMEOUT_DEFAULT

    if 'gcp' not in config_data:
        raise Exception("'gcp' section is mandatory in the configuration")

    config_data['gcp']['retries'] = RETRIES
    config_data['gcp']['retry_sleeps'] = RETRY_SLEEPS

    # Put storage data into compute backend config dict entry
    storage_config = dict()
    storage_config['lithops'] = config_data['lithops'].copy()
    storage_config['gcp_storage'] = config_data['gcp'].copy()
    config_data['gcp']['storage'] = lithops_config.extract_storage_config(
        storage_config)

    required_parameters_0 = ('project_name', 'service_account',
                             'credentials_path')
    if not set(required_parameters_0) <= set(config_data['gcp']):
        # Fix: the previous message used a backslash line-continuation inside
        # the string literal, embedding a run of indentation whitespace in
        # the user-visible error text
        raise Exception("'project_name', 'service_account' and "
                        "'credentials_path' are mandatory under 'gcp' section")

    if not exists(config_data['gcp']['credentials_path']) or not isfile(
            config_data['gcp']['credentials_path']):
        raise Exception("Path {} must be credentials JSON file.".format(
            config_data['gcp']['credentials_path']))

    config_data['gcp_functions'] = config_data['gcp'].copy()
    if 'region' not in config_data['gcp_functions']:
        # NOTE(review): 'pywren' looks like a stale section name from the
        # pre-lithops config layout; this raises KeyError when no region is
        # configured and there is no 'pywren' section — confirm intended key.
        config_data['gcp_functions']['region'] = config_data['pywren'][
            'compute_backend_region']
def load_config(config_data):
    """
    Apply defaults to and validate the Code Engine backend configuration.

    Mutates ``config_data`` in place: migrates deprecated keys, merges the
    'ibm' section in, fills runtime defaults (building a default runtime
    image name from the docker user when none is configured), validates
    cpu/memory/region values and caps the worker count.

    :param config_data: full lithops configuration dict
    :raises Exception: when docker is unavailable/unauthenticated while a
        runtime must be built, or on invalid cpu/memory/region values
    """
    if 'code_engine' not in config_data:
        config_data['code_engine'] = {}
    ce_config = config_data['code_engine']

    # Migrate deprecated keys, warning the user
    if 'kubectl_config' in ce_config:
        print('"kubectl_config" variable in code_engine config is deprecated, use "kubecfg_path" instead')
        ce_config['kubecfg_path'] = ce_config['kubectl_config']
    if 'cpu' in ce_config:
        print('"cpu" variable in code_engine config is deprecated, use "runtime_cpu" instead')
        ce_config['runtime_cpu'] = ce_config['cpu']

    # IBM-wide settings (e.g. credentials) are shared with this backend
    if config_data.get('ibm') is not None:
        ce_config.update(config_data['ibm'])

    for key, default in (('runtime_cpu', RUNTIME_CPU),
                         ('runtime_memory', RUNTIME_MEMORY),
                         ('runtime_timeout', RUNTIME_TIMEOUT)):
        ce_config.setdefault(key, default)

    if 'runtime' not in ce_config:
        # No runtime configured: derive the default image name, which
        # requires a working docker login
        if not DOCKER_PATH:
            raise Exception('docker command not found. Install docker or use '
                            'an already built runtime')
        if 'docker_user' not in ce_config:
            ce_config['docker_user'] = get_docker_username()
        if not ce_config['docker_user']:
            raise Exception('You must execute "docker login" or provide "docker_user" '
                            'param in config under "code_engine" section')
        user = ce_config['docker_user']
        py_ver = version_str(sys.version_info).replace('.', '')
        tag = 'latest' if 'dev' in __version__ else __version__.replace('.', '')
        ce_config['runtime'] = '{}/{}-v{}:{}'.format(user, RUNTIME_NAME, py_ver, tag)

    cpu_value = ce_config['runtime_cpu']
    if cpu_value not in VALID_CPU_VALUES:
        raise Exception('{} is an invalid runtime cpu value. Set one of: '
                        '{}'.format(cpu_value, VALID_CPU_VALUES))

    memory_value = ce_config['runtime_memory']
    if memory_value not in VALID_MEMORY_VALUES:
        raise Exception('{} is an invalid runtime memory value in MB. Set one of: '
                        '{}'.format(memory_value, VALID_MEMORY_VALUES))

    region_value = ce_config.get('region')
    if region_value and region_value not in VALID_REGIONS:
        raise Exception('{} is an invalid region name. Set one of: '
                        '{}'.format(region_value, VALID_REGIONS))

    # Cap the worker count at the backend's concurrency limit
    if 'workers' not in config_data['lithops'] or \
            config_data['lithops']['workers'] > MAX_CONCURRENT_WORKERS:
        config_data['lithops']['workers'] = MAX_CONCURRENT_WORKERS
def _extend_runtime(self, job):
    """
    Build and register a runtime extended with the job's function/modules.

    Builds (once per function hash) a docker image derived from the current
    runtime, caches its metadata in the internal storage, and validates
    that the runtime matches the local lithops and Python versions.
    Updates ``self.runtime_name`` in place to the extended image name.

    :param job: job descriptor providing ext_runtime_uuid and func_key
    :returns: runtime metadata dict
    :raises Exception: on lithops or Python version mismatch
    """
    runtime_memory = self.config['serverless']['runtime_memory']

    base_docker_image = self.runtime_name
    uuid = job.ext_runtime_uuid
    # Tag only the repository part of the base image with the function uuid
    ext_runtime_name = "{}:{}".format(base_docker_image.split(":")[0], uuid)

    # update job with new extended runtime name
    self.runtime_name = ext_runtime_name

    runtime_key = self.compute_handler.get_runtime_key(self.runtime_name, runtime_memory)
    runtime_meta = self.internal_storage.get_runtime_meta(runtime_key)

    if not runtime_meta:
        # First time this extended runtime is seen: build and deploy it
        runtime_timeout = self.config['serverless']['runtime_timeout']
        logger.debug('Creating runtime: {}, memory: {}MB'.format(ext_runtime_name, runtime_memory))

        # NOTE: local variable name carries a typo ('temorary'); kept as-is
        runtime_temorary_directory = '/'.join([LITHOPS_TEMP_DIR, os.path.dirname(job.func_key)])
        modules_path = '/'.join([runtime_temorary_directory, 'modules'])

        ext_docker_file = '/'.join([runtime_temorary_directory, "Dockerfile"])

        # Generate Dockerfile extended with function dependencies and function
        with open(ext_docker_file, 'w') as df:
            df.write('\n'.join([
                'FROM {}'.format(base_docker_image),
                'ENV PYTHONPATH={}:${}'.format(modules_path, 'PYTHONPATH'),
                # set python path to point to dependencies folder
                'COPY . {}'.format(runtime_temorary_directory)
            ]))

        # Build new extended runtime tagged by function hash.
        # The docker build uses the temp dir as its context, so chdir into
        # it for the build and restore the cwd afterwards.
        # NOTE(review): unlike extend_runtime, the temp dir is not removed
        # after the build — confirm whether cleanup is handled elsewhere.
        cwd = os.getcwd()
        os.chdir(runtime_temorary_directory)
        self.compute_handler.build_runtime(ext_runtime_name, ext_docker_file)
        os.chdir(cwd)

        runtime_meta = self.compute_handler.create_runtime(ext_runtime_name, runtime_memory, runtime_timeout)
        runtime_meta['runtime_timeout'] = runtime_timeout
        self.internal_storage.put_runtime_meta(runtime_key, runtime_meta)

    # Verify lithops version compatibility between host and runtime
    if lithops_version != runtime_meta['lithops_version']:
        raise Exception("Lithops version mismatch. Host version: {} - Runtime version: {}"
                        .format(lithops_version, runtime_meta['lithops_version']))

    py_local_version = version_str(sys.version_info)
    # NOTE(review): this reads 'python_ver' while the sibling extend_runtime
    # function reads 'python_version' — confirm which key the runtime
    # metadata actually uses.
    py_remote_version = runtime_meta['python_ver']

    if py_local_version != py_remote_version:
        raise Exception(("The indicated runtime '{}' is running Python {} and it "
                         "is not compatible with the local Python version {}")
                        .format(self.runtime_name, py_remote_version, py_local_version))

    return runtime_meta