def _parse_config(self):
    # Read output list
    output = ConfigUtils.read_cfg_var('output')
    if output != '':
        self.output = output
    else:
        get_logger().warning(
            'There is no output defined for this function.')
    # Read input list
    input = ConfigUtils.read_cfg_var('input')
    if input != '':
        self.input = input
    else:
        get_logger().warning(
            'There is no input defined for this function.')
    # Read storage_providers dict
    storage_providers = ConfigUtils.read_cfg_var('storage_providers')
    if storage_providers and storage_providers != '':
        # s3 storage provider auth
        if 's3' in storage_providers and storage_providers['s3']:
            self._validate_s3_creds(storage_providers['s3'])
        # minio storage provider auth
        if 'minio' in storage_providers and storage_providers['minio']:
            self._validate_minio_creds(storage_providers['minio'])
        # onedata storage provider auth
        if 'onedata' in storage_providers and storage_providers['onedata']:
            self._validate_onedata_creds(storage_providers['onedata'])
    else:
        get_logger().warning(
            'There is no storage provider defined for this function.')
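# Illustrative only: a storage_providers value shaped the way _parse_config
# above expects it, with the three provider keys it checks before validating
# credentials. The field names inside each provider block are assumptions
# made for this sketch, not taken from the project's config schema.
example_storage_providers = {
    's3': {'access_key': 'AKIA...', 'secret_key': '...'},        # assumed fields
    'minio': {'endpoint': 'https://minio.example:9000',
              'access_key': '...', 'secret_key': '...'},         # assumed fields
    'onedata': {'oneprovider_host': 'example.onedata.org',
                'token': '...'},                                  # assumed fields
}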
def _parse_exec_script_and_commands(self):
    # Check for script in function event
    if 'script' in self.raw_event:
        self.script_path = f"{self.input_folder}/script.sh"
        script_content = StrUtils.base64_to_str(self.raw_event['script'])
        FileUtils.create_file_with_content(self.script_path, script_content)
    # Container invoked with arguments
    elif 'cmd_args' in self.raw_event:
        # Add args
        self.cmd_args = json.loads(self.raw_event['cmd_args'])
    # Script to be executed every time (if defined)
    elif ConfigUtils.read_cfg_var('init_script') != '':
        # Add init script
        self.init_script_path = f"{self.input_folder}/init_script.sh"
        FileUtils.cp_file(ConfigUtils.read_cfg_var('init_script'),
                          self.init_script_path)
def get_remaining_time_in_seconds(self):
    """Returns the amount of time remaining for the invocation in seconds."""
    remaining_time = int(self.context.get_remaining_time_in_millis() / 1000)
    timeout_threshold = SysUtils.get_env_var('TIMEOUT_THRESHOLD')
    if timeout_threshold == '':
        timeout_threshold = ConfigUtils.read_cfg_var('container')['timeout_threshold']
    return remaining_time - int(timeout_threshold)
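# A minimal usage sketch, assuming a hypothetical FakeContext in place of the
# real Lambda context object, to illustrate the arithmetic above: the
# configured threshold is subtracted from the seconds Lambda reports as
# remaining.
class FakeContext:
    def get_remaining_time_in_millis(self):
        return 290_000  # hypothetical: 290 s left before the Lambda timeout

remaining = int(FakeContext().get_remaining_time_in_millis() / 1000)
# With TIMEOUT_THRESHOLD=10, get_remaining_time_in_seconds() would return
# 290 - 10 = 280 usable seconds.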
def test_read_cfg_var_config_file(self):
    with mock.patch.dict('os.environ',
                         {'AWS_EXECUTION_ENV': 'AWS_Lambda_'},
                         clear=True):
        mopen = mock.mock_open(read_data=CONFIG_FILE)
        with mock.patch('builtins.open', mopen, create=True):
            var = ConfigUtils.read_cfg_var('name')
            mopen.assert_called_once_with('/var/task/function_config.yaml')
            self.assertEqual(var, 'test-func')
def __init__(self, lambda_instance):
    self.lambda_instance = lambda_instance
    # Create required udocker folder
    FileUtils.create_folder(SysUtils.get_env_var("UDOCKER_DIR"))
    # Init the udocker command that will be executed
    self.udocker_exec = [SysUtils.get_env_var("UDOCKER_EXEC")]
    self.cont_cmd = self.udocker_exec + ["--quiet", "run"]
    self.cont_img_id = ConfigUtils.read_cfg_var('container').get('image')
    if not self.cont_img_id:
        raise ContainerImageNotFoundError()
def _get_overrides(self):
    batch = ConfigUtils.read_cfg_var("batch")
    if batch.get("multi_node_parallel", {}).get("enabled"):
        return {
            "nodeOverrides": {
                "nodePropertyOverrides": [{
                    "containerOverrides": {
                        "environment": self.batch_job_env_vars
                    },
                    "targetNodes": "0:"
                }]
            }
        }
    else:
        return {
            "containerOverrides": {
                "environment": self.batch_job_env_vars
            }
        }
def _get_overrides(self):
    batch = ConfigUtils.read_cfg_var("batch")
    if batch.get("multi_node_parallel", {}).get("enabled"):
        num_nodes = batch.get("multi_node_parallel").get("number_nodes")
        target_nodes = num_nodes - 1
        return {
            "nodeOverrides": {
                "nodePropertyOverrides": [{
                    "containerOverrides": {
                        "environment": self.batch_job_env_vars
                    },
                    # Target every node in the job: indices 0 through N-1
                    "targetNodes": "0:" + str(target_nodes)
                }],
                "numNodes": num_nodes
            }
        }
    else:
        return {
            "containerOverrides": {
                "environment": self.batch_job_env_vars
            }
        }
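# A minimal sketch (not the supervisor's own code) of how the dict returned by
# _get_overrides() could be expanded into a boto3 Batch submit_job call, since
# its top-level key matches the corresponding submit_job parameter name
# ("containerOverrides" or "nodeOverrides"). The job name, queue and job
# definition below are hypothetical placeholders.
import boto3

def submit_batch_job_sketch(overrides):
    batch_client = boto3.client('batch')
    return batch_client.submit_job(jobName='scar-batch-job',      # hypothetical
                                   jobQueue='scar-job-queue',     # hypothetical
                                   jobDefinition='scar-job-def',  # hypothetical
                                   **overrides)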
def _is_lambda_batch_execution():
    return ConfigUtils.read_cfg_var("execution_mode") == "lambda-batch"
def is_batch_execution():
    return ConfigUtils.read_cfg_var("execution_mode") == "batch"
def _get_log_level():
    loglevel = logging.INFO
    config_level = ConfigUtils.read_cfg_var('log_level')
    if config_level != '':
        loglevel = logging.getLevelName(config_level)
    return loglevel
def test_read_cfg_var_config_encoded(self):
    with mock.patch.dict('os.environ',
                         {'FUNCTION_CONFIG': StrUtils.utf8_to_base64_string(CONFIG_FILE)},
                         clear=True):
        self.assertEqual(ConfigUtils.read_cfg_var('name'), 'test-func')
def test_read_cfg_var_environment(self):
    with mock.patch.dict('os.environ', {'LOG_LEVEL': 'TEST'}, clear=True):
        self.assertEqual(ConfigUtils.read_cfg_var('log_level'), 'TEST')
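# A minimal sketch, not the project's actual implementation, of the lookup
# order the three tests above exercise: a plain environment variable first
# (e.g. LOG_LEVEL for 'log_level'), then a base64-encoded FUNCTION_CONFIG
# variable holding the whole YAML config, then the function_config.yaml file
# packaged with the Lambda function. The helper name and the exact fallback
# order are assumptions made for illustration.
import base64
import os
import yaml

_LAMBDA_CONFIG_PATH = '/var/task/function_config.yaml'  # path asserted in the file-based test

def read_cfg_var_sketch(name):
    # 1) direct environment variable override
    env_value = os.environ.get(name.upper(), '')
    if env_value != '':
        return env_value
    # 2) whole config passed base64-encoded in FUNCTION_CONFIG
    encoded_cfg = os.environ.get('FUNCTION_CONFIG', '')
    if encoded_cfg != '':
        return yaml.safe_load(base64.b64decode(encoded_cfg)).get(name, '')
    # 3) config file deployed alongside the function code in AWS Lambda
    if os.environ.get('AWS_EXECUTION_ENV', '').startswith('AWS_Lambda_'):
        with open(_LAMBDA_CONFIG_PATH) as cfg_file:
            return yaml.safe_load(cfg_file).get(name, '')
    return ''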