def get_injected_command_args(self, values, env_vars):
    """Gets the job's command string with environment variables expanded into it

    NOTE(review): the ``values`` argument is not used by this implementation; it is
    kept for interface compatibility with callers — confirm whether placeholder
    injection from input values is still expected here.

    :param values: Input values to replace named placeholders in command value
    :type values: {str, str}
    :param env_vars: Incoming environment variables
    :type env_vars: dict
    :return: the command
    :rtype: str
    """

    # Imported locally to avoid a circular import at module load time
    from util.command import environment_expansion

    command = self.get_command()
    return environment_expansion(env_vars, command)
def test_multi_instances_var_expansion_success(self):
    """Tests expanding a mix of naked ($VAR), prefixed (${VAR/#/-t }) and wrapped (${VAR}) variables"""

    result = environment_expansion(self.env_dict, '$HELLO ${THERE/#/-t }, ${SUPER_FRIEND}')
    # assertEquals is a deprecated alias (removed in Python 3.12); use assertEqual
    self.assertEqual('Hello -t there, friend', result)
def _configure_regular_job(config, job_exe, job_type, system_logging_level):
    """Configures the given execution as a regular (non-system) job by adding pre and post tasks,
    input/output mounts, etc

    :param config: The execution configuration
    :type config: :class:`job.execution.configuration.json.exe_config.ExecutionConfiguration`
    :param job_exe: The job execution model being scheduled
    :type job_exe: :class:`job.models.JobExecution`
    :param job_type: The job type model
    :type job_type: :class:`job.models.JobType`
    :param system_logging_level: The logging level to be passed in through environment
    :type system_logging_level: str
    """

    # A regular job runs as four ordered tasks: pull image, pre-process, main work, post-process
    config.create_tasks(['pull', 'pre', 'main', 'post'])
    config.add_to_task('pull', args=create_pull_command(job_exe.docker_image))
    config.add_to_task('pre', args=PRE_TASK_COMMAND_ARGS)
    config.add_to_task('post', args=POST_TASK_COMMAND_ARGS)

    # Configure input workspaces: read-only views for pre/main, read-write for post
    ro_input_workspaces = {}
    rw_input_workspaces = {}
    for input_workspace in config.get_input_workspace_names():
        ro_input_workspaces[input_workspace] = TaskWorkspace(input_workspace, MODE_RO)
        rw_input_workspaces[input_workspace] = TaskWorkspace(input_workspace, MODE_RW)
    config.add_to_task('pre', workspaces=ro_input_workspaces)
    config.add_to_task('main', workspaces=ro_input_workspaces)
    # Post tasks have access to input workspaces in case input files need moved as part of parse results
    config.add_to_task('post', workspaces=rw_input_workspaces)

    # Configure output workspaces (always read-write, only needed by the post task)
    output_workspaces = {}
    for output_workspace in config.get_output_workspace_names():
        output_workspaces[output_workspace] = TaskWorkspace(output_workspace, MODE_RW)
    config.add_to_task('post', workspaces=output_workspaces)

    # Configure input/output mounts: pre populates both volumes (rw), main reads input (ro) and
    # writes output (rw), post only reads output (ro)
    input_mnt_name = 'scale_input_mount'
    output_mnt_name = 'scale_output_mount'
    input_vol_name = get_job_exe_input_vol_name(job_exe)
    output_vol_name = get_job_exe_output_vol_name(job_exe)
    input_vol_ro = Volume(input_vol_name, SCALE_JOB_EXE_INPUT_PATH, MODE_RO, is_host=False)
    input_vol_rw = Volume(input_vol_name, SCALE_JOB_EXE_INPUT_PATH, MODE_RW, is_host=False)
    output_vol_ro = Volume(output_vol_name, SCALE_JOB_EXE_OUTPUT_PATH, MODE_RO, is_host=False)
    output_vol_rw = Volume(output_vol_name, SCALE_JOB_EXE_OUTPUT_PATH, MODE_RW, is_host=False)
    config.add_to_task('pre', mount_volumes={input_mnt_name: input_vol_rw, output_mnt_name: output_vol_rw},
                       env_vars={'SYSTEM_LOGGING_LEVEL': system_logging_level})
    config.add_to_task('main', mount_volumes={input_mnt_name: input_vol_ro, output_mnt_name: output_vol_rw})
    config.add_to_task('post', mount_volumes={output_mnt_name: output_vol_ro},
                       env_vars={'SYSTEM_LOGGING_LEVEL': system_logging_level})

    # Configure output directory
    # TODO: original output dir and command arg replacement can be removed when Scale no longer supports old-style
    # job types
    env_vars = {'job_output_dir': SCALE_JOB_EXE_OUTPUT_PATH, 'OUTPUT_DIR': SCALE_JOB_EXE_OUTPUT_PATH}
    args = config._get_task_dict('main')['args']
    # TODO: Remove old-style logic for command parameters inject when with v6
    if not JobInterfaceSunset.is_seed_dict(job_type.manifest):
        args = JobInterface.replace_command_parameters(args, env_vars)
    else:
        args = environment_expansion(env_vars, args, remove_extras=True)
    config.add_to_task('main', args=args, env_vars=env_vars)

    # Configure task resources
    resources = job_exe.get_resources()
    # Pull-task and pre-task require full amount of resources
    config.add_to_task('pull', resources=resources)
    config.add_to_task('pre', resources=resources)
    # Main-task no longer requires the input file space
    resources.subtract(NodeResources([Disk(job_exe.input_file_size)]))
    config.add_to_task('main', resources=resources)
    # Post-task no longer requires any disk space
    resources.remove_resource('disk')
    config.add_to_task('post', resources=resources)
def test_unmatched_curly_var_expansion_ignored(self):
    """Tests that an expression containing an unterminated '${' raises UnbalancedBrackets"""

    expression = '${HELLO} ${THERE ${SUPER_FRIEND}'
    with self.assertRaises(UnbalancedBrackets):
        environment_expansion(self.env_dict, expression)
def test_prefixed_var_expansion_success(self):
    """Tests expanding a variable with a shell-style prefix substitution (${VAR/#/-t })"""

    result = environment_expansion(self.env_dict, '${THERE/#/-t }')
    # assertEquals is a deprecated alias (removed in Python 3.12); use assertEqual
    self.assertEqual('-t there', result)
def test_missing_var_expansion_remove_2(self):
    """Tests that a variable missing from the environment is dropped when remove_extras=True"""

    result = environment_expansion(self.env_dict, '${HELLO} ${THERE} ${PEOPLE} ${HELLO}', remove_extras=True)
    # assertEquals is a deprecated alias (removed in Python 3.12); use assertEqual
    self.assertEqual('Hello there Hello', result)
def test_wrapped_var_expansion_success(self):
    """Tests expanding curly-brace wrapped variables (${VAR})"""

    result = environment_expansion(self.env_dict, '${HELLO} ${THERE}')
    # assertEquals is a deprecated alias (removed in Python 3.12); use assertEqual
    self.assertEqual('Hello there', result)
def test_naked_var_expansion_numeral_success(self):
    """Tests expanding naked variables ($VAR) including one with a numeric value"""

    result = environment_expansion(self.env_dict, '$HELLO $THERE number $COUNT')
    # assertEquals is a deprecated alias (removed in Python 3.12); use assertEqual
    self.assertEqual('Hello there number 1', result)
def test_naked_var_expansion_success(self):
    """Tests expanding naked variables ($VAR)"""

    result = environment_expansion(self.env_dict, '$HELLO $THERE')
    # assertEquals is a deprecated alias (removed in Python 3.12); use assertEqual
    self.assertEqual('Hello there', result)
def _configure_secrets(self, config, job_exe, job_type, interface):
    """Creates a copy of the configuration, configures secrets (masked in one of the copies), and applies any final
    configuration

    :param config: The execution configuration, where the secrets will be masked out
    :type config: :class:`job.execution.configuration.json.exe_config.ExecutionConfiguration`
    :param job_exe: The job execution model being scheduled
    :type job_exe: :class:`job.models.JobExecution`
    :param job_type: The job type model
    :type job_type: :class:`job.models.JobType`
    :param interface: The job interface
    :type interface: :class:`job.configuration.interface.job_interface.JobInterface`
    :returns: The copy of the execution configuration that contains the secrets
    :rtype: :class:`job.execution.configuration.json.exe_config.ExecutionConfiguration`
    """

    # Copy the configuration: `config` keeps masked secrets, the copy gets the real values
    config_with_secrets = config.create_copy()

    # Configure settings values, some are secret
    if job_type.is_system:
        config.add_to_task('main', settings=self._system_settings_hidden)
        config_with_secrets.add_to_task('main', settings=self._system_settings)
    else:
        config.add_to_task('pre', settings=self._system_settings_hidden)
        config_with_secrets.add_to_task('pre', settings=self._system_settings)
        config.add_to_task('post', settings=self._system_settings_hidden)
        config_with_secrets.add_to_task('post', settings=self._system_settings)
    job_config = job_exe.job.get_job_configuration()
    secret_settings = secrets_mgr.retrieve_job_type_secrets(job_type.get_secrets_key())
    for _config, secrets_hidden in [(config, True), (config_with_secrets, False)]:
        task_settings = {}
        for setting in interface.get_settings():
            name = setting['name']
            if setting['secret']:
                value = None
                if name in secret_settings:
                    value = secret_settings[name]
                # Mask the secret value in the copy that is persisted/displayed
                if value is not None and secrets_hidden:
                    value = '*****'
            else:
                value = job_config.get_setting_value(name)
            # Include the setting if it is required or has a concrete value
            if ('required' in setting and setting['required']) or value is not None:
                task_settings[name] = value
        # BUG FIX: read the args from the configuration currently being updated (_config),
        # not always from the masked `config`. The old code re-read `config` on the second
        # iteration, so the secrets copy received args that already had '*****' substituted
        # instead of the real secret values.
        args = _config._get_task_dict('main')['args']
        args = environment_expansion(task_settings, args)
        _config.add_to_task('main', args=args, settings=task_settings)

    # Configure env vars for settings
    for _config in [config, config_with_secrets]:
        for task_type in _config.get_task_types():
            env_vars = {}
            for name, value in _config.get_settings(task_type).items():
                if value is not None:
                    env_name = normalize_env_var_name(name)
                    env_vars[env_name] = value
            _config.add_to_task(task_type, env_vars=env_vars)

    # Configure Docker parameters for env vars and Docker volumes
    for _config in [config, config_with_secrets]:
        existing_volumes = set()
        for task_type in _config.get_task_types():
            docker_params = []
            for name, value in _config.get_env_vars(task_type).items():
                docker_params.append(DockerParameter('env', '%s=%s' % (name, value)))
            for name, volume in _config.get_volumes(task_type).items():
                # Only the first task to use a volume creates it; later tasks reuse it
                docker_params.append(volume.to_docker_param(is_created=(name in existing_volumes)))
                existing_volumes.add(name)
            _config.add_to_task(task_type, docker_params=docker_params)

    return config_with_secrets