def handle(self, *args, **options):
    """See :meth:`django.core.management.base.BaseCommand.handle`.

    This method starts the command.
    """
    # Parse the required environment variables defensively. The original code
    # used int(os.environ.get(...)), which raises an uncaught TypeError when a
    # variable is unset (os.environ.get returns None) — before the
    # error-handling try block below is ever reached. Fail with the general
    # exit code instead of a raw traceback.
    try:
        job_id = int(os.environ['SCALE_JOB_ID'])
        exe_num = int(os.environ['SCALE_EXE_NUM'])
    except (KeyError, ValueError):
        logger.exception('SCALE_JOB_ID and SCALE_EXE_NUM must be set to valid integers')
        sys.exit(GENERAL_FAIL_EXIT_CODE)

    logger.info('Command starting: scale_post_steps - Job ID: %d, Execution Number: %d', job_id, exe_num)
    try:
        # Get the pre-loaded job_exe for efficiency
        job_exe = self._get_job_exe(job_id, exe_num)
        self._perform_post_steps(job_exe)
    except ScaleError as err:
        # Known Scale error: log it and exit with its dedicated code
        err.log()
        sys.exit(err.exit_code)
    except Exception as ex:
        # Unknown exception: map it to a registered error if possible,
        # otherwise log the traceback and use the general failure code
        exit_code = GENERAL_FAIL_EXIT_CODE
        err = get_error_by_exception(ex.__class__.__name__)
        if err:
            err.log()
            exit_code = err.exit_code
        else:
            logger.exception('Error performing post-job steps')
        sys.exit(exit_code)
    logger.info('Command completed: scale_post_steps')
def delete_files(files, volume_path, broker):
    """Delete the given files within a workspace via its storage broker.

    :param files: List of named tuples containing path and ID of the file to delete.
    :type files: [collections.namedtuple]
    :param volume_path: Absolute path to the local container location onto which the
        volume file system was mounted, None if this broker does not use a container volume
    :type volume_path: string
    :param broker: The storage broker
    :type broker: `storage.brokers.broker.Broker`
    """
    logger.info('Deleting %i files', len(files))
    try:
        broker.delete_files(volume_path=volume_path, files=files, update_model=False)
    except ScaleError as scale_err:
        # Known Scale error: use its dedicated exit code
        scale_err.log()
        sys.exit(scale_err.exit_code)
    except Exception as unexpected:
        # Try to map the unknown exception onto a registered error
        known = get_error_by_exception(unexpected.__class__.__name__)
        if known:
            known.log()
            sys.exit(known.exit_code)
        logger.exception('Error performing delete_files steps')
        sys.exit(GENERAL_FAIL_EXIT_CODE)
def handle(self, *args, **options):
    """See :meth:`django.core.management.base.BaseCommand.handle`.

    This method starts the command.
    """
    exe_id = options.get('job_exe_id')
    logger.info('Command starting: scale_post_steps - Job Execution ID: %i', exe_id)
    try:
        # Get the pre-loaded job_exe for efficiency
        self._perform_post_steps(self._get_job_exe(exe_id))
    except ScaleError as scale_err:
        # Known Scale error: use its dedicated exit code
        scale_err.log()
        sys.exit(scale_err.exit_code)
    except Exception as unexpected:
        # Try to map the unknown exception onto a registered error;
        # fall back to the general failure code otherwise
        known = get_error_by_exception(unexpected.__class__.__name__)
        if known:
            known.log()
            sys.exit(known.exit_code)
        logger.exception('Job Execution %i: Error performing post-job steps', exe_id)
        sys.exit(GENERAL_FAIL_EXIT_CODE)
    logger.info('Command completed: scale_post_steps')
def handle(self, *args, **options):
    """See :meth:`django.core.management.base.BaseCommand.handle`.

    This method starts the command.
    """
    # Parse the required environment variables defensively. The original code
    # used int(os.environ.get(...)), which raises an uncaught TypeError when a
    # variable is unset (os.environ.get returns None) — before the
    # error-handling try block below is ever reached. Fail with the general
    # exit code instead of a raw traceback.
    try:
        job_id = int(os.environ['SCALE_JOB_ID'])
        exe_num = int(os.environ['SCALE_EXE_NUM'])
    except (KeyError, ValueError):
        logger.exception('SCALE_JOB_ID and SCALE_EXE_NUM must be set to valid integers')
        sys.exit(GENERAL_FAIL_EXIT_CODE)

    logger.info('Command starting: scale_pre_steps - Job ID: %d, Execution Number: %d', job_id, exe_num)
    try:
        job_exe = self._get_job_exe(job_id, exe_num)
        job_interface = job_exe.job_type.get_job_interface()
        exe_config = job_exe.get_execution_configuration()
        # Validate the execution configuration before touching any data
        logger.info('Validating mounts...')
        job_interface.validate_populated_mounts(exe_config)
        logger.info('Validating settings...')
        job_interface.validate_populated_settings(exe_config)
        logger.info('Validating outputs and workspaces...')
        job_interface.validate_workspace_for_outputs(exe_config)
        self._generate_input_metadata(job_exe)
        job_data = job_exe.job.get_job_data()
        job_data = JobData(job_data.get_dict())
        logger.info('Setting up input files...')
        job_interface.perform_pre_steps(job_data)
        logger.info('Ready to execute job: %s', exe_config.get_args('main'))
    except ScaleError as err:
        # Known Scale error: log it and exit with its dedicated code
        err.log()
        sys.exit(err.exit_code)
    except Exception as ex:
        # Unknown exception: map it to a registered error if possible,
        # otherwise log the traceback and use the general failure code
        exit_code = GENERAL_FAIL_EXIT_CODE
        err = get_error_by_exception(ex.__class__.__name__)
        if err:
            err.log()
            exit_code = err.exit_code
        else:
            logger.exception('Error performing pre-job steps')
        sys.exit(exit_code)
    logger.info('Command completed: scale_pre_steps')
def handle(self, *args, **options):
    """See :meth:`django.core.management.base.BaseCommand.handle`.

    This method starts the command.
    """
    job_exe_id = options.get('job_exe_id')
    logger.info('Command starting: scale_pre_steps - Job Execution ID: %i', job_exe_id)
    try:
        job_exe = self._get_job_exe(job_exe_id)
        interface = job_exe.get_job_interface()
        configuration = job_exe.get_execution_configuration()
        # Validate the configuration before performing any pre-steps
        interface.validate_populated_mounts(configuration)
        interface.validate_populated_settings(configuration)
        data = job_exe.job.get_job_data()
        environment = job_exe.get_job_environment()
        interface.perform_pre_steps(data, environment)
        # Build the full command line, then substitute configured settings
        args = interface.fully_populate_command_argument(data, environment, job_exe_id)
        args = interface.populate_command_argument_settings(args, configuration, job_exe.job.job_type)
        logger.info('Executing job: %i -> %s', job_exe_id, ' '.join(args))
        self._populate_command_arguments(job_exe_id, args)
    except ScaleError as scale_err:
        # Known Scale error: use its dedicated exit code
        scale_err.log()
        sys.exit(scale_err.exit_code)
    except Exception as unexpected:
        # Try to map the unknown exception onto a registered error;
        # fall back to the general failure code otherwise
        known = get_error_by_exception(unexpected.__class__.__name__)
        if known:
            known.log()
            sys.exit(known.exit_code)
        logger.exception('Job Execution %i: Error performing pre-job steps', job_exe_id)
        sys.exit(GENERAL_FAIL_EXIT_CODE)
    logger.info('Command completed: scale_pre_steps')
def move_files(file_ids, new_workspace=None, new_file_path=None):
    """Moves the given files to a different workspace/uri

    :param file_ids: List of ids of ScaleFile objects to move; should all be from the same workspace
    :type file_ids: [int]
    :param new_workspace: New workspace to move files to
    :type new_workspace: `storage.models.Workspace`
    :param new_file_path: New path for files
    :type new_file_path: string
    """
    try:
        messages = []
        files = ScaleFile.objects.all()
        files = files.select_related('workspace')
        files = files.defer('workspace__json_config')
        files = files.filter(id__in=file_ids).only('id', 'file_name', 'file_path', 'workspace')
        # Robustness fix: an empty file_ids list (or stale IDs matching no rows)
        # previously raised IndexError on files[0] below; treat it as a no-op.
        if not files:
            logger.info('No files found for the given file IDs. Doing nothing')
            return
        old_files = []
        old_workspace = files[0].workspace
        if new_workspace:
            # We need a local path to copy the file, try to get a direct path from the broker, if that fails we must
            # download the file and copy from there
            # TODO: a future refactor should make the brokers work off of file objects instead of paths so the extra
            # download is not necessary
            # BUG FIX: previously passed [files] — a single-element list wrapping
            # the queryset — instead of the collection of file models the broker
            # expects (every other call site iterates the files individually).
            paths = old_workspace.get_file_system_paths(list(files))
            local_paths = []
            if paths:
                local_paths = paths
            else:
                file_downloads = []
                for scale_file in files:  # renamed from 'file' to avoid shadowing the builtin
                    local_path = os.path.join('/tmp', scale_file.file_name)
                    file_downloads.append(FileDownload(scale_file, local_path, False))
                    local_paths.append(local_path)
                ScaleFile.objects.download_files(file_downloads)
            uploads = []
            for scale_file, path in zip(files, local_paths):
                old_path = scale_file.file_path
                # Remember the original name/path so the old copy can be deleted after upload
                old_files.append(ScaleFile(file_name=scale_file.file_name, file_path=scale_file.file_path))
                scale_file.file_path = new_file_path if new_file_path else scale_file.file_path
                logger.info('Copying %s in workspace %s to %s in workspace %s', old_path, scale_file.workspace.name,
                            scale_file.file_path, new_workspace.name)
                uploads.append(FileUpload(scale_file, path))
                messages.append(create_move_file_message(file_id=scale_file.id))
            ScaleFile.objects.upload_files(new_workspace, uploads)
        elif new_file_path:
            moves = []
            for scale_file in files:
                logger.info('Moving %s to %s in workspace %s', scale_file.file_path, new_file_path,
                            scale_file.workspace.name)
                moves.append(FileMove(scale_file, new_file_path))
                messages.append(create_move_file_message(file_id=scale_file.id))
            ScaleFile.objects.move_files(moves)
        else:
            logger.info('No new workspace or file path. Doing nothing')
        CommandMessageManager().send_messages(messages)
        if new_workspace:
            # Copied files to new workspace, so delete file in old workspace (if workspace provides local path to do so)
            old_workspace.delete_files(old_files, update_model=False)
    except ScaleError as err:
        # Known Scale error: log it and exit with its dedicated code
        err.log()
        sys.exit(err.exit_code)
    except Exception as ex:
        # Unknown exception: map it to a registered error if possible,
        # otherwise log the traceback and use the general failure code
        exit_code = GENERAL_FAIL_EXIT_CODE
        err = get_error_by_exception(ex.__class__.__name__)
        if err:
            err.log()
            exit_code = err.exit_code
        else:
            logger.exception('Error performing move_files steps')
        sys.exit(exit_code)