def cleanup_job_exe(job_exe_id):
    '''Cleans up a job execution

    :param job_exe_id: The job execution ID
    :type job_exe_id: int
    '''
    logger.info('Cleaning up job execution %s', str(job_exe_id))

    exe_dir = get_job_exe_dir(job_exe_id, settings.NODE_WORK_DIR)
    job_exe = JobExecution.objects.get_job_exe_with_job_and_job_type(job_exe_id)

    # Dispatch to the cleaner registered for this job type, falling back to
    # the default cleaner when no type-specific one exists
    cleaner = REGISTERED_CLEANERS.get(job_exe.job.job_type.name, DEFAULT_CLEANER)
    cleaner.cleanup_job_execution(job_exe)

    # Remove the execution's working directory if it is still on disk
    if os.path.exists(exe_dir):
        logger.info('Deleting %s', exe_dir)
        shutil.rmtree(exe_dir)

    # Record in the database that cleanup finished at the current time
    JobExecution.objects.cleanup_completed(job_exe_id, now())
    logger.info('Successfully cleaned up job execution %s', str(job_exe_id))
def handle(self, **options):
    '''See :meth:`django.core.management.base.BaseCommand.handle`.

    This method starts the command.
    '''
    exe_id = options.get('job_exe_id')
    logger.info('Command starting: scale_pre_steps - Job Execution ID: %i', exe_id)
    try:
        node_work_dir = settings.NODE_WORK_DIR
        job_exe = JobExecution.objects.get_job_exe_with_job_and_job_type(exe_id)

        # Resolve the execution's work/input/output directories and make sure
        # each one exists before any pre-steps run
        job_dir = file_system.get_job_exe_dir(exe_id, node_work_dir)
        input_dir = file_system.get_job_exe_input_dir(exe_id, node_work_dir)
        output_dir = file_system.get_job_exe_output_dir(exe_id, node_work_dir)
        job_dirs = [job_dir, input_dir, output_dir]
        for target_dir in job_dirs:
            self._create_job_dir(exe_id, target_dir)

        # Run the job interface's pre-steps against the job's data and
        # environment, then build the final command-line arguments
        job_interface = job_exe.get_job_interface()
        job_data = job_exe.job.get_job_data()
        job_environment = job_exe.get_job_environment()
        job_interface.perform_pre_steps(job_data, job_environment, exe_id)
        command_args = job_interface.fully_populate_command_argument(job_data, job_environment, exe_id)

        # This shouldn't be necessary once we have user namespaces in docker
        self._chmod_job_dir(file_system.get_job_exe_output_data_dir(exe_id))

        # Perform a force pull for docker jobs to get the latest version of the image before running
        # TODO: Remove this hack in favor of the feature in Mesos 0.22.x, see MESOS-1886 for details
        docker_image = job_exe.job.job_type.docker_image
        if docker_image:
            logger.info('Pulling latest docker image: %s', docker_image)
            try:
                subprocess.check_call(['sudo', 'docker', 'pull', docker_image])
            except subprocess.CalledProcessError:
                # A failed pull is logged but not fatal: the job may still run
                # against a previously pulled copy of the image
                logger.exception('Docker pull returned unexpected exit code.')
            except OSError:
                logger.exception('OS unable to run docker pull command.')

        logger.info('Executing job: %i -> %s', exe_id, ' '.join(command_args))
        # Persist the populated command arguments for the execution
        JobExecution.objects.pre_steps_command_arguments(exe_id, command_args)
    except Exception as e:
        # Map the failure to a categorized process exit code (database, NFS,
        # or I/O) so the caller can tell these failure modes apart; -1 covers
        # everything else
        logger.exception('Job Execution %i: Error performing pre-job steps', exe_id)
        exit_code = -1
        if isinstance(e, DatabaseError):
            exit_code = DB_EXIT_CODE
        elif isinstance(e, NfsError):
            exit_code = NFS_EXIT_CODE
        elif isinstance(e, IOError):
            exit_code = IO_EXIT_CODE
        sys.exit(exit_code)

    logger.info('Command completed: scale_pre_steps')
def get_ingest_work_dir(job_exe_id):
    '''Returns the work directory that a job execution can use to perform an ingest

    :param job_exe_id: The ID of the job execution
    :type job_exe_id: int
    :returns: The absolute path of the ingest work directory
    :rtype: str
    '''
    # Pass the configured node work directory explicitly, consistent with the
    # other get_job_exe_dir() call sites in this codebase
    job_exe_dir = get_job_exe_dir(job_exe_id, settings.NODE_WORK_DIR)
    return os.path.join(job_exe_dir, 'ingest_work')
def get_ingest_work_dir(job_exe_id):
    '''Returns the work directory that a job execution can use to perform an ingest

    :param job_exe_id: The ID of the job execution
    :type job_exe_id: int
    :returns: The absolute path of the ingest work directory
    :rtype: str
    '''
    # The ingest scratch space is a subdirectory of the execution's work dir
    base_dir = get_job_exe_dir(job_exe_id, settings.NODE_WORK_DIR)
    return os.path.join(base_dir, 'ingest_work')