def setUp(self):
    """Build three geRunner fixtures that share the same simple command.

    self.r  : fully configured runner (input/output strings, flags, options)
    self.r2 : minimal runner (no input_string and no options)
    self.r3 : runner using the interpolation-style flags and options
    """
    # Fully-specified runner.
    self.r = geRunner(
        tmp_id=self.id_string,
        tmp_path=self.tmp_path,
        out_globs=self.out_glob,
        command=self.cmd_simple,
        input_data=self.input_data,
        input_string=self.input_string,
        output_string=self.output_string,
        flags=self.flags,
        options=self.options,
        std_out_str=self.std_out,
    )
    # Same command but without input_string/options, to exercise defaults.
    self.r2 = geRunner(
        tmp_id=self.id_string,
        tmp_path=self.tmp_path,
        out_globs=self.out_glob,
        command=self.cmd_simple,
        input_data=self.input_data,
        output_string=self.output_string,
        flags=self.flags,
        std_out_str=self.std_out,
    )
    # Variant driven by the interpolation flags/options fixtures.
    self.r3 = geRunner(
        tmp_id=self.id_string,
        tmp_path=self.tmp_path,
        out_globs=self.out_glob,
        command=self.cmd_simple,
        input_data=self.input_data,
        input_string=self.input_string,
        output_string=self.output_string,
        flags=self.interpolation_flags,
        options=self.interpolation_options,
        std_out_str=self.std_out,
    )
def make_runner(value, uuid, t, out_globs, in_globs, data_dict, params,
                param_values, stdoglob, environment, state, step_id, self,
                execution_behaviour):
    '''
    Build the runner object appropriate for `execution_behaviour`.

    Shared construction arguments are assembled once, then the matching
    runner class is instantiated with its backend-specific entry point
    (`command` for executables, `script` for R/Python). Returns None when
    the behaviour is not recognised.

    Not yet covered with unit tests
    '''
    shared = {
        'tmp_id': uuid,
        'tmp_path': t.backend.root_path,
        'out_globs': out_globs,
        'in_globs': in_globs,
        'input_data': data_dict,
        'params': params,
        'param_values': param_values,
        'identifier': uuid,
        'std_out_str': uuid + stdoglob,
        'env_vars': environment,
        # Mirror the site DEBUG switch onto the runner.
        'debug': True if settings.DEBUG else False,
    }
    if value:
        shared['value_string'] = value

    if execution_behaviour == QueueType.LOCALHOST:
        logger.info("Running On LOCALHOST")
        return localRunner(command=t.executable, **shared)

    if execution_behaviour == QueueType.GRIDENGINE:
        # Guard clause: DRMAA bindings may be absent on this host.
        if not ge_available:
            raise OSError("Grid Engine Libraries not available")
        logger.info("Running At GRIDENGINE")
        return geRunner(command=t.executable, **shared)

    if execution_behaviour == QueueType.R:
        logger.info("Running R")
        return rRunner(script=t.executable, **shared)

    if execution_behaviour == QueueType.PYTHON:
        logger.info("Running Python")
        return pythonRunner(script=t.executable, **shared)

    return None
# NOTE(review): this is a byte-for-byte duplicate of the make_runner defined
# earlier in this file; as the later definition it silently shadows the first.
# Confirm which copy is canonical and remove the other.
def make_runner(value, uuid, t, out_globs, in_globs, data_dict, params,
                param_values, stdoglob, environment, state, step_id, self,
                execution_behaviour):
    '''
    Not yet covered with unit tests
    '''
    # Arguments shared by every runner class, regardless of backend.
    kwargs = {'tmp_id': uuid,
              'tmp_path': t.backend.root_path,
              'out_globs': out_globs,
              'in_globs': in_globs,
              'input_data': data_dict,
              'params': params,
              'param_values': param_values,
              'identifier': uuid,
              'std_out_str': uuid+stdoglob,
              'env_vars': environment,
              'debug': False,
              }
    # Propagate the site-wide DEBUG switch to the runner.
    if settings.DEBUG:
        kwargs['debug'] = True
    if value:
        kwargs['value_string'] = value
    # Dispatch on the queue type; executables take `command`, interpreted
    # backends (R/Python) take `script`.
    if execution_behaviour == QueueType.LOCALHOST:
        logger.info("Running On LOCALHOST")
        kwargs['command'] = t.executable
        return localRunner(**kwargs)
    if execution_behaviour == QueueType.GRIDENGINE:
        if ge_available:
            logger.info("Running At GRIDENGINE")
            kwargs['command'] = t.executable
            return geRunner(**kwargs)
        else:
            raise OSError("Grid Engine Libraries not available")
    if execution_behaviour == QueueType.R:
        logger.info("Running R")
        kwargs['script'] = t.executable
        return rRunner(**kwargs)
    if execution_behaviour == QueueType.PYTHON:
        logger.info("Running Python")
        kwargs['script'] = t.executable
        return pythonRunner(**kwargs)
    # Unrecognised behaviour: caller must handle the None.
    return None
def task_runner(self, uuid, step_id, current_step, total_steps, task_name,
                flags, options):
    """
    Run a single workflow step for the submission identified by `uuid`.

    Fetches the Task config named `task_name` and the Submission from the
    db, gathers the input data from the previous step, then hands execution
    to the appropriate commandRunner backend (localhost or Grid Engine).
    Captured output files are written to the Result table; celery results
    are used only for messaging, the files travel via the results table.
    Progress and final state are recorded on the Submission row, and the
    user is emailed when mail settings permit.

    Raises:
        OSError: if the runner cannot be built, prepared or executed, or
            if the command exits non-zero. The Submission is marked with
            the failure message before the raise.
    """
    logger.info("TASK:" + task_name)
    logger.info("CURRENT STEP:" + str(current_step))
    logger.info("TOTAL STEPS:" + str(total_steps))
    logger.info("STEP ID:" + str(step_id))
    s = Submission.objects.get(UUID=uuid)
    t = Task.objects.get(name=task_name)
    state = Submission.ERROR  # assume failure until each stage succeeds
    # Globs arrive as comma-separated strings, possibly with whitespace.
    in_globs = "".join(t.in_glob.split()).split(",")
    out_globs = "".join(t.out_glob.split()).split(",")
    data_dict, previous_step = get_data(s, uuid, current_step, in_globs)
    iglob = in_globs[0].lstrip(".")
    oglob = out_globs[0].lstrip(".")
    # Update submission tracking to note that this step is running.
    Submission.update_submission_state(s, True, Submission.RUNNING, step_id,
                                       self.request.id,
                                       'About to run step: ' +
                                       str(current_step))
    stdoglob = ".stdout"
    if t.stdout_glob is not None and len(t.stdout_glob) > 0:
        stdoglob = "."+t.stdout_glob.lstrip(".")
    # Hand the actual running off to the commandRunner library.
    run = None
    # TODO: Candidate to move to the commandRunner, as it should handle
    # finding out what is happening on the backend — perhaps an API call
    # which returns the number of running processes and maybe the load
    # average.
    try:
        if t.backend.server_type == Backend.LOCALHOST:
            logger.info("Running At LOCALHOST")
            run = localRunner(tmp_id=uuid, tmp_path=t.backend.root_path,
                              out_globs=out_globs, command=t.executable,
                              input_data=data_dict, flags=flags,
                              options=options, std_out_str=uuid+stdoglob,
                              input_string=uuid+"."+iglob,
                              output_string=uuid+"."+oglob)
        if t.backend.server_type == Backend.GRIDENGINE:
            logger.info("Running At GRIDENGINE")
            run = geRunner(tmp_id=uuid, tmp_path=t.backend.root_path,
                           out_globs=out_globs, command=t.executable,
                           input_data=data_dict, flags=flags,
                           options=options, std_out_str=uuid+stdoglob,
                           input_string=uuid+"."+iglob,
                           output_string=uuid+"."+oglob)
    except Exception as e:
        cr_message = "Unable to initialise commandRunner: "+str(e)+" : " + \
                     str(current_step)
        Submission.update_submission_state(s, True, state, step_id,
                                           self.request.id, cr_message)
        raise OSError(cr_message)
    # FIX: fail fast with a clear message when the backend type matched
    # neither branch above; previously this surfaced as an AttributeError
    # on run.prepare() and was reported as a file-preparation failure.
    if run is None:
        no_runner = "No runner available for backend type: " + \
                    str(t.backend.server_type)+" : "+str(current_step)
        Submission.update_submission_state(s, True, state, step_id,
                                           self.request.id, no_runner)
        raise OSError(no_runner)
    try:
        run.prepare()
    except Exception as e:
        prep_message = "Unable to prepare files and tmp directory: "+str(e) + \
                       " : "+str(current_step)
        Submission.update_submission_state(s, True, state, step_id,
                                           self.request.id, prep_message)
        raise OSError(prep_message)
    try:
        logger.info("EXECUTABLE: "+run.command)
        # FIX: a second run.prepare() call was removed here — the tmp
        # directory is already staged by the call above.
        exit_status = run.run_cmd()
    except Exception as e:
        run_message = "Unable to call commandRunner.run_cmd(): "+str(e) + \
                      " : "+str(current_step)
        Submission.update_submission_state(s, True, state, step_id,
                                           self.request.id, run_message)
        raise OSError(run_message)
    # On success, send the file contents to the database.
    # TODO: For now we write everything to the file as utf-8, but binary
    # data will need handling eventually.
    # If DEBUG settings are true we leave behind the temp working dir.
    if settings.DEBUG is not True:
        run.tidy()
    if exit_status == 0:
        if run.output_data is not None:
            # One Result row per captured output file.
            for fName, fData in run.output_data.items():
                print("Writing Captured data")
                # Named `upload` to avoid shadowing the `file` builtin.
                upload = SimpleUploadedFile(fName, bytes(fData, 'utf-8'))
                Result.objects.create(submission=s, task=t,
                                      step=current_step, name=t.name,
                                      message='Result',
                                      previous_step=previous_step,
                                      result_data=upload)
        else:
            # No captured output: still record that the step produced a
            # result, with an empty payload.
            Result.objects.create(submission=s, task=t, step=current_step,
                                  name=t.name, message='Result',
                                  previous_step=previous_step,
                                  result_data=None)
    else:
        Submission.update_submission_state(s, True, Submission.ERROR,
                                           step_id, self.request.id,
                                           'Failed step, non 0 exit at step:' +
                                           str(step_id))
        logger.error("Command did not run: "+run.command)
        raise OSError("Command did not run: "+run.command)
    # Update where we are in the steps on the submission table.
    state = Submission.RUNNING
    message = "Completed step: " + str(current_step)
    if current_step == total_steps:
        state = Submission.COMPLETE
        message = 'Completed job at step #' + str(current_step)
    # NOTE(review): mail is attempted after every step, not only when the
    # job reaches COMPLETE — confirm this is intended.
    try:
        if s.email is not None and \
                len(s.email) > 5 and \
                settings.DEFAULT_FROM_EMAIL is not None:
            send_mail(settings.EMAIL_SUBJECT_STRING+": "+uuid,
                      settings.EMAIL_MESSAGE_STRING+uuid,
                      from_email=None,
                      recipient_list=[s.email],
                      fail_silently=False)
            logger.info("SENDING MAIL TO: "+s.email)
    except Exception as e:
        # Best-effort: a mail failure must not fail the step.
        logger.info("Mail server not available:" + str(e))
    Submission.update_submission_state(s, True, state, step_id,
                                       self.request.id, message)