def close(self):
    """Write the job script, submit it and wait for completion.

    This is where most of the work is done: the wrapper script is
    written out via script_writer(), submitted through self.submit()
    (overloaded by queueing-system subclasses), and then this method
    polls for the ``.xstatus`` file the wrapper drops when the wrapped
    process exits.  Afterwards the ``.xout`` "standard output" file is
    opened for reading on self._output_file and self.cleanup() is
    called.
    """
    # NOTE(review): the original comment said "ensure that there is no
    # .xstatus file here", but no such check is performed - a stale
    # .xstatus from an earlier run would end the wait immediately.
    # TODO confirm callers guarantee a clean working directory.
    script_writer(
        self._working_directory,
        self._script_name,
        self._executable,
        self._script_command_line,
        self._working_environment,
        self._script_standard_input,
        mkdirs=self._scratch_directories,
    )

    # call the queue submission - this will be overloaded
    self.submit()

    # poll (via os.path.exists) for the .xstatus file which signals
    # that the wrapped process has finished
    xstatus_file = os.path.join(
        self._working_directory, "%s.xstatus" % self._script_name
    )

    while not os.path.exists(xstatus_file):
        time.sleep(5)
    # brief grace period so the status file is fully written before we
    # read it back
    time.sleep(1)

    # narrow exceptions: open() raises IOError/OSError, int() raises
    # ValueError - anything else should propagate rather than be
    # silently mapped to "success"
    try:
        with open(xstatus_file, "r") as fh:
            self._script_status = int(fh.read())
    except (IOError, OSError, ValueError):
        self._script_status = 0

    # set this up for reading the "standard output" of the job.
    self._output_file = open(
        os.path.join(self._working_directory, "%s.xout" % self._script_name),
        "r",
    )

    self.cleanup()
def close(self):
    """Write the job script, run it synchronously and collect output.

    The wrapper script produced by script_writer() is executed directly
    with subprocess (no queueing system).  The exit status is taken from
    the ``.xstatus`` file when available, falling back to the process
    return code semantics of the original implementation, and the
    ``.xout`` "standard output" file is opened on self._output_file.
    """
    script_writer(
        self._working_directory,
        self._script_name,
        self._executable,
        self._script_command_line,
        self._working_environment,
        self._script_standard_input,
    )

    # run the generated wrapper script in the working directory
    if os.name == "posix":
        pipe = subprocess.Popen(
            ["bash", "%s.sh" % self._script_name],
            cwd=self._working_directory,
        )
    else:
        # shell=True so that Windows resolves the .bat through cmd.exe
        pipe = subprocess.Popen(
            ["%s.bat" % self._script_name],
            cwd=self._working_directory,
            shell=True,
        )

    self._script_status = pipe.wait()

    # at this stage I should read the .xstatus file to determine if the
    # process has indeed finished - though it should have done...
    # narrow exceptions: open() raises IOError/OSError, int() raises
    # ValueError; anything else should propagate
    try:
        xstatus_file = os.path.join(
            self._working_directory, "%s.xstatus" % self._script_name
        )
        with open(xstatus_file) as fh:
            self._script_status = int(fh.read())
    except (IOError, OSError, ValueError):
        # this could happen on windows if the program in question
        # is a batch file...
        self._script_status = 0

    # set this up for reading the "standard output" of the job
    self._output_file = open(
        os.path.join(self._working_directory, "%s.xout" % self._script_name)
    )
def close(self):
    """Submit the job to Sun Grid Engine via qsub and wait for it.

    The wrapper script is written into a ``jobs/`` subdirectory of the
    working directory, submitted with qsub (``-pe smp N`` when more
    than one CPU thread is requested), and then polled with
    ``qstat -j <job id>`` until the job leaves the queue.  The SGE
    capture files are checked for errors via self.check_sge_errors()
    and removed, and the ``.xout`` "standard output" file is opened on
    self._output_file.

    Raises RuntimeError if qsub reports a missing executable or its
    reply cannot be parsed for a job id.
    """
    script_name = os.path.join("jobs", self._script_name)

    # keep generated scripts tidy in a jobs/ subdirectory; tolerate it
    # already existing, re-raise any other mkdir failure
    try:
        os.mkdir(os.path.join(self._working_directory, "jobs"))
    except OSError:
        if not os.path.exists(os.path.join(self._working_directory, "jobs")):
            raise

    # copy in LD_LIBRARY_PATH - SGE squashes this
    if (
        "LD_LIBRARY_PATH" in os.environ
        and "LD_LIBRARY_PATH" not in self._working_environment
    ):
        self._working_environment["LD_LIBRARY_PATH"] = os.environ[
            "LD_LIBRARY_PATH"
        ].split(os.pathsep)

    script_writer(
        self._working_directory,
        script_name,
        self._executable,
        self._script_command_line,
        self._working_environment,
        self._script_standard_input,
    )

    # this will return almost instantly, once the job is in the queue
    qsub_command = get_qsub_command()
    if not qsub_command:
        qsub_command = "qsub"
    qsub_command = shlex.split(qsub_command)

    if self._cpu_threads > 1:
        qsub_args = [
            "-V",
            "-cwd",
            "-pe",
            "smp",
            "%d" % self._cpu_threads,
            "%s.sh" % script_name,
        ]
    else:
        qsub_args = ["-V", "-cwd", "%s.sh" % script_name]

    # universal_newlines=True so communicate() yields str, not bytes,
    # on Python 3 - the substring tests below would otherwise raise
    # TypeError
    pipe = subprocess.Popen(
        qsub_command + qsub_args,
        cwd=self._working_directory,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        universal_newlines=True,
    )

    # this will get all of the output as a tuple (stdout, stderr)
    stdout, stderr = pipe.communicate()

    # check the standard error
    if stderr:
        # something probably went wrong
        if "error opening" in stderr:
            raise RuntimeError(
                'executable "%s" does not exist'
                % stdout.split("\n")[0].split(":")[0].replace(
                    "error opening ", ""
                )
            )
        # probably everything is ok then

    # the job id etc go to the standard output
    job_id = None
    for record in stdout.split("\n"):
        if "Your job" in record:
            job_id = record.split()[2]

    # the original hit a NameError below if the qsub reply was not
    # parsable; fail with a clear message instead
    if job_id is None:
        raise RuntimeError("could not parse job id from qsub output")

    # now poll this job id via qstat -j until it leaves the queue
    while True:
        pipe = subprocess.Popen(
            ["qstat", "-j", "%s" % job_id],
            cwd=self._working_directory,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            universal_newlines=True,
        )
        stdout, stderr = pipe.communicate()

        if "Following jobs do not exist" in stderr:
            # then the job has finished
            break

        # sleep for 10 seconds
        time.sleep(10)

    # the following files may have some interesting contents - despite
    # the fact that all of the output was supposed to be piped to
    # the standard output... (the script_writer wrapper pipes only
    # stdout with >, not 2>&1, so genuine stderr lands in the .e file)
    sge_stdout = os.path.join(
        self._working_directory, "%s.sh.o%s" % (self._script_name, job_id)
    )
    sge_stderr = os.path.join(
        self._working_directory, "%s.sh.e%s" % (self._script_name, job_id)
    )
    sge_pstdout = os.path.join(
        self._working_directory, "%s.sh.po%s" % (self._script_name, job_id)
    )
    sge_pstderr = os.path.join(
        self._working_directory, "%s.sh.pe%s" % (self._script_name, job_id)
    )

    # check the standard error file for any indications that something
    # went wrong running this job... the with-block closes the handle
    # the original leaked
    with open(sge_stderr, "r") as fh:
        error_output = fh.readlines()
    self.check_sge_errors(error_output)

    # it's nearly impossible to get the return status from qsub
    # so don't bother
    self._script_status = 0

    # set this up for reading the "standard output" of the job.
    self._output_file = open(
        os.path.join(self._working_directory, "%s.xout" % script_name), "r"
    )

    # delete the SGE specific files defined above to be tidy...
    # best-effort: leftover capture files are harmless
    try:
        os.remove(sge_stdout)
        os.remove(sge_stderr)
        if os.path.exists(sge_pstdout):
            os.remove(sge_pstdout)
        if os.path.exists(sge_pstderr):
            os.remove(sge_pstderr)
    except OSError:
        # something wrong with this deletion? not fatal
        pass