def close(self):
    '''This is where most of the work will be done - in here is
    where the script itself gets written and run, and the output
    file channel opened when the process has finished...'''

    # ensure that there is no .xstatus file here

    script_writer(self._working_directory,
                  self._script_name,
                  self._executable,
                  self._script_command_line,
                  self._working_environment,
                  self._script_standard_input,
                  mkdirs = self._scratch_directories)

    # call the queue submission - this will be overloaded
    self.submit()

    # now have a while loop watching for the .xstatus file
    # using os.path.exists()

    xstatus_file = os.path.join(self._working_directory,
                                '%s.xstatus' % self._script_name)

    while True:
        if os.path.exists(xstatus_file):
            time.sleep(1)
            break
        time.sleep(5)

    try:
        self._script_status = int(open(xstatus_file, 'r').read())
    except:
        self._script_status = 0

    # set this up for reading the "standard output" of the job.
    self._output_file = open(os.path.join(self._working_directory,
                                          '%s.xout' % self._script_name),
                             'r')

    self.cleanup()

    return
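
# The .xstatus polling above relies on the wrapper script that
# script_writer() generates.  The following is a minimal, hypothetical
# sketch of that contract (the real DriverHelper.script_writer is not
# reproduced here): the wrapper runs the executable with its standard
# output captured in <script_name>.xout and, only once it has finished,
# records the exit status in <script_name>.xstatus - the sentinel file
# the while loop waits for.

import os

def example_script_writer(working_directory, script_name, executable,
                          command_line, standard_input):
    '''Sketch only - write <script_name>.sh in working_directory.'''

    script = os.path.join(working_directory, '%s.sh' % script_name)

    commands = ['#!/bin/bash',
                '%s %s << EOF > %s.xout' % (executable,
                                            ' '.join(command_line),
                                            script_name)]

    # standard input records are passed through a here document
    commands.extend(standard_input)
    commands.append('EOF')

    # the exit status is written last, so the appearance of the
    # .xstatus file means the job really has finished
    commands.append('echo $? > %s.xstatus' % script_name)

    open(script, 'w').write('\n'.join(commands) + '\n')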
def close(self):
    '''This is where most of the work will be done - in here is
    where the script itself gets written and run, and the output
    file channel opened when the process has finished...'''

    script_writer(self._working_directory,
                  self._script_name,
                  self._executable,
                  self._script_command_line,
                  self._working_environment,
                  self._script_standard_input)

    if os.name == 'posix':
        pipe = subprocess.Popen(['bash', '%s.sh' % self._script_name],
                                cwd = self._working_directory)
    else:
        pipe = subprocess.Popen(['%s.bat' % self._script_name],
                                cwd = self._working_directory,
                                shell = True)

    self._script_status = pipe.wait()

    # at this stage I should read the .xstatus file to determine if the
    # process has indeed finished - though it should have done...

    try:
        xstatus_file = os.path.join(self._working_directory,
                                    '%s.xstatus' % self._script_name)
        self._script_status = int(open(xstatus_file, 'r').read())
    except:
        # this could happen on windows if the program in question
        # is a batch file...
        self._script_status = 0

    self._output_file = open(os.path.join(self._working_directory,
                                          '%s.xout' % self._script_name),
                             'r')

    return
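
# A hedged sketch of a slightly more defensive way to recover the exit
# status used above: prefer the value recorded in <script_name>.xstatus
# by the wrapper script, and only fall back if that file is missing or
# unreadable (as the comment above notes can happen on Windows when the
# target is a batch file).  The helper name is hypothetical, and falling
# back to the subprocess return code rather than 0 is a small deviation
# from the code above.

import os

def read_script_status(working_directory, script_name, fallback_status):
    xstatus_file = os.path.join(working_directory,
                                '%s.xstatus' % script_name)
    try:
        with open(xstatus_file, 'r') as fh:
            return int(fh.read().strip())
    except (IOError, OSError, ValueError):
        # no usable .xstatus file - trust the return code from wait()
        return fallback_status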
def close(self):
    '''This is where most of the work will be done - in here is
    where the script itself gets written and run, and the output
    file channel opened when the process has finished...'''

    script_writer(self._working_directory,
                  self._script_name,
                  self._executable,
                  self._script_command_line,
                  self._working_environment,
                  self._script_standard_input)

    # this will return almost instantly, once the job is in
    # the queue

    pipe = subprocess.Popen(['qsub', '-V', '-cwd',
                             '%s.sh' % self._script_name],
                            cwd = self._working_directory,
                            stdout = subprocess.PIPE,
                            stderr = subprocess.PIPE)

    # this will get all of the output as a tuple (stdout, stderr)
    stdout, stderr = pipe.communicate()

    # check the standard error
    if len(stderr) > 0:
        # something probably went wrong
        if 'error opening' in stderr:
            raise RuntimeError('executable "%s" does not exist' %
                               stderr.split('\n')[0].split(':')[0].replace(
                                   'error opening ', ''))

    # probably everything is ok then

    # the job id etc go to the standard output
    job_id = stdout.split('\n')[0].split()[2]

    # now have a while loop watching this job id via qstat -j

    while True:
        pipe = subprocess.Popen(['qstat', '-j', '%s' % job_id],
                                cwd = self._working_directory,
                                stdout = subprocess.PIPE,
                                stderr = subprocess.PIPE)

        stdout, stderr = pipe.communicate()

        if 'Following jobs do not exist' in stderr:
            # then the job has finished
            break

        # sleep for 10 seconds
        time.sleep(10)

    # the following files may have some interesting contents - despite
    # the fact that all of the output was supposed to be piped to
    # the standard output...

    # oh - just looked. in the DriverHelper.script_writer method
    # the output is simply piped > not 2>&1 - which means that the
    # standard error output will appear below...

    sge_stdout = os.path.join(self._working_directory,
                              '%s.sh.o%s' % (self._script_name, job_id))
    sge_stderr = os.path.join(self._working_directory,
                              '%s.sh.e%s' % (self._script_name, job_id))

    # check the standard error file for any indications that
    # something went wrong running this job...

    error_output = open(sge_stderr, 'r').readlines()
    self.check_sge_errors(error_output)

    # it's nearly impossible to get the return status from qsub
    # so don't bother

    self._script_status = 0

    # set this up for reading the "standard output" of the job.
    self._output_file = open(os.path.join(self._working_directory,
                                          '%s.xout' % self._script_name),
                             'r')

    # at this stage I should delete the sge specific files defined
    # above to be tidy...

    try:
        os.remove(sge_stdout)
        os.remove(sge_stderr)
    except:
        # something wrong with this deletion?
        pass

    return
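
# A hedged sketch of the submission-line parsing relied on above.  Grid
# Engine normally reports a successful qsub on standard output with a
# line like:
#
#   Your job 12345 ("example.sh") has been submitted
#
# so splitting the first line on whitespace and taking element 2 gives
# the job id.  The helper below (hypothetical name) does the same thing
# a little more defensively, failing loudly if the output is not in the
# expected form.

def parse_qsub_job_id(qsub_stdout):
    first_line = qsub_stdout.split('\n')[0].strip()
    tokens = first_line.split()
    if len(tokens) < 3 or tokens[:2] != ['Your', 'job']:
        raise RuntimeError('unexpected qsub output: "%s"' % first_line)
    return tokens[2]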