def display_peek(self, dataset): try: return dataset.peek except Exception: return "Tabular SparseVector file (%s)" % ( data.nice_size(dataset.get_size()) )
def set_peek(self, dataset, is_multi_byte=False): if not dataset.dataset.purged: dataset.peek = "gSpan" dataset.blurb = data.nice_size(dataset.get_size()) else: dataset.peek = "file does not exist" dataset.blurb = "file purged from disk"
def set_peek(self, dataset, is_multi_byte=False): if not dataset.dataset.purged: dataset.peek = "Binary sra file" dataset.blurb = nice_size(dataset.get_size()) else: dataset.peek = "file does not exist" dataset.blurb = "file purged from disk"
def run_job( self, job_wrapper ):
    job_wrapper.set_runner( 'local:///', None )
    stderr = stdout = command_line = ''
    # Prepare the job to run
    try:
        job_wrapper.prepare()
        command_line = self.build_command_line( job_wrapper )
    except:
        log.exception( "failure running job %d" % job_wrapper.job_id )
        job_wrapper.fail( "failure preparing job", exception=True )
        return
    # If we were able to get a command line, run the job
    if command_line:
        try:
            log.debug( 'executing: %s' % command_line )
            stdout_file = tempfile.NamedTemporaryFile( suffix='_stdout', dir=job_wrapper.working_directory )
            stderr_file = tempfile.NamedTemporaryFile( suffix='_stderr', dir=job_wrapper.working_directory )
            proc = subprocess.Popen( args = command_line,
                                     shell = True,
                                     cwd = job_wrapper.working_directory,
                                     stdout = stdout_file,
                                     stderr = stderr_file,
                                     env = os.environ,
                                     preexec_fn = os.setpgrp )
            job_wrapper.set_runner( 'local:///', proc.pid )
            job_wrapper.change_state( model.Job.states.RUNNING )
            if self.app.config.output_size_limit > 0:
                sleep_time = 1
                while proc.poll() is None:
                    for outfile, size in job_wrapper.check_output_sizes():
                        if size > self.app.config.output_size_limit:
                            # Error the job immediately
                            job_wrapper.fail( 'Job output grew too large (greater than %s), please try different job parameters' \
                                              % nice_size( self.app.config.output_size_limit ) )
                            log.warning( 'Terminating job %s due to output %s growing larger than %s limit' \
                                         % ( job_wrapper.job_id, os.path.basename( outfile ), nice_size( self.app.config.output_size_limit ) ) )
                            # Then kill it
                            os.killpg( proc.pid, 15 )
                            sleep( 1 )
                            if proc.poll() is None:
                                os.killpg( proc.pid, 9 )
                            proc.wait()  # reap
                            log.debug( 'Job %s (pid %s) terminated' % ( job_wrapper.job_id, proc.pid ) )
                            return
                    sleep( sleep_time )
                    if sleep_time < 8:  # So we don't stat every second
                        sleep_time *= 2
            proc.wait()  # reap
            stdout_file.seek( 0 )
            stderr_file.seek( 0 )
            stdout = stdout_file.read( 32768 )
            stderr = stderr_file.read( 32768 )
            stdout_file.close()
            stderr_file.close()
            log.debug( 'execution finished: %s' % command_line )
        except Exception, exc:
            job_wrapper.fail( "failure running job", exception=True )
            log.exception( "failure running job %d" % job_wrapper.job_id )
            return
def set_peek( self, dataset, is_multi_byte=False ): if not dataset.dataset.purged: dataset.peek = "Audio (MP3) file" dataset.blurb = data.nice_size( dataset.get_size() ) else: dataset.peek = 'file does not exist' dataset.blurb = 'file purged from disk'
def run_job( self, job_wrapper ):
    job_wrapper.set_runner( 'local:///', None )
    stderr = stdout = command_line = ''
    # Prepare the job to run
    try:
        job_wrapper.prepare()
        command_line = job_wrapper.get_command_line()
    except:
        job_wrapper.fail( "failure preparing job", exception=True )
        log.exception( "failure running job %d" % job_wrapper.job_id )
        return
    # If we were able to get a command line, run the job
    if command_line:
        # Copy the environment so that prepending galaxy_lib_dir to
        # PYTHONPATH does not mutate os.environ for the whole process
        env = dict( os.environ )
        if job_wrapper.galaxy_lib_dir is not None:
            if 'PYTHONPATH' in env:
                env['PYTHONPATH'] = "%s:%s" % ( env['PYTHONPATH'], job_wrapper.galaxy_lib_dir )
            else:
                env['PYTHONPATH'] = job_wrapper.galaxy_lib_dir
        try:
            log.debug( 'executing: %s' % command_line )
            proc = subprocess.Popen( args = command_line,
                                     shell = True,
                                     cwd = job_wrapper.working_directory,
                                     stdout = subprocess.PIPE,
                                     stderr = subprocess.PIPE,
                                     env = env,
                                     preexec_fn = os.setpgrp )
            job_wrapper.set_runner( 'local:///', proc.pid )
            job_wrapper.change_state( model.Job.states.RUNNING )
            if self.app.config.output_size_limit > 0:
                sleep_time = 1
                while proc.poll() is None:
                    for outfile, size in job_wrapper.check_output_sizes():
                        if size > self.app.config.output_size_limit:
                            # Error the job immediately
                            job_wrapper.fail( 'Job output grew too large (greater than %s), please try different job parameters' \
                                              % nice_size( self.app.config.output_size_limit ) )
                            log.warning( 'Terminating job %s due to output %s growing larger than %s limit' \
                                         % ( job_wrapper.job_id, os.path.basename( outfile ), nice_size( self.app.config.output_size_limit ) ) )
                            # Then kill it
                            os.killpg( proc.pid, 15 )
                            sleep( 1 )
                            if proc.poll() is None:
                                os.killpg( proc.pid, 9 )
                            proc.wait()  # reap
                            log.debug( 'Job %s (pid %s) terminated' % ( job_wrapper.job_id, proc.pid ) )
                            return
                    sleep( sleep_time )
                    if sleep_time < 8:  # So we don't stat every second
                        sleep_time *= 2
            stdout = proc.stdout.read()
            stderr = proc.stderr.read()
            proc.wait()  # reap
            log.debug( 'execution finished: %s' % command_line )
        except Exception, exc:
            job_wrapper.fail( "failure running job", exception=True )
            log.exception( "failure running job %d" % job_wrapper.job_id )
            return
def set_peek(self, dataset, is_multi_byte=False):
    if not dataset.dataset.purged:
        dataset.peek = 'Binary sra file'
        dataset.blurb = nice_size(dataset.get_size())
    else:
        dataset.peek = 'file does not exist'
        dataset.blurb = 'file purged from disk'
def set_peek(self, dataset, is_multi_byte=False): if not dataset.dataset.purged: dataset.peek = "Thermo Finnigan RAW file" dataset.blurb = data.nice_size(dataset.get_size()) else: dataset.peek = 'file does not exist' dataset.blurb = 'file purged from disk'
def set_peek(self, dataset, is_multi_byte=False): if not dataset.dataset.purged: dataset.peek = "SparseVector" dataset.blurb = data.nice_size(dataset.get_size()) else: dataset.peek = 'file does not exist' dataset.blurb = 'file purged from disk'
def memory( since=0.0, pretty=False ):
    '''Return memory usage in bytes.'''
    size = _VmB( 'VmSize:' ) - since
    if pretty:
        return nice_size( size )
    else:
        return size
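# memory() relies on a _VmB() helper that is not shown in this section.  A
# minimal sketch of such a helper, reading the requested field (e.g.
# 'VmSize:') from /proc/self/status on Linux -- the unit table and the
# fallback behavior here are assumptions, not the exact implementation:
import os

_proc_status = '/proc/%d/status' % os.getpid()
_scale = { 'kB': 1024.0, 'KB': 1024.0,
           'mB': 1024.0 * 1024.0, 'MB': 1024.0 * 1024.0 }

def _VmB( VmKey ):
    '''Return the value (in bytes) of the VmKey field of /proc/self/status.'''
    try:
        with open( _proc_status ) as f:
            v = f.read()
    except IOError:
        return 0.0  # non-Linux platform, or /proc unavailable
    i = v.find( VmKey )
    if i == -1:
        return 0.0  # field not present
    v = v[i:].split( None, 3 )  # e.g. ['VmSize:', '28', 'kB', '...']
    if len( v ) < 3:
        return 0.0
    return float( v[1] ) * _scale.get( v[2], 0.0 )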
def run_job(self, job_wrapper):
    job_wrapper.set_runner('local:///', None)
    stderr = stdout = command_line = ''
    # Prepare the job to run
    try:
        job_wrapper.prepare()
        command_line = job_wrapper.get_command_line()
    except:
        job_wrapper.fail("failure preparing job", exception=True)
        log.exception("failure running job %d" % job_wrapper.job_id)
        return
    # If we were able to get a command line, run the job
    if command_line:
        try:
            log.debug('executing: %s' % command_line)
            proc = subprocess.Popen(args=command_line,
                                    shell=True,
                                    cwd=job_wrapper.working_directory,
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE,
                                    env=os.environ,
                                    preexec_fn=os.setpgrp)
            job_wrapper.set_runner('local:///', proc.pid)
            job_wrapper.change_state(model.Job.states.RUNNING)
            if self.app.config.output_size_limit > 0:
                sleep_time = 1
                while proc.poll() is None:
                    for outfile, size in job_wrapper.check_output_sizes():
                        if size > self.app.config.output_size_limit:
                            # Error the job immediately
                            job_wrapper.fail('Job output grew too large (greater than %s), please try different job parameters'
                                             % nice_size(self.app.config.output_size_limit))
                            log.warning('Terminating job %s due to output %s growing larger than %s limit'
                                        % (job_wrapper.job_id, os.path.basename(outfile), nice_size(self.app.config.output_size_limit)))
                            # Then kill it
                            os.killpg(proc.pid, 15)
                            sleep(1)
                            if proc.poll() is None:
                                os.killpg(proc.pid, 9)
                            proc.wait()  # reap
                            log.debug('Job %s (pid %s) terminated' % (job_wrapper.job_id, proc.pid))
                            return
                    sleep(sleep_time)
                    if sleep_time < 8:  # So we don't stat every second
                        sleep_time *= 2
            stdout = proc.stdout.read()
            stderr = proc.stderr.read()
            proc.wait()  # reap
            log.debug('execution finished: %s' % command_line)
        except Exception, exc:
            job_wrapper.fail("failure running job", exception=True)
            log.exception("failure running job %d" % job_wrapper.job_id)
            return
def display_peek(self, dataset): try: return dataset.peek except Exception: return "Ply file (%s)" % (nice_size(dataset.get_size()))
def run_job( self, job_wrapper ):
    # Do not run the job if something happened to its state while it was
    # enqueued.  (For example, a task could have been cancelled and does
    # not need to be run.)
    if model.Job.states.QUEUED != job_wrapper.get_state():
        log.debug( "Local runner: job %s is in state %s and will not be run" % ( job_wrapper.get_id_tag(), job_wrapper.get_state() ) )
    else:
        log.debug( "Local runner: starting job %s" % job_wrapper.get_id_tag() )
        job_wrapper.set_runner( 'local:///', None )
        stderr = stdout = command_line = ''
        exit_code = 0
        # Prepare the job to run
        try:
            job_wrapper.prepare()
            command_line = self.build_command_line( job_wrapper )
        except:
            log.exception( "failure running job %d" % job_wrapper.job_id )
            job_wrapper.fail( "failure preparing job", exception=True )
            return
        # If we were able to get a command line, run the job
        if command_line:
            try:
                log.debug( 'executing: %s' % command_line )
                stdout_file = tempfile.NamedTemporaryFile( suffix='_stdout', dir=job_wrapper.working_directory )
                stderr_file = tempfile.NamedTemporaryFile( suffix='_stderr', dir=job_wrapper.working_directory )
                proc = subprocess.Popen( args = command_line,
                                         shell = True,
                                         cwd = job_wrapper.working_directory,
                                         stdout = stdout_file,
                                         stderr = stderr_file,
                                         env = os.environ,
                                         preexec_fn = os.setpgrp )
                job_wrapper.set_runner( 'local:///', proc.pid )
                job_wrapper.change_state( model.Job.states.RUNNING )
                sleep_time = 1
                job_start = datetime.datetime.now()
                while proc.poll() is None:
                    if self.app.config.output_size_limit > 0:
                        for outfile, size in job_wrapper.check_output_sizes():
                            if size > self.app.config.output_size_limit:
                                # Error the job immediately
                                job_wrapper.fail( 'Job output grew too large (greater than %s), please try different job parameters' \
                                                  % nice_size( self.app.config.output_size_limit ) )
                                log.warning( 'Terminating job %s due to output %s growing larger than %s limit' \
                                             % ( job_wrapper.job_id, os.path.basename( outfile ), nice_size( self.app.config.output_size_limit ) ) )
                                # Then kill it
                                self._terminate( proc )
                                log.debug( 'Job %s (pid %s) terminated' % ( job_wrapper.job_id, proc.pid ) )
                                return
                    sleep( sleep_time )
                    if self.app.config.job_walltime_delta is not None:
                        time_executing = datetime.datetime.now() - job_start
                        if time_executing > self.app.config.job_walltime_delta:
                            # Error the job immediately
                            job_wrapper.fail( 'Job ran longer than maximum allowed execution time (%s), please try different job parameters' \
                                              % self.app.config.job_walltime )
                            log.warning( 'Terminating job %s since walltime has been reached' % job_wrapper.job_id )
                            # Then kill it
                            self._terminate( proc )
                            log.debug( 'Job %s (pid %s) terminated' % ( job_wrapper.job_id, proc.pid ) )
                            return
                    if sleep_time < 8:  # So we don't stat every second
                        sleep_time *= 2
                # Reap the process and get the exit code.
                exit_code = proc.wait()
                stdout_file.seek( 0 )
                stderr_file.seek( 0 )
                stdout = stdout_file.read( 32768 )
                stderr = stderr_file.read( 32768 )
                stdout_file.close()
                stderr_file.close()
                log.debug( 'execution finished: %s' % command_line )
            except Exception, exc:
                job_wrapper.fail( "failure running job", exception=True )
                log.exception( "failure running job %d" % job_wrapper.job_id )
                return
        # Run the metadata setting script here.  This is terminate-able when
        # the output dataset/job is deleted, so that long-running set_meta()s
        # can be canceled without having to reboot the server.
        if job_wrapper.get_state() not in [ model.Job.states.ERROR, model.Job.states.DELETED ] and self.app.config.set_metadata_externally and job_wrapper.output_paths:
            external_metadata_script = job_wrapper.setup_external_metadata( output_fnames = job_wrapper.get_output_fnames(),
                                                                            set_extension = True,
                                                                            tmp_dir = job_wrapper.working_directory,
                                                                            # We don't want to overwrite metadata that was copied over in init_meta(), as per established behavior
                                                                            kwds = { 'overwrite' : False } )
            log.debug( 'executing external set_meta script for job %d: %s' % ( job_wrapper.job_id, external_metadata_script ) )
            external_metadata_proc = subprocess.Popen( args = external_metadata_script,
                                                       shell = True,
                                                       env = os.environ,
                                                       preexec_fn = os.setpgrp )
            job_wrapper.external_output_metadata.set_job_runner_external_pid( external_metadata_proc.pid, self.sa_session )
            external_metadata_proc.wait()
            log.debug( 'execution of external set_meta for job %d finished' % job_wrapper.job_id )
        # Finish the job!
        try:
            job_wrapper.finish( stdout, stderr, exit_code )
        except:
            log.exception( "Job wrapper finish method failed" )
            job_wrapper.fail( "Unable to finish job", exception=True )
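# The newer run_job() above delegates process shutdown to a _terminate()
# helper that is not shown in this section.  Based on the inline kill logic
# in the older variants (SIGTERM to the process group, a short grace period,
# then SIGKILL, then reap), a plausible sketch of that method on the runner
# class -- the exact signature and timing are assumptions:
def _terminate( self, proc ):
    os.killpg( proc.pid, 15 )  # SIGTERM the whole process group
    sleep( 1 )
    if proc.poll() is None:
        os.killpg( proc.pid, 9 )  # still alive after the grace period: SIGKILL
    return proc.wait()  # reap the process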
def display_peek(self, dataset): try: return dataset.peek except: return "Thermo Finnigan RAW file (%s)" % (data.nice_size( dataset.get_size()))
def check_watched_items(self):
    """
    Called by the monitor thread to look at each watched job and deal
    with state changes.
    """
    new_watched = []
    # reduce pbs load by batching status queries
    (failures, statuses) = self.check_all_jobs()
    for pbs_job_state in self.watched:
        job_id = pbs_job_state.job_id
        galaxy_job_id = pbs_job_state.job_wrapper.job_id
        old_state = pbs_job_state.old_state
        pbs_server_name = self.determine_pbs_server(pbs_job_state.runner_url)
        if pbs_server_name in failures:
            log.debug("(%s/%s) Skipping state check because PBS server connection failed" % (galaxy_job_id, job_id))
            new_watched.append(pbs_job_state)
            continue
        try:
            status = statuses[job_id]
        except KeyError:
            try:
                # Recheck to make sure it wasn't a communication problem
                self.check_single_job(pbs_server_name, job_id)
                log.warning("(%s/%s) PBS job was not in state check list, but was found with individual state check" % (galaxy_job_id, job_id))
                new_watched.append(pbs_job_state)
            except:
                errno, text = pbs.error()
                if errno == 15001:
                    # 15001 == job not in queue
                    log.debug("(%s/%s) PBS job has left queue" % (galaxy_job_id, job_id))
                    self.work_queue.put(('finish', pbs_job_state))
                else:
                    # Unhandled error, continue to monitor
                    log.info("(%s/%s) PBS state check resulted in error (%d): %s" % (galaxy_job_id, job_id, errno, text))
                    new_watched.append(pbs_job_state)
            continue
        if status.job_state != old_state:
            log.debug("(%s/%s) PBS job state changed from %s to %s" % (galaxy_job_id, job_id, old_state, status.job_state))
        if status.job_state == "R" and not pbs_job_state.running:
            pbs_job_state.running = True
            pbs_job_state.job_wrapper.change_state(model.Job.states.RUNNING)
        if status.job_state == "R" and (pbs_job_state.check_count % 20) == 0:
            # Every 20th time the job status is checked, do limit checks (if configured)
            if self.app.config.output_size_limit > 0:
                # Check the size of the job outputs
                fail = False
                for outfile, size in pbs_job_state.job_wrapper.check_output_sizes():
                    if size > self.app.config.output_size_limit:
                        pbs_job_state.fail_message = 'Job output grew too large (greater than %s), please try different job parameters' \
                            % nice_size(self.app.config.output_size_limit)
                        log.warning('(%s/%s) Dequeueing job due to output %s growing larger than %s limit' \
                            % (galaxy_job_id, job_id, os.path.basename(outfile), nice_size(self.app.config.output_size_limit)))
                        pbs_job_state.stop_job = True
                        self.work_queue.put(('fail', pbs_job_state))
                        fail = True
                        break
                if fail:
                    continue
            if self.job_walltime is not None:
                # Check the job's execution time
                if status.get('resources_used', False):
                    # resources_used may not be in the status for new jobs
                    h, m, s = [int(i) for i in status.resources_used.walltime.split(':')]
                    time_executing = timedelta(0, s, 0, 0, m, h)
                    if time_executing > self.job_walltime:
                        pbs_job_state.fail_message = 'Job ran longer than maximum allowed execution time (%s), please try different job parameters' \
                            % self.app.config.job_walltime
                        log.warning('(%s/%s) Dequeueing job since walltime has been reached' \
                            % (galaxy_job_id, job_id))
                        pbs_job_state.stop_job = True
                        self.work_queue.put(('fail', pbs_job_state))
                        continue
        elif status.job_state == "C":
            # "keep_completed" is enabled in PBS, so try to check exit status
            try:
                assert int(status.exit_status) == 0
                log.debug("(%s/%s) PBS job has completed successfully" % (galaxy_job_id, job_id))
            except AssertionError:
                pbs_job_state.fail_message = 'Job cannot be completed due to a cluster error, please retry it later'
                log.error('(%s/%s) PBS job failed: %s' % (galaxy_job_id, job_id, JOB_EXIT_STATUS.get(int(status.exit_status), 'Unknown error: %s' % status.exit_status)))
                self.work_queue.put(('fail', pbs_job_state))
                continue
            except AttributeError:
                # No exit_status, can't verify proper completion so we just have to assume success.
                log.debug("(%s/%s) PBS job has completed" % (galaxy_job_id, job_id))
            self.work_queue.put(('finish', pbs_job_state))
            continue
        pbs_job_state.old_state = status.job_state
        new_watched.append(pbs_job_state)
    # Replace the watch list with the updated version
    self.watched = new_watched
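# JOB_EXIT_STATUS above maps TORQUE/PBS exit codes (negative values indicate
# pre-execution failures on the MOM) to readable messages.  A partial sketch
# with a few well-known codes -- treat the exact wording and coverage as
# assumptions, not the module's actual table:
JOB_EXIT_STATUS = {
    0: "job exec successful",
    -1: "job exec failed, before files, no retry",
    -2: "job exec failed, after files, no retry",
    -3: "job execution failed, do retry",
    -4: "job aborted on MOM initialization",
}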
def display_peek( self, dataset ): try: return dataset.peek except: return "Media (OGG) file (%s)" % ( data.nice_size( dataset.get_size() ) )
def display_peek(self, dataset): try: return dataset.peek except: return "Vtk file (%s)" % (nice_size(dataset.get_size()))
def display_peek(self, dataset):
    try:
        return dataset.peek
    except:
        return 'Binary sra file (%s)' % (nice_size(dataset.get_size()))
def display_peek(self, dataset): try: return dataset.peek except: return "HMMER database (%s)" % ( nice_size( dataset.get_size() ) )
def display_peek( self, dataset ): try: return dataset.peek except: return "Tabular SparseVector file (%s)" % ( data.nice_size( dataset.get_size() ) )
def display_peek(self, dataset): try: return dataset.peek except: return "Thermo Finnigan RAW file (%s)" % (data.nice_size(dataset.get_size()))
def display_peek( self, dataset ): try: return dataset.peek except: return "Video (AVI) file (%s)" % ( data.nice_size( dataset.get_size() ) )
def check_watched_items( self ):
    """
    Called by the monitor thread to look at each watched job and deal
    with state changes.
    """
    new_watched = []
    # reduce pbs load by batching status queries
    ( failures, statuses ) = self.check_all_jobs()
    for pbs_job_state in self.watched:
        job_id = pbs_job_state.job_id
        galaxy_job_id = pbs_job_state.job_wrapper.job_id
        old_state = pbs_job_state.old_state
        pbs_server_name = self.determine_pbs_server( pbs_job_state.runner_url )
        if pbs_server_name in failures:
            log.debug( "(%s/%s) Skipping state check because PBS server connection failed" % ( galaxy_job_id, job_id ) )
            new_watched.append( pbs_job_state )
            continue
        if statuses.has_key( job_id ):
            status = statuses[job_id]
            if status.job_state != old_state:
                log.debug("(%s/%s) job state changed from %s to %s" % ( galaxy_job_id, job_id, old_state, status.job_state ) )
            if status.job_state == "R" and not pbs_job_state.running:
                pbs_job_state.running = True
                pbs_job_state.job_wrapper.change_state( model.Job.states.RUNNING )
            if status.job_state == "R" and ( pbs_job_state.check_count % 20 ) == 0:
                # Every 20th time the job status is checked, do limit checks (if configured)
                if self.app.config.output_size_limit > 0:
                    # Check the size of the job outputs
                    fail = False
                    for outfile, size in pbs_job_state.job_wrapper.check_output_sizes():
                        if size > self.app.config.output_size_limit:
                            pbs_job_state.fail_message = 'Job output grew too large (greater than %s), please try different job parameters' \
                                % nice_size( self.app.config.output_size_limit )
                            log.warning( '(%s/%s) Dequeueing job due to output %s growing larger than %s limit' \
                                % ( galaxy_job_id, job_id, os.path.basename( outfile ), nice_size( self.app.config.output_size_limit ) ) )
                            self.work_queue.put( ( 'fail', pbs_job_state ) )
                            fail = True
                            break
                    if fail:
                        continue
                if self.job_walltime is not None:
                    # Check the job's execution time
                    if status.get( 'resources_used', False ):
                        # resources_used may not be in the status for new jobs
                        h, m, s = [ int( i ) for i in status.resources_used.walltime.split( ':' ) ]
                        time_executing = timedelta( 0, s, 0, 0, m, h )
                        if time_executing > self.job_walltime:
                            pbs_job_state.fail_message = 'Job ran longer than maximum allowed execution time (%s), please try different job parameters' \
                                % self.app.config.job_walltime
                            log.warning( '(%s/%s) Dequeueing job since walltime has been reached' \
                                % ( galaxy_job_id, job_id ) )
                            self.work_queue.put( ( 'fail', pbs_job_state ) )
                            continue
            pbs_job_state.old_state = status.job_state
            new_watched.append( pbs_job_state )
        else:
            try:
                # recheck to make sure it wasn't a communication problem
                self.check_single_job( pbs_server_name, job_id )
                log.warning( "(%s/%s) job was not in state check list, but was found with individual state check" % ( galaxy_job_id, job_id ) )
                new_watched.append( pbs_job_state )
            except:
                errno, text = pbs.error()
                if errno != 15001:
                    log.info("(%s/%s) state check resulted in error (%d): %s" % (galaxy_job_id, job_id, errno, text) )
                    new_watched.append( pbs_job_state )
                else:
                    log.debug("(%s/%s) job has left queue" % (galaxy_job_id, job_id) )
                    self.work_queue.put( ( 'finish', pbs_job_state ) )
    # Replace the watch list with the updated version
    self.watched = new_watched
def display_peek( self, dataset ): try: return dataset.peek except: return "Matlab Binary file (%s)" % ( data.nice_size( dataset.get_size() ) )
def display_peek(self, dataset): try: return dataset.peek except: return "Tabular gSpan file (%s)" % (data.nice_size( dataset.get_size()))
def display_peek( self, dataset ): try: return dataset.peek except: return "Audio (MP3) file (%s)" % ( data.nice_size( dataset.get_size() ) )
def check_watched_items( self ):
    """
    Called by the monitor thread to look at each watched job and deal
    with state changes.
    """
    new_watched = []
    # reduce pbs load by batching status queries
    ( failures, statuses ) = self.check_all_jobs()
    for pbs_job_state in self.watched:
        job_id = pbs_job_state.job_id
        #galaxy_job_id = pbs_job_state.job_wrapper.job_id
        galaxy_job_id = pbs_job_state.job_wrapper.get_id_tag()
        old_state = pbs_job_state.old_state
        pbs_server_name = self.determine_pbs_server( pbs_job_state.runner_url )
        if pbs_server_name in failures:
            log.debug( "(%s/%s) Skipping state check because PBS server connection failed" % ( galaxy_job_id, job_id ) )
            new_watched.append( pbs_job_state )
            continue
        try:
            status = statuses[job_id]
        except KeyError:
            try:
                # Recheck to make sure it wasn't a communication problem
                self.check_single_job( pbs_server_name, job_id )
                log.warning( "(%s/%s) PBS job was not in state check list, but was found with individual state check" % ( galaxy_job_id, job_id ) )
                new_watched.append( pbs_job_state )
            except:
                errno, text = pbs.error()
                if errno == 15001:
                    # 15001 == job not in queue
                    log.debug("(%s/%s) PBS job has left queue" % (galaxy_job_id, job_id) )
                    self.work_queue.put( ( 'finish', pbs_job_state ) )
                else:
                    # Unhandled error, continue to monitor
                    log.info("(%s/%s) PBS state check resulted in error (%d): %s" % (galaxy_job_id, job_id, errno, text) )
                    new_watched.append( pbs_job_state )
            continue
        if status.job_state != old_state:
            log.debug("(%s/%s) PBS job state changed from %s to %s" % ( galaxy_job_id, job_id, old_state, status.job_state ) )
        if status.job_state == "R" and not pbs_job_state.running:
            pbs_job_state.running = True
            pbs_job_state.job_wrapper.change_state( model.Job.states.RUNNING )
        if status.job_state == "R" and ( pbs_job_state.check_count % 20 ) == 0:
            # Every 20th time the job status is checked, do limit checks (if configured)
            if self.app.config.output_size_limit > 0:
                # Check the size of the job outputs
                fail = False
                for outfile, size in pbs_job_state.job_wrapper.check_output_sizes():
                    if size > self.app.config.output_size_limit:
                        pbs_job_state.fail_message = 'Job output grew too large (greater than %s), please try different job parameters' \
                            % nice_size( self.app.config.output_size_limit )
                        log.warning( '(%s/%s) Dequeueing job due to output %s growing larger than %s limit' \
                            % ( galaxy_job_id, job_id, os.path.basename( outfile ), nice_size( self.app.config.output_size_limit ) ) )
                        pbs_job_state.stop_job = True
                        self.work_queue.put( ( 'fail', pbs_job_state ) )
                        fail = True
                        break
                if fail:
                    continue
            if self.app.config.job_walltime_delta is not None:
                # Check the job's execution time
                if status.get( 'resources_used', False ):
                    # resources_used may not be in the status for new jobs
                    h, m, s = [ int( i ) for i in status.resources_used.walltime.split( ':' ) ]
                    time_executing = timedelta( 0, s, 0, 0, m, h )
                    if time_executing > self.app.config.job_walltime_delta:
                        pbs_job_state.fail_message = 'Job ran longer than maximum allowed execution time (%s), please try different job parameters' \
                            % self.app.config.job_walltime
                        log.warning( '(%s/%s) Dequeueing job since walltime has been reached' \
                            % ( galaxy_job_id, job_id ) )
                        pbs_job_state.stop_job = True
                        self.work_queue.put( ( 'fail', pbs_job_state ) )
                        continue
        elif status.job_state == "C":
            # "keep_completed" is enabled in PBS, so try to check exit status
            try:
                assert int( status.exit_status ) == 0
                log.debug("(%s/%s) PBS job has completed successfully" % ( galaxy_job_id, job_id ) )
            except AssertionError:
                pbs_job_state.fail_message = 'Job cannot be completed due to a cluster error, please retry it later'
                log.error( '(%s/%s) PBS job failed: %s' % ( galaxy_job_id, job_id, JOB_EXIT_STATUS.get( int( status.exit_status ), 'Unknown error: %s' % status.exit_status ) ) )
                self.work_queue.put( ( 'fail', pbs_job_state ) )
                continue
            except AttributeError:
                # No exit_status, can't verify proper completion so we just have to assume success.
                log.debug("(%s/%s) PBS job has completed" % ( galaxy_job_id, job_id ) )
            self.work_queue.put( ( 'finish', pbs_job_state ) )
            continue
        pbs_job_state.old_state = status.job_state
        new_watched.append( pbs_job_state )
    # Replace the watch list with the updated version
    self.watched = new_watched
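# The walltime checks above parse PBS's "HH:MM:SS" resources_used.walltime
# string with positional timedelta arguments: timedelta( 0, s, 0, 0, m, h )
# fills the ( days, seconds, microseconds, milliseconds, minutes, hours )
# slots.  The same parse written with keyword arguments, as a small worked
# example:
from datetime import timedelta

h, m, s = [ int( i ) for i in "02:30:15".split( ':' ) ]
time_executing = timedelta( hours=h, minutes=m, seconds=s )
assert time_executing == timedelta( 0, s, 0, 0, m, h )  # same value, clearer intent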
def display_peek( self, dataset ): try: return dataset.peek except: return "JSON file (%s)" % ( nice_size( dataset.get_size() ) )
def display_peek(self, dataset): try: return dataset.peek except Exception: return "Vtk file (%s)" % (nice_size(dataset.get_size()))