def results(self, raw):
    """Turn the raw outcome of a pooled run into one Result per identifier.

    If the `raw` callable raises, every identifier is reported as failed
    with that exception.  Otherwise it must return one (failure, data)
    pair per identifier; `failure` selects error vs success wrapping.
    """

    try:
        outcome = raw()

    except Exception as e:
        # Whole pooled call failed: charge the same exception to every job
        return [
            Result(identifier=i, value=result.error(exception=e))
            for i in self.identifiers
        ]

    assert len(outcome) == len(self.identifiers)

    wrapped = []

    for (identifier, (failure, data)) in zip(self.identifiers, outcome):
        if failure:
            value = result.error(exception=data)
        else:
            value = result.success(value=data)

        wrapped.append(Result(identifier=identifier, value=value))

    return wrapped
class PostprocessingState(object):
    """
    A job that is trying to postprocess
    """

    def __init__(self, job):
        # job: the (already terminated) worker whose unit is being finalized
        self.job = job

    def is_alive(self):
        # Postprocessing implies the worker itself has already stopped
        return False

    def initiate_postprocessing(self, job):
        # Py3-compatible raise (was Py2-only: raise RuntimeError, "...")
        raise RuntimeError("initiate_postprocess called second time")

    def perform_postprocessing(self, job):
        """Run unit.finalize and publish the wrapped outcome on the job.

        ProcessingException propagates (scheduling-level failure); any other
        exception is captured as an error result.
        """

        try:
            value = result.success(value=job.unit.finalize(job=self.job))

        except ProcessingException:
            # Deliberate: scheduling-level failures go to the caller
            raise

        except Exception as e:
            value = result.error(exception=e)

        # Publish the outcome (the original computed `value` and dropped it;
        # the sibling implementation in this file stores it on job.status)
        job.status = ValueState(value=value)
def job_cycle(outqueue, jobid, target, args, kwargs):
    """Run a single job and post (jobid, outcome) onto outqueue.

    The original computed a result in the error branch but never used
    `outqueue` or `jobid`; completed to match the Py3 sibling in this file.
    """

    try:
        value = target(*args, **kwargs)

    # Py3-compatible handler (was Py2-only: except Exception, e)
    except Exception as e:
        res = result.error(exception=e, traceback=result.get_traceback_info())

    else:
        res = result.success(value=value)

    outqueue.put((jobid, res))
def initiate_postprocessing(self, job):
    """Join the finished worker and advance `job` to its next state.

    A zero exit code leads to PostprocessingState; anything else records
    an error ValueState built from the worker's reported error.
    """

    self.job.join()

    # Thread workers carry no "exitcode" attribute -> count as a clean exit
    code = getattr(self.job, "exitcode", 0)

    if code != 0:
        fallback = RuntimeError("exit code = %s" % code)
        failure = getattr(self.job, "err", fallback)
        job.status = ValueState(value=result.error(exception=failure))
    else:
        job.status = PostprocessingState(job=self.job)
def initiate_postprocessing(self, job):
    """Wait for the worker to stop, then transition `job` accordingly."""

    self.job.join()

    # getattr default covers Thread workers, which expose no "exitcode"
    exit_code = getattr(self.job, "exitcode", 0)

    if exit_code == 0:
        next_state = PostprocessingState(job=self.job)
    else:
        err = getattr(self.job, "err", RuntimeError("exit code = %s" % exit_code))
        next_state = ValueState(value=result.error(exception=err))

    job.status = next_state
def record_process_crash(self, pid, exception):
    """Account for a worker process that died with `exception`.

    Raises RuntimeError for an unknown pid.  If the worker was running a
    job (jobid not None), the job is finished with an error result; the
    pid is then retired to `terminateds` either way.
    """

    if pid not in self.running_on:
        # Py3-compatible raise (was Py2-only: raise RuntimeError, "...")
        raise RuntimeError("Unknown processID")

    jobid = self.running_on[pid]

    if jobid is not None:
        self.results.append((jobid, result.error(exception=exception)))

    del self.running_on[pid]
    self.terminateds.append(pid)
def record_process_crash(self, pid, exception):
    """Retire worker `pid` after a crash, charging the error to its job."""

    if pid not in self.running_on:
        raise RuntimeError("Unknown processID")

    jobid = self.running_on[pid]

    # An idle worker (jobid None) produces no job-level error record
    if jobid is not None:
        failure = result.error(exception=exception)
        self.results.append((jobid, failure))

    del self.running_on[pid]
    self.terminateds.append(pid)
def job_cycle(outqueue, jobid, target, args, kwargs):
    """Execute one job and report (jobid, outcome) through outqueue."""

    try:
        value = target(*args, **kwargs)

    except Exception as e:
        # Ship the traceback info alongside the exception for remote display
        outcome = result.error(exception=e, traceback=result.get_traceback_info())

    else:
        outcome = result.success(value=value)

    outqueue.put((jobid, outcome))
def perform_postprocessing(self, job):
    """Finalize the unit and store the wrapped outcome as job.status."""

    try:
        value = result.success(value=job.unit.finalize(job=self.job))

    except ProcessingException:
        # Scheduling-level problems are for the caller to handle
        raise

    except Exception as e:
        value = result.error(exception=e)

    job.status = ValueState(value=value)
def pool_process_cycle(
    pid,
    inqueue,
    outqueue,
    waittime,
    lifecycle,
    termination_signal,
    idle_timeout,
):
    # Worker main loop: pull (jobid, target, args, kwargs) tuples from
    # inqueue, run them, and report lifecycle + job events on outqueue.
    # NOTE(review): the event constants, `time`, `Empty` and `result` come
    # from the enclosing module scope.
    controller = lifecycle()
    outqueue.put((worker_startup_event, pid))
    last_activity = time.time()
    while controller.active():
        # Self-terminate after idle_timeout seconds without work
        if last_activity + idle_timeout < time.time():
            outqueue.put((worker_termination_event, pid))
            break
        try:
            data = inqueue.get(timeout=waittime)
        except Empty:
            # Nothing queued within waittime; re-check liveness and idle clock
            continue
        if data == termination_signal:
            outqueue.put((worker_shutdown_event, pid))
            break
        assert len(data) == 4
        (jobid, target, args, kwargs) = data
        outqueue.put((job_started_event, (jobid, pid)))
        controller.record_job_start()
        try:
            value = target(*args, **kwargs)
        except Exception as e:
            # Capture the traceback with the exception for remote reporting
            res = result.error(exception=e, traceback=result.get_traceback_info())
        else:
            res = result.success(value=value)
        outqueue.put((job_finished_event, (jobid, pid, res)))
        controller.record_job_end()
        last_activity = time.time()
    else:
        # while/else: runs only when the loop exits without break, i.e. the
        # lifecycle controller declared this worker inactive
        outqueue.put((worker_termination_event, pid))
def record_process_crash(self, pid, exception, traceback):
    """Retire crashed worker `pid`, attaching exception and traceback to its job.

    Raises SchedulingError for an unknown pid.
    """

    if pid not in self.running_on:
        # Py3-compatible raise (was Py2-only: raise SchedulingError, "...")
        raise SchedulingError("Unknown processID")

    jobid = self.running_on[pid]

    if jobid is not None:
        self.results.append(
            (jobid, result.error(exception=exception, traceback=traceback)))

    del self.running_on[pid]
    self.terminateds.append(pid)
def results(self, raw):
    """Collect per-identifier error Results when the raw call fails.

    NOTE(review): only the failure branch is visible here; the success
    branch and the final return appear truncated in this copy -- confirm
    against the complete variant of this method.  Behaviour preserved.
    """

    results = []

    try:
        res = raw()

    # Py3-compatible handler (was Py2-only: except Exception, e)
    except Exception as e:
        results.extend([
            Result(
                identifier=i,
                value=result.error(exception=e),
            )
            for i in self.identifiers
        ])
def __call__(self):
    """Run each stored (target, args, kwargs) calculation, wrapping outcomes."""

    from libtbx.scheduling import result

    results = []

    for (target, args, kwargs) in self.calculations:
        try:
            value = target(*args, **kwargs)

        # Py3-compatible handler (was Py2-only: except Exception, e)
        except Exception as e:
            results.append(result.error(exception=e))

        else:
            results.append(result.success(value=value))

    # NOTE(review): no return of `results` is visible in the original; this
    # copy may be truncated -- confirm against the upstream definition
def __call__(self):
    """Evaluate every queued calculation, collecting wrapped results."""

    from libtbx.scheduling import result

    results = []

    for (target, args, kwargs) in self.calculations:
        try:
            value = target(*args, **kwargs)

        # Py3-compatible handler (was Py2-only: except Exception, e)
        except Exception as e:
            results.append(result.error(exception=e))

        else:
            results.append(result.success(value=value))

    # NOTE(review): the visible original never returns `results`; possibly
    # truncated here -- verify against the complete upstream definition
def poll(self):
    """Run the next waiting job synchronously and record its Result."""

    if self.waiting_jobs:
        current = self.waiting_jobs.popleft()

        try:
            value = current.target(*current.args, **current.kwargs)

        # Py3-compatible handler (was Py2-only: except Exception, e)
        except Exception as e:
            res = result.error(exception=e)

        else:
            res = result.success(value=value)

        self.completed_results.append(Result(identifier=current, value=res))
def poll(self):
    """Pop one pending job (if any), run it, and file the wrapped outcome."""

    if not self.waiting_jobs:
        return

    current = self.waiting_jobs.popleft()

    try:
        value = current.target(*current.args, **current.kwargs)

    except Exception as e:
        outcome = result.error(exception=e)

    else:
        outcome = result.success(value=value)

    self.completed_results.append(Result(identifier=current, value=outcome))
def poll(self):
    """Run the next queued call and append (jobid, result) to inqueue."""

    if self.outqueue:
        (jobid, target, args, kwargs) = self.outqueue.popleft()

        try:
            value = target(*args, **kwargs)

        # Py3-compatible handler (was Py2-only: except Exception, e)
        except Exception as e:
            res = result.error(exception=e, traceback=result.get_traceback_info())

        else:
            res = result.success(value=value)

        self.inqueue.append((jobid, res))
def poll(self, block):
    # Drive the worker result fetch: True means "still waiting" (queue was
    # empty); False means the job has settled (success or failure).
    # NOTE(review): `Empty` and `result` come from the enclosing module
    # scope; the indentation of the final `return False` is reconstructed
    # from the collapsed original -- confirm against upstream.
    if self.exitcode is None:
        try:
            self.result = self.worker.get(block=block)
            self.exitcode = 0
        except Empty:
            # No result available yet; caller should poll again
            return True
        except Exception as e:
            # Worker raised: record a synthetic nonzero exit plus the error
            self.result = result.error(exception=e)
            self.exitcode = 1
            self.err = e
    return False
def poll(self):
    """Dequeue one pending call, execute it, and queue (jobid, result)."""

    if self.outqueue:
        (jobid, target, args, kwargs) = self.outqueue.popleft()

        try:
            value = target(*args, **kwargs)

        # Py3-compatible handler (was Py2-only: except Exception, e)
        except Exception as e:
            res = result.error(exception=e, traceback=result.get_traceback_info())

        else:
            res = result.success(value=value)

        self.inqueue.append((jobid, res))
def poll(self, block):
    """Try to fetch the worker outcome; returns True while still pending.

    Sets self.result / self.exitcode once the worker settles; on an
    unexpected exception also records it on self.err.
    """

    if self.exitcode is None:
        # Py2/Py3-compatible import (was Py2-only: from Queue import Empty)
        try:
            from queue import Empty
        except ImportError:
            from Queue import Empty

        try:
            self.result = self.worker.get(block=block)
            self.exitcode = 0

        except Empty:
            # Nothing available yet -- caller should keep polling
            return True

        # Py3-compatible handler (was Py2-only: except Exception, e)
        except Exception as e:
            self.result = result.error(exception=e)
            self.exitcode = 1
            self.err = e
def poll(self, block):
    """Fetch the worker's outcome if available; True means still pending.

    On success stores the value in self.result with exitcode 0; on an
    unexpected exception stores an error result, exitcode 1 and self.err.
    """

    if self.exitcode is None:
        # Py2/Py3-compatible import (was Py2-only: from Queue import Empty)
        try:
            from queue import Empty
        except ImportError:
            from Queue import Empty

        try:
            self.result = self.worker.get(block=block)
            self.exitcode = 0

        except Empty:
            # Queue empty -- result not ready, poll again later
            return True

        # Py3-compatible handler (was Py2-only: except Exception, e)
        except Exception as e:
            self.result = result.error(exception=e)
            self.exitcode = 1
            self.err = e
def pool_process_cycle(
    pid,
    inqueue,
    outqueue,
    waittime,
    lifecycle,
    termination_signal,
    idle_timeout,
):
    """Worker main loop: consume jobs from inqueue, report events on outqueue.

    Terminates on idle timeout or on receipt of termination_signal, or stops
    looping when the lifecycle controller goes inactive.  The event constants,
    `time`, `Empty` and `result` come from the enclosing module scope.
    """

    controller = lifecycle()
    outqueue.put((worker_startup_event, pid))
    last_activity = time.time()

    while controller.active():
        # Self-terminate after idle_timeout seconds without work
        if last_activity + idle_timeout < time.time():
            outqueue.put((worker_termination_event, pid))
            break

        try:
            data = inqueue.get(timeout=waittime)
        except Empty:
            continue

        if data == termination_signal:
            outqueue.put((worker_shutdown_event, pid))
            break

        assert len(data) == 4
        (jobid, target, args, kwargs) = data
        outqueue.put((job_started_event, (jobid, pid)))
        controller.record_job_start()

        try:
            value = target(*args, **kwargs)

        # Py3-compatible handler (was Py2-only: except Exception, e)
        except Exception as e:
            res = result.error(exception=e, traceback=result.get_traceback_info())

        else:
            res = result.success(value=value)

        outqueue.put((job_finished_event, (jobid, pid, res)))
        controller.record_job_end()
        last_activity = time.time()
def finish_job(self, jobid):
    """Join the worker for `jobid` and route it to crashed/pending bookkeeping."""

    worker = self.process_data_for[jobid]
    worker.join()

    # Thread workers expose no "exitcode" attribute; treat them as clean exits
    code = getattr(worker, "exitcode", 0)

    if code == 0:
        # The actual result arrives separately; remember that one is still owed
        self.waiting_results.add(jobid)
    else:
        crash = result.error(
            exception=result.get_exception(process=worker, exit_code=code),
            traceback=result.get_crash_info(process=worker),
        )
        self.completed_results.append((jobid, crash))

    del self.process_data_for[jobid]
def results(self, raw):
    """Build per-identifier error Results when the raw call raises.

    NOTE(review): only the failure branch is visible in this copy; the
    success branch and return appear truncated -- confirm against the
    complete variant of this method.  Behaviour preserved.
    """

    results = []

    try:
        res = raw()

    # Py3-compatible handler (was Py2-only: except Exception, e)
    except Exception as e:
        results.extend(
            [
                Result(
                    identifier=i,
                    value=result.error(exception=e),
                )
                for i in self.identifiers
            ]
        )
[ Result( identifier = i, value = result.error( exception = e ), ) for i in self.identifiers ] ) else: assert len( res ) == len( self.identifiers ) for ( identifier, ( failure, data ) ) in zip( self.identifiers, res ): if failure: results.append( Result( identifier = identifier, value = result.error( exception = data ) ) ) else: results.append( Result( identifier = identifier, value = result.success( value = data ) ) ) return results class Pooler(object): """ Pools up a number of jobs and runs this way pool - number of jobs to pool
except Exception, e: results.extend([ Result( identifier=i, value=result.error(exception=e), ) for i in self.identifiers ]) else: assert len(res) == len(self.identifiers) for (identifier, (failure, data)) in zip(self.identifiers, res): if failure: results.append( Result(identifier=identifier, value=result.error(exception=data))) else: results.append( Result(identifier=identifier, value=result.success(value=data))) return results class Pooler(object): """ Pools up a number of jobs and runs this way pool - number of jobs to pool """