def perform_postprocessing(self, job):
    """Run the unit's finalization step for ``job``.

    NOTE(review): this variant wraps the finalized value but never stores
    or returns it, and only re-raises ProcessingException -- presumably a
    truncated copy of the full implementation that sets ``job.status``;
    confirm against that variant.
    """
    try:
        # Wrap the finalization value in a success result.
        value = result.success(value=job.unit.finalize(job=self.job))
    # Py3 fix: 'except ProcessingException, e:' is Python-2-only syntax.
    except ProcessingException:
        # Processing errors are fatal here; propagate to the caller.
        raise
def perform_postprocessing(self, job):
    """Finalize ``job`` via its unit, wrapping the value as a success result.

    NOTE(review): the wrapped value is neither stored nor returned in this
    chunk -- likely truncated; verify against the variant that assigns
    ``job.status``.
    """
    try:
        value = result.success(value=job.unit.finalize(job=self.job))
    # Py3 fix: the original 'except ProcessingException, e:' form is a
    # SyntaxError under Python 3; the bound exception was unused anyway.
    except ProcessingException:
        raise
def results(self, raw):
    """Evaluate ``raw`` and convert its outcome into a list of Result objects.

    If calling ``raw`` itself raises, every identifier is given the same
    error result; otherwise each ``(failure, data)`` pair is wrapped
    individually as an error or a success.
    """
    try:
        outcome = raw()
    except Exception as exc:
        # The whole batch failed: report the identical error for each id.
        return [
            Result(identifier=ident, value=result.error(exception=exc))
            for ident in self.identifiers
        ]
    assert len(outcome) == len(self.identifiers)
    wrapped = []
    for ident, (failed, payload) in zip(self.identifiers, outcome):
        if failed:
            value = result.error(exception=payload)
        else:
            value = result.success(value=payload)
        wrapped.append(Result(identifier=ident, value=value))
    return wrapped
def record_job_finish(self, jobid, pid, value):
    """Record successful completion of ``jobid`` on worker ``pid``.

    Frees the worker slot and appends ``(jobid, success-result)`` to
    ``self.results``.

    Raises:
        RuntimeError: if ``pid`` is unknown, or is registered to a
            different job than ``jobid``.
    """
    # Py3 fix: 'raise RuntimeError, "msg"' is Python-2-only statement syntax.
    if pid not in self.running_on:
        raise RuntimeError("Unknown processID")
    if self.running_on[pid] != jobid:
        raise RuntimeError("Inconsistent register information: jobid/pid mismatch")
    # Mark the worker as idle and bank the job's wrapped value.
    self.running_on[pid] = None
    self.results.append((jobid, result.success(value=value)))
def record_job_finish(self, jobid, pid, value):
    """Mark ``jobid`` as finished on worker ``pid`` and store its value.

    Raises RuntimeError when the pid is unregistered or is running a
    different job.
    """
    known = pid in self.running_on
    if not known:
        raise RuntimeError("Unknown processID")
    current = self.running_on[pid]
    if current != jobid:
        raise RuntimeError("Inconsistent register information: jobid/pid mismatch")
    # Release the worker slot, then record the wrapped result.
    self.running_on[pid] = None
    wrapped = result.success(value=value)
    self.results.append((jobid, wrapped))
def job_cycle(outqueue, jobid, target, args, kwargs):
    """Execute one job and push ``(jobid, result)`` onto ``outqueue``.

    Any exception raised by ``target`` is captured (with traceback info)
    as an error result rather than propagating.
    """
    try:
        value = target(*args, **kwargs)
    except Exception as exc:
        outcome = result.error(exception=exc, traceback=result.get_traceback_info())
    else:
        # Success wrapping happens outside the try so that a failure in
        # result.success is not misreported as a job failure.
        outcome = result.success(value=value)
    outqueue.put((jobid, outcome))
def perform_postprocessing(self, job):
    """Finalize ``job`` and attach the outcome as its status.

    A ProcessingException propagates untouched; any other exception is
    captured as an error result and stored on the job.
    """
    try:
        wrapped = result.success(value=job.unit.finalize(job=self.job))
    except ProcessingException:
        # Scheduler-level failures must reach the caller unchanged.
        raise
    except Exception as exc:
        wrapped = result.error(exception=exc)
    job.status = ValueState(value=wrapped)
def __call__(self):
    """Run every queued calculation, collecting success/error results.

    NOTE(review): the collected ``results`` list is built but not
    returned in this chunk -- confirm against the full class whether a
    trailing ``return results`` was truncated.
    """
    from libtbx.scheduling import result
    results = []
    for (target, args, kwargs) in self.calculations:
        try:
            value = target(*args, **kwargs)
        # Py3 fix: 'except Exception, e:' is Python-2-only syntax.
        except Exception as e:
            results.append(result.error(exception=e))
        else:
            results.append(result.success(value=value))
def __call__(self):
    """Execute each stored (target, args, kwargs) calculation in order,
    wrapping each outcome as a success or error result.

    NOTE(review): ``results`` is not returned here; body may be truncated
    in this chunk -- verify against the complete class.
    """
    from libtbx.scheduling import result
    results = []
    for (target, args, kwargs) in self.calculations:
        try:
            value = target(*args, **kwargs)
        # Py3 fix: comma form of the except clause is a SyntaxError in Python 3.
        except Exception as e:
            results.append(result.error(exception=e))
        else:
            results.append(result.success(value=value))
def pool_process_cycle(
    pid,
    inqueue,
    outqueue,
    waittime,
    lifecycle,
    termination_signal,
    idle_timeout,
):
    # Worker main loop: announce startup, repeatedly pull 4-tuple job
    # descriptions from inqueue, run them, and report lifecycle events and
    # results on outqueue.  Exits on idle timeout, on receipt of
    # termination_signal, or when the lifecycle controller deactivates.
    controller = lifecycle()
    outqueue.put((worker_startup_event, pid))
    last_activity = time.time()
    while controller.active():
        # Self-terminate if no work arrived within idle_timeout seconds.
        if last_activity + idle_timeout < time.time():
            outqueue.put((worker_termination_event, pid))
            break
        try:
            data = inqueue.get(timeout=waittime)
        except Empty:
            # No job available within waittime; re-check loop conditions.
            continue
        if data == termination_signal:
            # Explicit shutdown request from the manager.
            outqueue.put((worker_shutdown_event, pid))
            break
        assert len(data) == 4
        (jobid, target, args, kwargs) = data
        outqueue.put((job_started_event, (jobid, pid)))
        controller.record_job_start()
        try:
            value = target(*args, **kwargs)
        except Exception as e:
            # Capture the exception plus traceback info instead of dying.
            res = result.error(exception=e, traceback=result.get_traceback_info())
        else:
            res = result.success(value=value)
        outqueue.put((job_finished_event, (jobid, pid, res)))
        controller.record_job_end()
        last_activity = time.time()
    else:
        # Loop ended because the controller went inactive (no break taken),
        # so the termination event has not been emitted yet.
        outqueue.put((worker_termination_event, pid))
def poll(self):
    """Run the next waiting job synchronously, if any, and store its Result.

    The job object itself serves as the result identifier.
    """
    if not self.waiting_jobs:
        return
    job = self.waiting_jobs.popleft()
    try:
        value = job.target(*job.args, **job.kwargs)
    except Exception as exc:
        outcome = result.error(exception=exc)
    else:
        outcome = result.success(value=value)
    self.completed_results.append(Result(identifier=job, value=outcome))
def poll(self):
    """Pop one queued job description, run it, and append ``(jobid, result)``
    to ``self.inqueue``.

    Exceptions from the job are captured (with traceback info) as error
    results rather than propagating.
    """
    if self.outqueue:
        (jobid, target, args, kwargs) = self.outqueue.popleft()
        try:
            value = target(*args, **kwargs)
        # Py3 fix: 'except Exception, e:' is Python-2-only syntax.
        except Exception as e:
            res = result.error(exception=e, traceback=result.get_traceback_info())
        else:
            res = result.success(value=value)
        self.inqueue.append((jobid, res))
def poll(self):
    """Execute the next waiting job, if any, recording a Result keyed by
    the job object itself.

    Exceptions from the job are captured as error results.
    """
    if self.waiting_jobs:
        current = self.waiting_jobs.popleft()
        try:
            value = current.target(*current.args, **current.kwargs)
        # Py3 fix: comma form of the except clause is a SyntaxError in Python 3.
        except Exception as e:
            res = result.error(exception=e)
        else:
            res = result.success(value=value)
        self.completed_results.append(Result(identifier=current, value=res))
def poll(self):
    """Dequeue one ``(jobid, target, args, kwargs)`` tuple, run it, and
    push the wrapped outcome onto ``self.inqueue``.

    Failures are wrapped (including traceback info) as error results.
    """
    if self.outqueue:
        (jobid, target, args, kwargs) = self.outqueue.popleft()
        try:
            value = target(*args, **kwargs)
        # Py3 fix: 'except Exception, e:' does not parse under Python 3.
        except Exception as e:
            res = result.error(exception=e, traceback=result.get_traceback_info())
        else:
            res = result.success(value=value)
        self.inqueue.append((jobid, res))
def pool_process_cycle(
    pid,
    inqueue,
    outqueue,
    waittime,
    lifecycle,
    termination_signal,
    idle_timeout,
):
    """Worker main loop: pull job tuples from ``inqueue``, run them, and
    report events and results on ``outqueue``.

    Exits on idle timeout or on receipt of ``termination_signal``.

    NOTE(review): unlike the sibling variant, this one has no ``while/else``
    emitting a termination event when the controller deactivates -- possibly
    truncated in this chunk; confirm.
    """
    controller = lifecycle()
    outqueue.put((worker_startup_event, pid))
    last_activity = time.time()
    while controller.active():
        # Self-terminate if no work arrived within idle_timeout seconds.
        if last_activity + idle_timeout < time.time():
            outqueue.put((worker_termination_event, pid))
            break
        try:
            data = inqueue.get(timeout=waittime)
        except Empty:
            # Nothing to do yet; re-check loop conditions.
            continue
        if data == termination_signal:
            # Explicit shutdown request from the manager.
            outqueue.put((worker_shutdown_event, pid))
            break
        assert len(data) == 4
        (jobid, target, args, kwargs) = data
        outqueue.put((job_started_event, (jobid, pid)))
        controller.record_job_start()
        try:
            value = target(*args, **kwargs)
        # Py3 fix: 'except Exception, e:' is Python-2-only syntax.
        except Exception as e:
            res = result.error(exception=e, traceback=result.get_traceback_info())
        else:
            res = result.success(value=value)
        outqueue.put((job_finished_event, (jobid, pid, res)))
        controller.record_job_end()
        last_activity = time.time()
# NOTE(review): 'is_full' below is a decorated method of a class whose
# header lies outside this chunk; kept verbatim.
@staticmethod
def is_full(njobs):
    # This (unlimited) variant never reports the scheduler as full.
    return False


def job_cycle(outqueue, jobid, target, args, kwargs):
    """Run a single job and put ``(jobid, result)`` on ``outqueue``.

    Exceptions are captured (with traceback info) as error results.
    """
    try:
        value = target(*args, **kwargs)
    # Py3 fix: 'except Exception, e:' is Python-2-only syntax.
    except Exception as e:
        res = result.error(exception=e, traceback=result.get_traceback_info())
    else:
        res = result.success(value=value)
    outqueue.put((jobid, res))


class manager(object):
    """
    Job scheduler
    """

    # NOTE(review): 'waittime' is accepted but not stored in the visible
    # body -- the class likely continues beyond this chunk; confirm.
    def __init__(self, inqueue, job_factory, capacity, waittime=0.01):
        self.inqueue = inqueue
        self.job_factory = job_factory
        self.capacity = capacity
for i in self.identifiers ] ) else: assert len( res ) == len( self.identifiers ) for ( identifier, ( failure, data ) ) in zip( self.identifiers, res ): if failure: results.append( Result( identifier = identifier, value = result.error( exception = data ) ) ) else: results.append( Result( identifier = identifier, value = result.success( value = data ) ) ) return results class Pooler(object): """ Pools up a number of jobs and runs this way pool - number of jobs to pool """ def __init__(self, size): assert 0 < size
def finalize(identifier):
    """Build a terminal Result for ``identifier`` carrying a successful
    ``None`` value."""
    empty_success = result.success(value=None)
    return Result(identifier=identifier, value=empty_success)
def finalize(identifier):
    """Wrap ``identifier`` in a Result holding an empty (None) success."""
    return Result(
        identifier=identifier,
        value=result.success(value=None),
    )
) for i in self.identifiers ]) else: assert len(res) == len(self.identifiers) for (identifier, (failure, data)) in zip(self.identifiers, res): if failure: results.append( Result(identifier=identifier, value=result.error(exception=data))) else: results.append( Result(identifier=identifier, value=result.success(value=data))) return results class Pooler(object): """ Pools up a number of jobs and runs this way pool - number of jobs to pool """ def __init__(self, size): assert 0 < size self.size = size