def run_jobs(jobs):
    """
    Run jobs using the specified config file and other options.

    :param jobs: List of LogContexts
    :returns: the input list of jobs (also when the jobs are aborted)
    """
    # run as a job array only when there are several jobs and the first
    # one carries the .multi flag
    jobarray = len(jobs) > 1 and jobs[0].multi
    try:
        poll_queue(jobs[0].calc_id, poll_time=15)
        # wait for an empty slot or a CTRL-C
    except BaseException:
        # the job aborted even before starting
        for job in jobs:
            logs.dbcmd('finish', job.calc_id, 'aborted')
        return jobs
    else:
        # mark every job as executing in the job database
        for job in jobs:
            dic = {'status': 'executing', 'pid': _PID}
            logs.dbcmd('update_job', job.calc_id, dic)
    try:
        # NOTE(review): workers_status() == [] presumably means no zmq
        # workers are up yet -- confirm against openquake.baselib.parallel
        if OQ_DISTRIBUTE == 'zmq' and parallel.workers_status() == []:
            print('Asking the DbServer to start the workers')
            logs.dbcmd('workers_start')  # start the workers
        allargs = [(job,) for job in jobs]
        if jobarray:
            # spawn all calculations at once as a job array
            with general.start_many(run_calc, allargs):
                pass
        else:
            # run the calculations sequentially
            for job in jobs:
                run_calc(job)
    finally:
        # for serialize_jobs > 1 there could be something still running:
        # don't stop the zworkers in that case!
        # NOTE(review): (h, r, t) looks like (host, running, total) per
        # worker host, so the sum is the number of running tasks -- verify
        if OQ_DISTRIBUTE == 'zmq' and sum(
                r for h, r, t in parallel.workers_status()) == 0:
            print('Stopping the workers')
            parallel.workers_stop()
    return jobs
def run_jobs(jobs):
    """
    Run jobs using the specified config file and other options.

    :param jobs: List of LogContexts
    :returns: the input list of jobs (also when the jobs are aborted)
    """
    # run as a job array only when there are several jobs and the first
    # one carries the .multi flag
    jobarray = len(jobs) > 1 and jobs[0].multi
    try:
        poll_queue(jobs[0].calc_id, poll_time=15)
        # wait for an empty slot or a CTRL-C
    except BaseException:
        # the job aborted even before starting
        for job in jobs:
            logs.dbcmd('finish', job.calc_id, 'aborted')
        return jobs
    else:
        # mark every job as executing in the job database
        for job in jobs:
            dic = {'status': 'executing', 'pid': _PID}
            logs.dbcmd('update_job', job.calc_id, dic)
    try:
        # start the zmq workers only if some host_cores are configured and
        # no workers are up yet (workers_status() == [])
        if config.zworkers['host_cores'] and parallel.workers_status() == []:
            print('Asking the DbServer to start the workers')
            logs.dbcmd('workers_start')  # start the workers
        allargs = [(job, ) for job in jobs]
        if jobarray:
            # spawn all calculations at once as a job array
            with general.start_many(run_calc, allargs):
                pass
        else:
            # run the calculations sequentially
            for job in jobs:
                run_calc(job)
    finally:
        # always stop the workers if host_cores are configured, even if
        # they were already running before this call
        if config.zworkers['host_cores']:
            print('Stopping the workers')
            parallel.workers_stop()
    return jobs
def test(self):
    """Smoke test: start_many must run `double` over three pairs and exit cleanly."""
    pairs = [(n, n) for n in (1, 2, 3)]
    with start_many(double, pairs):
        pass
def run_jobs(job_inis, log_level='info', log_file=None, exports='',
             username=getpass.getuser(), **kw):
    """
    Run jobs using the specified config file and other options.

    :param str job_inis:
        A list of paths to .ini files, or a list of job dictionaries
    :param str log_level:
        'debug', 'info', 'warn', 'error', or 'critical'
    :param str log_file:
        Path to log file.
    :param exports:
        A comma-separated string of export types requested by the user.
    :param username:
        Name of the user running the job
    :param kw:
        Extra parameters like hazard_calculation_id and calculation_mode
    :returns: a list of pairs (job_id, oqparam)
    """
    # NOTE(review): this source arrived whitespace-mangled; the statement
    # nesting below was reconstructed from the syntax -- confirm against VCS
    dist = parallel.oq_distribute()
    jobparams = []
    multi = kw.pop('multi', None)
    loglvl = getattr(logging, log_level.upper())
    jobs = create_jobs(job_inis, loglvl, kw)
    hc_id = kw.pop('hazard_calculation_id', None)
    for job in jobs:
        job_id = job['_job_id']
        with logs.handle(job_id, log_level, log_file):
            # build the OqParam object inside the logging context so that
            # errors end up in the job log
            oqparam = readinput.get_oqparam(job, hc_id=hc_id, **kw)
        logs.dbcmd(
            'update_job', job_id,
            dict(calculation_mode=oqparam.calculation_mode,
                 description=oqparam.description,
                 user_name=username,
                 hazard_calculation_id=hc_id))
        # the first job becomes the hazard calculation for the following
        # ones, unless running a multi-job array or a sensitivity analysis
        if (not jobparams and not multi and hc_id is None
                and 'sensitivity_analysis' not in job):
            hc_id = job_id
        jobparams.append((job_id, oqparam))
    jobarray = len(jobparams) > 1 and multi
    try:
        poll_queue(job_id, poll_time=15)
        # wait for an empty slot or a CTRL-C
    except BaseException:
        # the job aborted even before starting
        for job_id, oqparam in jobparams:
            logs.dbcmd('finish', job_id, 'aborted')
        return jobparams
    else:
        # mark every job as executing in the job database
        for job_id, oqparam in jobparams:
            dic = {'status': 'executing', 'pid': _PID}
            if jobarray:
                # all jobs of the array share the first job as hazard
                dic['hazard_calculation_id'] = jobparams[0][0]
            logs.dbcmd('update_job', job_id, dic)
    try:
        if dist == 'zmq' and config.zworkers['host_cores']:
            logging.info('Asking the DbServer to start the workers')
            logs.dbcmd('zmq_start')  # start the zworkers
            logs.dbcmd('zmq_wait')  # wait for them to go up
        allargs = [(job_id, oqparam, exports, log_level, log_file)
                   for job_id, oqparam in jobparams]
        if jobarray:
            # spawn all calculations at once as a job array
            with general.start_many(run_calc, allargs):
                pass
        else:
            # run the calculations sequentially
            for args in allargs:
                run_calc(*args)
    finally:
        if dist == 'zmq' and config.zworkers['host_cores']:
            logging.info('Stopping the zworkers')
            logs.dbcmd('zmq_stop')
        elif dist.startswith('celery'):
            # revoke/cleanup leftover celery tasks
            celery_cleanup(config.distribution.terminate_workers_on_revoke)
    return jobparams
def run_jobs(job_inis, log_level='info', log_file=None, exports='',
             username=getpass.getuser(), **kw):
    """
    Run jobs using the specified config file and other options.

    :param str job_inis:
        A list of paths to .ini files, or a list of job dictionaries
    :param str log_level:
        'debug', 'info', 'warn', 'error', or 'critical'
    :param str log_file:
        Path to log file.
    :param exports:
        A comma-separated string of export types requested by the user.
    :param username:
        Name of the user running the job
    :param kw:
        Extra parameters like hazard_calculation_id and calculation_mode
    :returns: a list of pairs (job_id, oqparam)
    """
    # NOTE(review): this source arrived whitespace-mangled; the statement
    # nesting below was reconstructed from the syntax -- confirm against VCS
    jobparams = []
    multi = kw.pop('multi', None)
    loglvl = getattr(logging, log_level.upper())
    jobs = create_jobs(job_inis, loglvl, kw)  # initialize the logs
    if kw.get('hazard_calculation_id'):
        hc_id = int(kw['hazard_calculation_id'])
    else:
        hc_id = None
    for job in jobs:
        job_id = job['_job_id']
        job['hazard_calculation_id'] = hc_id
        with logs.handle(job_id, log_level, log_file):
            dic = dict(calculation_mode=job['calculation_mode'],
                       description=job['description'],
                       user_name=username, is_running=1)
            if hc_id:
                dic['hazard_calculation_id'] = hc_id
            logs.dbcmd('update_job', job_id, dic)
            # the first job becomes the hazard calculation for the
            # following ones, unless running a multi-job array or a
            # sensitivity analysis
            if (not jobparams and not multi
                    and 'hazard_calculation_id' not in kw
                    and 'sensitivity_analysis' not in job):
                hc_id = job_id
            try:
                oqparam = readinput.get_oqparam(job)
            except BaseException:
                # record the failure in the job database before re-raising
                tb = traceback.format_exc()
                logging.critical(tb)
                logs.dbcmd('finish', job_id, 'failed')
                raise
            jobparams.append((job_id, oqparam))
    jobarray = len(jobparams) > 1 and multi
    try:
        poll_queue(job_id, poll_time=15)
        # wait for an empty slot or a CTRL-C
    except BaseException:
        # the job aborted even before starting
        for job_id, oqparam in jobparams:
            logs.dbcmd('finish', job_id, 'aborted')
        return jobparams
    else:
        # mark every job as executing in the job database
        for job_id, oqparam in jobparams:
            dic = {'status': 'executing', 'pid': _PID}
            if jobarray:
                # all jobs of the array share the first job as hazard
                dic['hazard_calculation_id'] = jobparams[0][0]
            logs.dbcmd('update_job', job_id, dic)
    try:
        # start the workers only if host_cores are configured and none are
        # up yet (workers_status() == [])
        if config.zworkers['host_cores'] and parallel.workers_status() == []:
            logging.info('Asking the DbServer to start the workers')
            logs.dbcmd('workers_start')  # start the workers
        allargs = [(job_id, oqparam, exports, log_level, log_file)
                   for job_id, oqparam in jobparams]
        if jobarray:
            # spawn all calculations at once as a job array
            with general.start_many(run_calc, allargs):
                pass
        else:
            # run the calculations sequentially
            for args in allargs:
                run_calc(*args)
    finally:
        # always stop the workers if host_cores are configured, even if
        # they were already running before this call
        if config.zworkers['host_cores']:
            logging.info('Stopping the workers')
            parallel.workers_stop()
    return jobparams
def run_jobs(job_inis, log_level='info', log_file=None, exports='',
             username=getpass.getuser(), **kw):
    """
    Run jobs using the specified config file and other options.

    :param str job_inis:
        A list of paths to .ini files.
    :param str log_level:
        'debug', 'info', 'warn', 'error', or 'critical'
    :param str log_file:
        Path to log file.
    :param exports:
        A comma-separated string of export types requested by the user.
    :param username:
        Name of the user running the job
    :param kw:
        Extra parameters like hazard_calculation_id and calculation_mode
    :returns: a list of pairs (job_id, oqparam)
    """
    # NOTE(review): this source arrived whitespace-mangled; the statement
    # nesting below was reconstructed from the syntax -- confirm against VCS
    dist = parallel.oq_distribute()
    jobparams = []
    for job_ini in job_inis:
        # NB: the logs must be initialized BEFORE everything
        job_id = logs.init('job', getattr(logging, log_level.upper()))
        with logs.handle(job_id, log_level, log_file):
            oqparam = eng.job_from_file(os.path.abspath(job_ini), job_id,
                                        username, **kw)
        # the first job becomes the hazard calculation for the following
        # ones, unless a csm_cache or an explicit hazard id was passed
        if (not jobparams and 'csm_cache' not in kw
                and 'hazard_calculation_id' not in kw):
            kw['hazard_calculation_id'] = job_id
        jobparams.append((job_id, oqparam))
    # in this variant the job-array mode is keyed on the csm_cache flag
    jobarray = len(jobparams) > 1 and 'csm_cache' in kw
    try:
        eng.poll_queue(job_id, poll_time=15)
        # wait for an empty slot or a CTRL-C
    except BaseException:
        # the job aborted even before starting
        for job_id, oqparam in jobparams:
            logs.dbcmd('finish', job_id, 'aborted')
        return jobparams
    else:
        # mark every job as executing in the job database
        for job_id, oqparam in jobparams:
            dic = {'status': 'executing', 'pid': eng._PID}
            if jobarray:
                # all jobs of the array share the first job as hazard
                dic['hazard_calculation_id'] = jobparams[0][0]
            logs.dbcmd('update_job', job_id, dic)
    try:
        if dist == 'zmq' and config.zworkers['host_cores']:
            logging.info('Asking the DbServer to start the workers')
            logs.dbcmd('zmq_start')  # start the zworkers
            logs.dbcmd('zmq_wait')  # wait for them to go up
        allargs = [(job_id, oqparam, exports, log_level, log_file)
                   for job_id, oqparam in jobparams]
        if jobarray:
            # spawn all calculations at once as a job array
            with start_many(eng.run_calc, allargs):
                pass
        else:
            # run the calculations sequentially
            for args in allargs:
                eng.run_calc(*args)
    finally:
        if dist == 'zmq' and config.zworkers['host_cores']:
            logging.info('Stopping the zworkers')
            logs.dbcmd('zmq_stop')
        elif dist.startswith('celery'):
            # revoke/cleanup leftover celery tasks
            eng.celery_cleanup(config.distribution.terminate_workers_on_revoke)
    return jobparams