def submit_job(request_files, ini, username, hc_id):
    """
    Create a job object from the given files and run it in a new process.

    :returns: a job ID
    """
    # build a LogContext object associated to a database job
    [job] = engine.create_jobs(
        [dict(calculation_mode='preclassical',
              description='Calculation waiting to start')],
        config.distribution.log_level, None, username, hc_id)

    # store the request files and perform some validation
    try:
        job_ini = store(request_files, ini, job.calc_id)
        job.oqparam = oq = readinput.get_oqparam(
            job_ini, kw={'hazard_calculation_id': hc_id})
        if oq.sensitivity_analysis:
            logs.dbcmd('set_status', job.calc_id, 'deleted')  # hide it
            jobs = engine.create_jobs(
                [job_ini], config.distribution.log_level,
                None, username, hc_id, True)
        else:
            dic = dict(calculation_mode=oq.calculation_mode,
                       description=oq.description,
                       hazard_calculation_id=hc_id)
            logs.dbcmd('update_job', job.calc_id, dic)
            jobs = [job]
    except Exception:
        tb = traceback.format_exc()
        logs.dbcmd('log', job.calc_id, datetime.utcnow(), 'CRITICAL',
                   'before starting', tb)
        logs.dbcmd('finish', job.calc_id, 'failed')
        raise

    custom_tmp = os.path.dirname(job_ini)
    submit_cmd = config.distribution.submit_cmd.split()
    big_job = oq.get_input_size() > int(config.distribution.min_input_size)
    if submit_cmd == ENGINE:  # used for debugging
        for job in jobs:
            subprocess.Popen(submit_cmd + [save(job, custom_tmp)])
    elif submit_cmd == KUBECTL and big_job:
        for job in jobs:
            with open(os.path.join(CWD, 'job.yaml')) as f:
                yaml = string.Template(f.read()).substitute(
                    CALC_PIK=save(job, custom_tmp),
                    CALC_NAME='calc%d' % job.calc_id)
            subprocess.run(submit_cmd, input=yaml.encode('ascii'))
    else:
        Process(target=engine.run_jobs, args=(jobs,)).start()
    return job.calc_id
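# A minimal sketch (not the engine's actual implementation) of how a
# sensitivity_analysis dictionary such as {'maximum_distance': [100, 200]}
# could expand into one parameter override per combination of values, one
# spawned job each; expand_sensitivity is a hypothetical helper added only
# for illustration.
import itertools


def expand_sensitivity(sensitivity):
    """
    >>> expand_sensitivity({'maximum_distance': [100, 200]})
    [{'maximum_distance': 100}, {'maximum_distance': 200}]
    """
    names = list(sensitivity)
    return [dict(zip(names, values))
            for values in itertools.product(*sensitivity.values())]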
def test_sensitivity(self):
    job_ini = gettemp('''[general]
description = sensitivity test
calculation_mode = scenario
sites = 0 0
intensity_measure_types = PGA
sensitivity_analysis = {'maximum_distance': [100, 200]}''')
    run_jobs(create_jobs([job_ini]))
def test_multi_run(self):
    job_ini = os.path.join(os.path.dirname(case_4.__file__), 'job.ini')
    jobs = create_jobs([job_ini, job_ini], 'error', multi=True)
    run_jobs(jobs)
    with Print.patch():
        [r1, r2] = commonlib.logs.dbcmd(
            'select id, hazard_calculation_id from job '
            'where id in (?S) order by id',
            [job.calc_id for job in jobs])
    self.assertEqual(r1.hazard_calculation_id, None)
    self.assertEqual(r2.hazard_calculation_id, None)
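# The query above passes a sequence through the ?S placeholder. A minimal
# sketch, assuming ?S simply expands into one scalar placeholder per
# element (expand_seq_placeholder is hypothetical, for illustration only):
def expand_seq_placeholder(sql, seq):
    return sql.replace('?S', ', '.join(['?'] * len(seq))), list(seq)

# expand_seq_placeholder('... where id in (?S) order by id', [42, 43])
# -> ('... where id in (?, ?) order by id', [42, 43])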
def submit_job(job_ini, username, hc_id):
    """
    Create a job object from the given job.ini file in the job directory
    and run it in a new process.

    :returns: a job ID
    """
    jobs = engine.create_jobs(
        [job_ini], config.distribution.log_level, None, username, hc_id)
    proc = Process(target=engine.run_jobs, args=(jobs,))
    proc.start()
    return jobs[0].calc_id
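# The Process above is fire-and-forget: the parent returns the calc ID at
# once while the calculation runs in the background. A standalone sketch
# of the same pattern, assuming any picklable callable (start_detached is
# illustrative, not part of the engine):
from multiprocessing import Process


def start_detached(func, *args):
    proc = Process(target=func, args=args)
    proc.start()  # no join: the caller does not wait for completion
    return proc.pid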
def test_oqdata(self):
    # test that the environment variable OQ_DATADIR is honored
    job_ini = os.path.join(os.path.dirname(case_2.__file__), 'job_2.ini')
    tempdir = tempfile.mkdtemp()
    dbserver.ensure_on()
    with mock.patch.dict(os.environ, OQ_DATADIR=tempdir):
        [job] = run_jobs(create_jobs([job_ini], 'error'))
        job = commonlib.logs.dbcmd('get_job', job.calc_id)
        self.assertTrue(job.ds_calc_dir.startswith(tempdir),
                        job.ds_calc_dir)
        with Print.patch() as p:
            sap.runline(f'openquake.commands export ruptures {job.id} '
                        f'-e csv --export-dir={tempdir}')
        self.assertIn('Exported', str(p))
    shutil.rmtree(tempdir)
def test_ebr(self):
    # test a single case of `run_jobs`, but it is the most complex one,
    # event based risk with post processing
    job_ini = os.path.join(
        os.path.dirname(case_master.__file__), 'job.ini')
    with Print.patch() as p:
        [log] = run_jobs(create_jobs([job_ini], 'error'))
    self.assertIn('id | name', str(p))

    # check the exported outputs
    expected = set('''\
Aggregate Event Losses
Aggregate Loss Curves
Aggregate Loss Curves Statistics
Aggregate Losses
Aggregate Losses Statistics
Average Asset Losses
Average Asset Losses Statistics
Average Ground Motion Field
Earthquake Ruptures
Events
Full Report
Ground Motion Fields
Hazard Curves
Hazard Maps
Input Files
Realizations
Source Loss Table'''.splitlines())
    with Print.patch() as p:
        sap.runline(f'openquake.commands engine --lo {log.calc_id}')
    got = set(re.findall(r'\| ([\w ]+)', str(p))) - {'name'}
    if got != expected:
        print('Missing output', expected - got, file=sys.stderr)

    # sanity check on the performance views: make sure that the most
    # relevant information is stored (it can be lost due to a wrong
    # refactoring of the monitoring and it happened several times)
    with read(log.calc_id) as dstore:
        perf = str(view('performance', dstore))
        self.assertIn('total event_based_risk', perf)
def main(job_ini, pdb=False, reuse_input=False, *,
         slowest: int = None,
         hc: int = None,
         param='',
         concurrent_tasks: int = None,
         exports: valid.export_formats = '',
         loglevel='info'):
    """
    Run a calculation
    """
    dbserver.ensure_on()
    if param:
        params = dict(p.split('=', 1) for p in param.split(','))
    else:
        params = {}
    if hc:
        params['hazard_calculation_id'] = str(hc)
    if slowest:
        prof = cProfile.Profile()
        prof.runctx('_run(job_ini[0], 0, pdb, reuse_input, loglevel, '
                    'exports, params)', globals(), locals())
        pstat = calc_path + '.pstat'
        prof.dump_stats(pstat)
        print('Saved profiling info in %s' % pstat)
        print(get_pstats(pstat, slowest))
        return
    if len(job_ini) == 1:
        return _run(job_ini[0], concurrent_tasks, pdb, reuse_input,
                    loglevel, exports, params)
    jobs = create_jobs(job_ini, loglevel, hc_id=hc)
    for job in jobs:
        job.params.update(params)
        job.params['exports'] = ','.join(exports)
    run_jobs(jobs)
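# Worked example of the param parsing above: comma-separated key=value
# pairs, split on the first '=' only, so values may themselves contain
# '='; all values stay strings (note how hc is stored via str(hc)).
param = 'calculation_mode=classical,description=my run'
params = dict(p.split('=', 1) for p in param.split(','))
# params == {'calculation_mode': 'classical', 'description': 'my run'}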
def test_OQ_REDUCE(self):
    with mock.patch.dict(os.environ, OQ_REDUCE='.1'):
        job_ini = os.path.join(os.path.dirname(case_4.__file__),
                               'job.ini')
        run_jobs(create_jobs([job_ini]))
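# A hedged sketch of what an OQ_REDUCE=.1 factor could mean: keep a random
# ~10% sample of some input sequence. The engine's actual reduction logic
# is not shown in this section; reduce_seq is illustrative only.
import os
import random


def reduce_seq(seq):
    factor = float(os.environ.get('OQ_REDUCE', '1'))
    if factor >= 1:
        return seq
    return random.sample(seq, max(1, int(len(seq) * factor)))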
def main(no_distribute=False, yes=False,
         upgrade_db=False, db_version=False, what_if_I_upgrade=False,
         list_hazard_calculations=False, list_risk_calculations=False,
         delete_uncompleted_calculations=False,
         multi=False, reuse_input=False, *,
         log_file=None, make_html_report=None, run=None,
         delete_calculation: int = None,
         hazard_calculation_id: int = None,
         list_outputs: int = None, show_log=None,
         export_output=None, export_outputs=None,
         param='', config_file=None, exports='', log_level='info'):
    """
    Run a calculation using the traditional command line API
    """
    user_name = getpass.getuser()

    if not run:
        # configure a basic logging
        logging.basicConfig(level=logging.INFO)

    if config_file:
        config.read(os.path.abspath(os.path.expanduser(config_file)),
                    limit=int, soft_mem_limit=int, hard_mem_limit=int,
                    port=int, multi_user=valid.boolean,
                    serialize_jobs=valid.boolean, strict=valid.boolean,
                    code=exec)

    if no_distribute:
        os.environ['OQ_DISTRIBUTE'] = 'no'

    # check if the datadir exists
    datadir = datastore.get_datadir()
    if not os.path.exists(datadir):
        os.makedirs(datadir)

    dbserver.ensure_on()
    # check if we are talking to the right server
    err = dbserver.check_foreign()
    if err:
        sys.exit(err)

    if upgrade_db:
        msg = logs.dbcmd('what_if_I_upgrade', 'read_scripts')
        if msg.startswith('Your database is already updated'):
            pass
        elif yes or confirm('Proceed? (y/n) '):
            logs.dbcmd('upgrade_db')
        sys.exit(0)

    if db_version:
        safeprint(logs.dbcmd('db_version'))
        sys.exit(0)

    if what_if_I_upgrade:
        safeprint(logs.dbcmd('what_if_I_upgrade',
                             'extract_upgrade_scripts'))
        sys.exit(0)

    # check if the db is outdated
    outdated = logs.dbcmd('check_outdated')
    if outdated:
        sys.exit(outdated)

    # hazard or hazard+risk
    if hazard_calculation_id == -1:
        # get the latest calculation of the current user
        hc_id = get_job_id(hazard_calculation_id, user_name)
    elif hazard_calculation_id:
        # make it possible to use calculations made by another user
        hc_id = get_job_id(hazard_calculation_id)
    else:
        hc_id = None

    if run:
        pars = dict(p.split('=', 1)
                    for p in param.split(',')) if param else {}
        if reuse_input:
            pars['cachedir'] = datadir
        log_file = os.path.expanduser(log_file) \
            if log_file is not None else None
        job_inis = [os.path.expanduser(f) for f in run]
        jobs = create_jobs(job_inis, log_level, log_file,
                           user_name, hc_id, multi)
        for job in jobs:
            job.params.update(pars)
            job.params['exports'] = exports
        run_jobs(jobs)

    # hazard
    elif list_hazard_calculations:
        for line in logs.dbcmd(
                'list_calculations', 'hazard', getpass.getuser()):
            safeprint(line)
    elif delete_calculation is not None:
        del_calculation(delete_calculation, yes)
    # risk
    elif list_risk_calculations:
        for line in logs.dbcmd(
                'list_calculations', 'risk', getpass.getuser()):
            safeprint(line)

    # export
    elif make_html_report:
        safeprint('Written %s' % make_report(make_html_report))
        sys.exit(0)

    elif list_outputs is not None:
        hc_id = get_job_id(list_outputs)
        for line in logs.dbcmd('list_outputs', hc_id):
            safeprint(line)

    elif show_log is not None:
        hc_id = get_job_id(show_log)
        for line in logs.dbcmd('get_log', hc_id):
            safeprint(line)

    elif export_output is not None:
        output_id, target_dir = export_output
        dskey, calc_id, datadir = logs.dbcmd('get_output', int(output_id))
        for line in core.export_output(
                dskey, calc_id, datadir, os.path.expanduser(target_dir),
                exports or DEFAULT_EXPORTS):
            safeprint(line)

    elif export_outputs is not None:
        job_id, target_dir = export_outputs
        hc_id = get_job_id(job_id)
        for line in core.export_outputs(
                hc_id, os.path.expanduser(target_dir),
                exports or DEFAULT_EXPORTS):
            safeprint(line)

    elif delete_uncompleted_calculations:
        logs.dbcmd('delete_uncompleted_calculations', getpass.getuser())

    else:
        print("Please pass some option, see oq engine --help")
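# A hedged usage sketch, driving the engine command programmatically the
# way test_ebr above does; 'job.ini' is a placeholder path and the flag
# spellings are assumed to follow the parameter names (as with --lo in
# test_ebr):
from openquake.baselib import sap

sap.runline('openquake.commands engine --run job.ini --exports csv')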