Example #1
    def test_sensitivity(self):
        job_ini = gettemp('''[general]
description = sensitivity test
calculation_mode = scenario
sites = 0 0
sensitivity_analysis = {
  'maximum_distance': [100, 200]}''')
        run_jobs([job_ini])
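
Note: gettemp here comes from openquake.baselib.general; it writes the literal job.ini content to a temporary file and returns its path. A minimal stand-in using only the standard library (the helper name `write_temp` is invented for illustration):

import tempfile

def write_temp(content, suffix='.ini'):
    # write the content to a named temporary file and return its path,
    # roughly what gettemp does for this test
    with tempfile.NamedTemporaryFile(mode='w', suffix=suffix,
                                     delete=False) as f:
        f.write(content)
    return f.name
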
Example #2
    def test_multi_run(self):
        job_ini = os.path.join(os.path.dirname(case_4.__file__), 'job.ini')
        jobs = create_jobs([job_ini, job_ini], 'error', multi=True)
        run_jobs(jobs)
        with Print.patch():
            [r1, r2] = commonlib.logs.dbcmd(
                'select id, hazard_calculation_id from job '
                'where id in (?S) order by id', [job.calc_id for job in jobs])
        self.assertEqual(r1.hazard_calculation_id, None)
        self.assertEqual(r2.hazard_calculation_id, None)
Example #3
    def test_multi_run(self):
        job_ini = os.path.join(os.path.dirname(case_4.__file__), 'job.ini')
        jobparams = run_jobs([job_ini, job_ini], log_level='error', multi=True)
        jobs, params = zip(*jobparams)
        with Print.patch():
            [r1, r2] = commonlib.logs.dbcmd(
                'select id, hazard_calculation_id from job '
                'where id in (?S) order by id', jobs)
        self.assertEqual(r1.hazard_calculation_id, r1.id)
        self.assertEqual(r2.hazard_calculation_id, r1.id)
Example #4
def main(job_ini,
         pdb=False,
         reuse_input=False,
         *,
         slowest: int = None,
         hc: int = None,
         param='',
         concurrent_tasks: int = None,
         exports: valid.export_formats = '',
         loglevel='info'):
    """
    Run a calculation
    """
    dbserver.ensure_on()
    if param:
        params = dict(p.split('=', 1) for p in param.split(','))
    else:
        params = {}
    if hc:
        params['hazard_calculation_id'] = str(hc)
    if slowest:
        prof = cProfile.Profile()
        prof.runctx(
            '_run(job_ini[0], 0, pdb, reuse_input, loglevel, '
            'exports, params)', globals(), locals())
        pstat = calc_path + '.pstat'
        prof.dump_stats(pstat)
        print('Saved profiling info in %s' % pstat)
        print(get_pstats(pstat, slowest))
        return
    if len(job_ini) == 1:
        return _run(job_ini[0], concurrent_tasks, pdb, reuse_input, loglevel,
                    exports, params)
    jobs = create_jobs(job_ini, loglevel, hc_id=hc)
    for job in jobs:
        job.params.update(params)
        job.params['exports'] = ','.join(exports)
    run_jobs(jobs)
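
The slowest branch above is the standard cProfile/pstats recipe: profile a statement, dump the stats to a file, then print the top entries. A self-contained sketch of the same pattern (the profiled function and the file name are placeholders, not engine API):

import cProfile
import pstats

def work():
    # stand-in for the real calculation
    return sum(i * i for i in range(10 ** 6))

prof = cProfile.Profile()
prof.runctx('work()', globals(), locals())
prof.dump_stats('calc.pstat')
# show the 5 entries with the largest cumulative time
pstats.Stats('calc.pstat').sort_stats('cumulative').print_stats(5)
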
Example #5
    def test_oqdata(self):
        # check that the environment variable OQ_DATADIR is honored
        job_ini = os.path.join(os.path.dirname(case_2.__file__), 'job_2.ini')
        tempdir = tempfile.mkdtemp()
        dbserver.ensure_on()
        with mock.patch.dict(os.environ, OQ_DATADIR=tempdir):
            [(job_id, oq)] = run_jobs([job_ini], log_level='error')
            job = commonlib.logs.dbcmd('get_job', job_id)
            self.assertTrue(job.ds_calc_dir.startswith(tempdir),
                            job.ds_calc_dir)
        with Print.patch() as p:
            export('ruptures', job_id, 'csv', tempdir)
        self.assertIn('Exported', str(p))
        shutil.rmtree(tempdir)
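
mock.patch.dict is what keeps the OQ_DATADIR override local to the test: the variable is set on entry and the original environment is restored on exit. The pattern in isolation (the variable name is invented):

import os
from unittest import mock

with mock.patch.dict(os.environ, MY_VAR='temporary'):
    assert os.environ['MY_VAR'] == 'temporary'
# outside the block the original environment is back
assert 'MY_VAR' not in os.environ
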
Example #6
    def test_ebr(self):
        # test a single case of `run_jobs`, but the most complex one:
        # event based risk with post processing
        job_ini = os.path.join(
            os.path.dirname(case_master.__file__), 'job.ini')
        with Print.patch() as p:
            [(job_id, oqparam)] = run_jobs([job_ini], log_level='error')
        self.assertIn('id | name', str(p))

        # sanity check on the performance views: make sure that the most
        # relevant information is stored (it can be lost due to a wrong
        # refactoring of the monitoring, as has happened several times)
        with read(job_id) as dstore:
            perf = view('performance', dstore)
            self.assertIn('total event_based_risk', perf)
Example #7
    def test_oqdata(self):
        # check that the environment variable OQ_DATADIR is honored
        job_ini = os.path.join(os.path.dirname(case_2.__file__), 'job_2.ini')
        tempdir = tempfile.mkdtemp()
        dbserver.ensure_on()
        with mock.patch.dict(os.environ, OQ_DATADIR=tempdir):
            [job] = run_jobs(create_jobs([job_ini], 'error'))
            job = commonlib.logs.dbcmd('get_job', job.calc_id)
            self.assertTrue(job.ds_calc_dir.startswith(tempdir),
                            job.ds_calc_dir)
        with Print.patch() as p:
            sap.runline(f'openquake.commands export ruptures {job.id} '
                        f'-e csv --export-dir={tempdir}')
        self.assertIn('Exported', str(p))
        shutil.rmtree(tempdir)
Example #8
    def test_ebr(self):
        # test a single case of `run_jobs`, but the most complex one:
        # event based risk with post processing
        job_ini = os.path.join(os.path.dirname(case_master.__file__),
                               'job.ini')
        with Print.patch() as p:
            [log] = run_jobs(create_jobs([job_ini], 'error'))
        self.assertIn('id | name', str(p))

        # check the exported outputs
        expected = set('''\
Aggregate Event Losses
Aggregate Loss Curves
Aggregate Loss Curves Statistics
Aggregate Losses
Aggregate Losses Statistics
Average Asset Losses
Average Asset Losses Statistics
Average Ground Motion Field
Earthquake Ruptures
Events
Full Report
Ground Motion Fields
Hazard Curves
Hazard Maps
Input Files
Realizations
Source Loss Table'''.splitlines())
        with Print.patch() as p:
            sap.runline(f'openquake.commands engine --lo {log.calc_id}')
        got = set(re.findall(r'\| ([\w ]+)', str(p))) - {'name'}
        if got != expected:
            print('Missing output', expected - got, file=sys.stderr)
        # sanity check on the performance views: make sure that the most
        # relevant information is stored (it can be lost due to a wrong
        # refactoring of the monitoring, as has happened several times)
        with read(log.calc_id) as dstore:
            perf = str(view('performance', dstore))
            self.assertIn('total event_based_risk', perf)
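
The `got` set above is scraped from the pipe-separated table printed by the engine command. A sketch of how that regex behaves on output shaped like the listing (sample rows invented for illustration):

import re

text = ' id | name\n123 | Hazard Curves\n124 | Hazard Maps'
got = set(re.findall(r'\| ([\w ]+)', text)) - {'name'}
print(got)  # {'Hazard Curves', 'Hazard Maps'}
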
Example #9
def main(no_distribute=False,
         yes=False,
         upgrade_db=False,
         db_version=False,
         what_if_I_upgrade=False,
         list_hazard_calculations=False,
         list_risk_calculations=False,
         delete_uncompleted_calculations=False,
         multi=False,
         reuse_input=False,
         *,
         log_file=None,
         make_html_report=None,
         run=None,
         delete_calculation: int = None,
         hazard_calculation_id: int = None,
         list_outputs: int = None,
         show_log=None,
         export_output=None,
         export_outputs=None,
         param='',
         config_file=None,
         exports='',
         log_level='info'):
    """
    Run a calculation using the traditional command line API
    """
    if not run:
        # configure basic logging
        logs.init()

    if config_file:
        config.read(os.path.abspath(os.path.expanduser(config_file)),
                    soft_mem_limit=int,
                    hard_mem_limit=int,
                    port=int,
                    multi_user=valid.boolean,
                    serialize_jobs=valid.boolean,
                    strict=valid.boolean,
                    code=exec)

    if no_distribute:
        os.environ['OQ_DISTRIBUTE'] = 'no'

    # check if the datadir exists
    datadir = datastore.get_datadir()
    if not os.path.exists(datadir):
        os.makedirs(datadir)

    dbserver.ensure_on()
    # check if we are talking to the right server
    err = dbserver.check_foreign()
    if err:
        sys.exit(err)

    if upgrade_db:
        msg = logs.dbcmd('what_if_I_upgrade', 'read_scripts')
        if msg.startswith('Your database is already updated'):
            pass
        elif yes or confirm('Proceed? (y/n) '):
            logs.dbcmd('upgrade_db')
        sys.exit(0)

    if db_version:
        safeprint(logs.dbcmd('db_version'))
        sys.exit(0)

    if what_if_I_upgrade:
        safeprint(logs.dbcmd('what_if_I_upgrade', 'extract_upgrade_scripts'))
        sys.exit(0)

    # check if the db is outdated
    outdated = logs.dbcmd('check_outdated')
    if outdated:
        sys.exit(outdated)

    # hazard or hazard+risk
    if hazard_calculation_id == -1:
        # get the latest calculation of the current user
        hc_id = get_job_id(hazard_calculation_id, getpass.getuser())
    elif hazard_calculation_id:
        # make it possible to use calculations made by another user
        hc_id = get_job_id(hazard_calculation_id)
    else:
        hc_id = None
    if run:
        pars = dict(p.split('=', 1) for p in param.split(',')) if param else {}
        if reuse_input:
            pars['cachedir'] = datadir
        if hc_id:
            pars['hazard_calculation_id'] = str(hc_id)
        log_file = os.path.expanduser(log_file) \
            if log_file is not None else None
        job_inis = [os.path.expanduser(f) for f in run]
        pars['multi'] = multi
        run_jobs(job_inis, log_level, log_file, exports, **pars)

    # hazard
    elif list_hazard_calculations:
        for line in logs.dbcmd('list_calculations', 'hazard',
                               getpass.getuser()):
            safeprint(line)
    elif delete_calculation is not None:
        del_calculation(delete_calculation, yes)
    # risk
    elif list_risk_calculations:
        for line in logs.dbcmd('list_calculations', 'risk', getpass.getuser()):
            safeprint(line)

    # export
    elif make_html_report:
        safeprint('Written %s' % make_report(make_html_report))
        sys.exit(0)

    elif list_outputs is not None:
        hc_id = get_job_id(list_outputs)
        for line in logs.dbcmd('list_outputs', hc_id):
            safeprint(line)
    elif show_log is not None:
        hc_id = get_job_id(show_log)
        for line in logs.dbcmd('get_log', hc_id):
            safeprint(line)

    elif export_output is not None:
        output_id, target_dir = export_output
        dskey, calc_id, datadir = logs.dbcmd('get_output', int(output_id))
        for line in core.export_output(dskey, calc_id, datadir,
                                       os.path.expanduser(target_dir), exports
                                       or DEFAULT_EXPORTS):
            safeprint(line)

    elif export_outputs is not None:
        job_id, target_dir = export_outputs
        hc_id = get_job_id(job_id)
        for line in core.export_outputs(hc_id, os.path.expanduser(target_dir),
                                        exports or DEFAULT_EXPORTS):
            safeprint(line)

    elif delete_uncompleted_calculations:
        logs.dbcmd('delete_uncompleted_calculations', getpass.getuser())
    else:
        print("Please pass some option, see oq engine --help")
Example #10
    def test_OQ_REDUCE(self):
        with mock.patch.dict(os.environ, OQ_REDUCE='10'):
            job_ini = os.path.join(os.path.dirname(case_4.__file__), 'job.ini')
            run_jobs([job_ini])