Example #1
def run_calc(job_id, oqparam, exports, log_level='info', log_file=None, **kw):
    """
    Run a calculation.

    :param job_id:
        ID of the current job
    :param oqparam:
        :class:`openquake.commonlib.oqvalidation.OqParam` instance
    :param exports:
        A comma-separated string of export types.
    """
    register_signals()
    setproctitle('oq-job-%d' % job_id)
    logs.init(job_id, getattr(logging, log_level.upper()))
    with logs.handle(job_id, log_level, log_file):
        calc = base.calculators(oqparam, calc_id=job_id)
        logging.info('%s running %s [--hc=%s]', getpass.getuser(),
                     calc.oqparam.inputs['job_ini'],
                     calc.oqparam.hazard_calculation_id)
        logging.info('Using engine version %s', __version__)
        msg = check_obsolete_version(oqparam.calculation_mode)
        if msg:
            logging.warning(msg)
        calc.from_engine = True
        tb = 'None\n'
        try:
            if OQ_DISTRIBUTE.endswith('pool'):
                logging.warning('Using %d cores on %s', parallel.CT // 2,
                                platform.node())
            set_concurrent_tasks_default(calc)
            t0 = time.time()
            calc.run(exports=exports, **kw)
            logging.info('Exposing the outputs to the database')
            expose_outputs(calc.datastore)
            path = calc.datastore.filename
            size = general.humansize(os.path.getsize(path))
            logging.info('Stored %s on %s in %d seconds', size, path,
                         time.time() - t0)
            logs.dbcmd('finish', job_id, 'complete')
            calc.datastore.close()
            for line in logs.dbcmd('list_outputs', job_id, False):
                general.safeprint(line)
        except BaseException as exc:
            if isinstance(exc, MasterKilled):
                msg = 'aborted'
            else:
                msg = 'failed'
            tb = traceback.format_exc()
            try:
                logging.critical(tb)
                logs.dbcmd('finish', job_id, msg)
            except BaseException:  # an OperationalError may always happen
                sys.stderr.write(tb)
            raise
        finally:
            parallel.Starmap.shutdown()
    # sanity check to make sure that the logging on file is working
    if log_file and log_file != os.devnull and os.path.getsize(log_file) == 0:
        logging.warning('The log file %s is empty!?' % log_file)
    return calc
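
A minimal driver sketch for run_calc, mirroring the pattern of run_job in Example #3; the job.ini path is a placeholder, a running DbServer is assumed, and the exact logs.init signature varies across engine versions:

import getpass
import logging
from openquake.commonlib import logs
from openquake.engine import engine as eng

job_id = logs.init('job', logging.INFO)       # register the job in the database
oqparam = eng.job_from_file('/path/to/job.ini', job_id, getpass.getuser())
calc = run_calc(job_id, oqparam, exports='csv')   # returns the calculator object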
Example #2
def build_report(job_ini, output_dir=None):
    """
    Write a `report.rst` file with information about the calculation
    without running it

    :param job_ini:
        full pathname of the job.ini file
    :param output_dir:
        the directory where the report is written (default the input directory)
    """
    calc_id = logs.init()
    oq = readinput.get_oqparam(job_ini)
    if oq.calculation_mode == 'classical':
        oq.calculation_mode = 'preclassical'
    oq.ground_motion_fields = False
    output_dir = output_dir or os.path.dirname(job_ini)
    from openquake.calculators import base  # ugly
    calc = base.calculators(oq, calc_id)
    calc.save_params()  # needed to save oqparam

    # some care is taken so that the real calculation is not run:
    # the goal is to extract information about the source management only
    calc.pre_execute()
    if oq.calculation_mode == 'preclassical':
        calc.execute()
    rw = ReportWriter(calc.datastore)
    rw.make_report()
    report = (os.path.join(output_dir, 'report.rst')
              if output_dir else calc.datastore.export_path('report.rst'))
    try:
        rw.save(report)
    except IOError as exc:  # permission error
        sys.stderr.write(str(exc) + '\n')
    readinput.exposure = None  # ugly hack
    return report
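
A hedged usage sketch for build_report; the path is a placeholder. The report is written next to the job file and the function returns the report path:

report_path = build_report('/path/to/job.ini')   # summarizes sources, does not run
print('Report written to', report_path)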
Example #3
def run_job(job_ini, log_level='info', log_file=None, exports='',
            username=getpass.getuser(), **kw):
    """
    Run a job using the specified config file and other options.

    :param str job_ini:
        Path to the calculation config (INI-style) file.
    :param str log_level:
        'debug', 'info', 'warn', 'error', or 'critical'
    :param str log_file:
        Path to log file.
    :param exports:
        A comma-separated string of export types requested by the user.
    :param username:
        Name of the user running the job
    :param kw:
        Extra parameters like hazard_calculation_id and calculation_mode
    """
    job_id = logs.init('job', getattr(logging, log_level.upper()))
    with logs.handle(job_id, log_level, log_file):
        job_ini = os.path.abspath(job_ini)
        oqparam = eng.job_from_file(job_ini, job_id, username, **kw)
        kw['username'] = username
        eng.run_calc(job_id, oqparam, exports, **kw)
        for line in logs.dbcmd('list_outputs', job_id, False):
            safeprint(line)
    return job_id
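
Extra keyword arguments are forwarded to job_from_file and run_calc, which is how a risk job is chained to an existing hazard run; the path and calculation id below are placeholders:

run_job('/path/to/job_risk.ini', exports='csv',
        hazard_calculation_id=42)   # reuse the hazard outputs of calculation 42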
Example #4
def _run(job_ini, concurrent_tasks, pdb, reuse_input, loglevel, exports,
         params):
    global calc_path
    if 'hazard_calculation_id' in params:
        hc_id = int(params['hazard_calculation_id'])
        if hc_id < 0:  # interpret negative calculation ids
            calc_ids = datastore.get_calc_ids()
            try:
                params['hazard_calculation_id'] = calc_ids[hc_id]
            except IndexError:
                raise SystemExit('There are %d old calculations, cannot '
                                 'retrieve the %s' % (len(calc_ids), hc_id))
        else:
            params['hazard_calculation_id'] = hc_id
    dic = readinput.get_params(job_ini, params)
    # set the logs first of all
    log = logs.init("job", dic, getattr(logging, loglevel.upper()))

    # disable gzip_input
    base.BaseCalculator.gzip_inputs = lambda self: None
    with log, performance.Monitor('total runtime', measuremem=True) as monitor:
        calc = base.calculators(log.get_oqparam(), log.calc_id)
        if reuse_input:  # enable caching
            calc.oqparam.cachedir = datastore.get_datadir()
        calc.run(concurrent_tasks=concurrent_tasks, pdb=pdb, exports=exports)

    logging.info('Total time spent: %s s', monitor.duration)
    logging.info('Memory allocated: %s', general.humansize(monitor.mem))
    print('See the output with silx view %s' % calc.datastore.filename)
    calc_path, _ = os.path.splitext(calc.datastore.filename)  # used below
    return calc
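
The negative hazard_calculation_id convention above is plain Python negative indexing over the list of past calculation ids; the same logic in isolation, with made-up ids:

calc_ids = [66, 67, 68]   # stand-in for datastore.get_calc_ids()
hc_id = -1                # means 'the most recent calculation'
print(calc_ids[hc_id])    # 68; an IndexError means there are too few calculations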
Example #5
def main(calc_id: int, aggregate_by):
    """
    Re-run the postprocessing after an event based risk calculation
    """
    parent = util.read(calc_id)
    oqp = parent['oqparam']
    aggby = aggregate_by.split(',')
    for tagname in aggby:
        if tagname not in oqp.aggregate_by:
            raise ValueError('%r not in %s' % (tagname, oqp.aggregate_by))
    job_id = logs.init('job', level=logging.INFO)
    dic = dict(
        calculation_mode='reaggregate',
        description=oqp.description + '[aggregate_by=%s]' % aggregate_by,
        user_name=getpass.getuser(), is_running=1, status='executing',
        pid=os.getpid(), hazard_calculation_id=job_id)
    logs.dbcmd('update_job', job_id, dic)
    if os.environ.get('OQ_DISTRIBUTE') not in ('no', 'processpool'):
        os.environ['OQ_DISTRIBUTE'] = 'processpool'
    with logs.handle(job_id, logging.INFO):
        oqp.hazard_calculation_id = parent.calc_id
        parallel.Starmap.init()
        prc = PostRiskCalculator(oqp, job_id)
        try:
            prc.run(aggregate_by=aggby)
            engine.expose_outputs(prc.datastore)
            logs.dbcmd('finish', job_id, 'complete')
        except Exception:
            logs.dbcmd('finish', job_id, 'failed')
        finally:
            parallel.Starmap.shutdown()
Example #8
def main(calc_id: int, aggregate_by):
    """
    Re-run the postprocessing after an event based risk calculation
    """
    parent = datastore.read(calc_id)
    oqp = parent['oqparam']
    aggby = aggregate_by.split(',')
    for tagname in aggby:
        if tagname not in oqp.aggregate_by:
            raise ValueError('%r not in %s' % (tagname, oqp.aggregate_by))
    dic = dict(calculation_mode='reaggregate',
               description=oqp.description +
               '[aggregate_by=%s]' % aggregate_by,
               user_name=getpass.getuser(),
               is_running=1,
               status='executing',
               pid=os.getpid(),
               hazard_calculation_id=calc_id)
    log = logs.init('job', dic, logging.INFO)
    if os.environ.get('OQ_DISTRIBUTE') not in ('no', 'processpool'):
        os.environ['OQ_DISTRIBUTE'] = 'processpool'
    with log:
        oqp.hazard_calculation_id = parent.calc_id
        parallel.Starmap.init()
        prc = PostRiskCalculator(oqp, log.calc_id)
        prc.run(aggregate_by=aggby)
        engine.expose_outputs(prc.datastore)
Example #9
def create_jobs(job_inis,
                log_level=logging.INFO,
                log_file=None,
                user_name=None,
                hc_id=None,
                multi=False):
    """
    Create job records on the database.

    :returns: a list of LogContext objects
    """
    if len(job_inis) > 1 and not hc_id and not multi:  # first job as hc
        job = logs.init("job", job_inis[0], log_level, log_file, user_name,
                        hc_id)
        hc_id = job.calc_id
        jobs = [job]
        job_inis = job_inis[1:]
    else:
        jobs = []
    for job_ini in job_inis:
        if isinstance(job_ini, dict):
            dic = job_ini
        else:
            # NB: `get_params` must NOT log, since the logging is not
            # configured yet, otherwise the log will disappear :-(
            dic = readinput.get_params(job_ini)
        dic['hazard_calculation_id'] = hc_id
        if 'sensitivity_analysis' in dic:
            analysis = valid.dictionary(dic['sensitivity_analysis'])
            for values in itertools.product(*analysis.values()):
                new = logs.init('job', dic, log_level, None, user_name, hc_id)
                pars = dict(zip(analysis, values))
                for param, value in pars.items():
                    new.params[param] = str(value)
                new.params['description'] = '%s %s' % (
                    new.params['description'], pars)
                new.params['hazard_calculation_id'] = hc_id
                logging.info('Job with %s', pars)
                jobs.append(new)
        else:
            jobs.append(
                logs.init('job', dic, log_level, None, user_name, hc_id))
    if multi:
        for job in jobs:
            job.multi = True
    return jobs
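
The sensitivity_analysis branch expands one job into a job per combination of parameter values via itertools.product; the expansion logic in isolation, with hypothetical parameters:

import itertools

analysis = {'maximum_distance': [100, 200], 'truncation_level': [2, 3]}
for values in itertools.product(*analysis.values()):
    pars = dict(zip(analysis, values))
    print('Job with %s' % pars)   # four jobs, one per combination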
Example #10
def main(job_ini_or_zip_or_nrmls):
    """
    Check the validity of job.ini files, job.zip files and .xml files.
    NB: `oq check_input job_haz.ini job_risk.ini` is special-cased so
    that the risk files are checked before the hazard files.
    """
    if os.environ.get('OQ_DISTRIBUTE') not in ('no', 'processpool'):
        os.environ['OQ_DISTRIBUTE'] = 'processpool'
    all_inis = all(f.endswith('.ini') for f in job_ini_or_zip_or_nrmls)
    if all_inis:  # the typical case is job_haz.ini + job_risk.ini
        dic = {}
        for ini in job_ini_or_zip_or_nrmls:
            for key, val in readinput.get_params(ini).items():
                if key == 'inputs' and key in dic:
                    dic[key].update(val)
                else:  # the last wins
                    dic[key] = val
        with logs.init('job', dic) as log:
            logging.info('Running oq check_input %s',
                         ' '.join(job_ini_or_zip_or_nrmls))
            calc = base.calculators(log.get_oqparam(), log.calc_id)
            base.BaseCalculator.gzip_inputs = lambda self: None  # disable
            with mock.patch.dict(os.environ, {'OQ_CHECK_INPUT': '1'}):
                calc.read_inputs()
        return

    for job_ini_or_zip_or_nrml in job_ini_or_zip_or_nrmls:
        if job_ini_or_zip_or_nrml.endswith('.xml'):
            try:
                node = nrml.to_python(job_ini_or_zip_or_nrml)
                if node.tag.endswith('exposureModel'):
                    err = Exposure.check(job_ini_or_zip_or_nrml)
                    if err:
                        logging.warning(err)
                else:
                    logging.info('Checked %s', job_ini_or_zip_or_nrml)
            except Exception as exc:
                sys.exit(exc)
        else:  # .zip
            with logs.init('job', job_ini_or_zip_or_nrml) as log:
                path = os.path.abspath(job_ini_or_zip_or_nrml)
                logging.info('Running oq check_input %s', path)
                calc = base.calculators(log.get_oqparam(), log.calc_id)
                base.BaseCalculator.gzip_inputs = lambda self: None  # disable
                with mock.patch.dict(os.environ, {'OQ_CHECK_INPUT': '1'}):
                    calc.read_inputs()
Example #11
    def test_recompute(self):
        # test recomputing aggregate loss curves with post_risk
        self.run_calc(recompute.__file__, 'job.ini')
        parent = self.calc.datastore
        # the parent has aggregate_by = NAME_1, NAME_2, taxonomy
        oq = parent['oqparam']
        oq.__dict__['aggregate_by'] = ['NAME_1']
        job_id = logs.init('nojob', logging.INFO)  # requires the DbServer
        prc = PostRiskCalculator(oq, job_id)
        oq.hazard_calculation_id = parent.calc_id
        with mock.patch.dict(os.environ, {'OQ_DISTRIBUTE': 'no'}):
            prc.run()
Example #12
def submit_job(job_ini, username, **kw):
    """
    Create a job object from the given job.ini file in the job directory
    and run it in a new process. Returns the job ID.
    """
    # errors in validating oqparam are reported immediately
    params = readinput.get_params(job_ini)
    job_id = logs.init('job')
    params['_job_id'] = job_id
    proc = Process(target=engine.run_jobs,
                   args=([params], config.distribution.log_level, None,
                         '', username), kwargs=kw)
    proc.start()
    return job_id
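
A hedged usage sketch; the job runs in a background process and errors surface only in the engine log, so the caller gets back just the job id. The path is a placeholder:

job_id = submit_job('/path/to/job.ini', getpass.getuser())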
Example #13
def run2(job_haz, job_risk, calc_id, concurrent_tasks, pdb, loglevel,
         exports, params):
    """
    Run both hazard and risk, one after the other
    """
    hcalc = base.calculators(readinput.get_oqparam(job_haz), calc_id)
    hcalc.run(concurrent_tasks=concurrent_tasks, pdb=pdb,
              exports=exports, **params)
    hc_id = hcalc.datastore.calc_id
    rcalc_id = logs.init(level=getattr(logging, loglevel.upper()))
    oq = readinput.get_oqparam(job_risk, hc_id=hc_id)
    rcalc = base.calculators(oq, rcalc_id)
    rcalc.run(pdb=pdb, exports=exports, **params)
    return rcalc
Example #14
    def get_calc(self, testfile, job_ini, **kw):
        """
        Return the calculator to be run on the given job.ini file
        """
        self.testdir = os.path.dirname(testfile) if os.path.isfile(testfile) \
            else testfile
        params = readinput.get_params(os.path.join(self.testdir, job_ini), kw)
        oqvalidation.OqParam.calculation_mode.validator.choices = tuple(
            base.calculators)
        oq = oqvalidation.OqParam(**params)
        oq.validate()
        # change this when debugging the test
        log = logs.init('calc', params)
        return base.calculators(oq, log.calc_id)
Example #15
def check_input(job_ini_or_zip_or_nrmls):
    for job_ini_or_zip_or_nrml in job_ini_or_zip_or_nrmls:
        if job_ini_or_zip_or_nrml.endswith('.xml'):
            try:
                node = nrml.to_python(job_ini_or_zip_or_nrml)
                if node.tag.endswith('exposureModel'):
                    err = Exposure.check(job_ini_or_zip_or_nrml)
                    if err:
                        logging.warning(err)
                else:
                    logging.info('Checked %s', job_ini_or_zip_or_nrml)
            except Exception as exc:
                sys.exit(exc)
        else:
            oq = readinput.get_oqparam(job_ini_or_zip_or_nrml)
            base.calculators(oq, logs.init()).read_inputs()
Example #16
def submit_job(job_ini, username, hazard_calculation_id=None):
    """
    Create a job object from the given job.ini file in the job directory
    and run it in a new process. Returns the job ID.
    """
    # errors in validating oqparam are reported immediately
    params = vars(readinput.get_oqparam(job_ini))
    job_id = logs.init('job')
    params['_job_id'] = job_id
    # errors in the calculation are not reported but are visible in the log
    proc = Process(target=engine.run_jobs,
                   args=([params], config.distribution.log_level, None, '',
                         username),
                   kwargs={'hazard_calculation_id': hazard_calculation_id})
    proc.start()
    return job_id
Example #17
def _run(job_inis, concurrent_tasks, calc_id, pdb, loglevel, hc, exports,
         params):
    global calc_path
    assert len(job_inis) in (1, 2), job_inis
    # set the logs first of all
    calc_id = logs.init(calc_id, getattr(logging, loglevel.upper()))
    # disable gzip_input
    base.BaseCalculator.gzip_inputs = lambda self: None
    with performance.Monitor('total runtime', measuremem=True) as monitor:
        if os.environ.get('OQ_DISTRIBUTE') not in ('no', 'processpool'):
            os.environ['OQ_DISTRIBUTE'] = 'processpool'
        if len(job_inis) == 1:  # run hazard or risk
            if hc:
                hc_id = hc[0]
                rlz_ids = hc[1:]
            else:
                hc_id = None
                rlz_ids = ()
            oqparam = readinput.get_oqparam(job_inis[0], hc_id=hc_id)
            if not oqparam.cachedir:  # enable caching
                oqparam.cachedir = datastore.get_datadir()
            if hc_id and hc_id < 0:  # interpret negative calculation ids
                calc_ids = datastore.get_calc_ids()
                try:
                    hc_id = calc_ids[hc_id]
                except IndexError:
                    raise SystemExit('There are %d old calculations, cannot '
                                     'retrieve the %s' %
                                     (len(calc_ids), hc_id))
            calc = base.calculators(oqparam, calc_id)
            calc.run(concurrent_tasks=concurrent_tasks,
                     pdb=pdb,
                     exports=exports,
                     hazard_calculation_id=hc_id,
                     rlz_ids=rlz_ids,
                     **params)
        else:  # run hazard + risk
            calc = run2(job_inis[0], job_inis[1], calc_id, concurrent_tasks,
                        pdb, loglevel, exports, params)

    logging.info('Total time spent: %s s', monitor.duration)
    logging.info('Memory allocated: %s', general.humansize(monitor.mem))
    print('See the output with silx view %s' % calc.datastore.filename)
    calc_path, _ = os.path.splitext(calc.datastore.filename)  # used below
    return calc
Example #18
def run2(job_haz, job_risk, calc_id, concurrent_tasks, pdb, reuse_input,
         loglevel, exports, params):
    """
    Run both hazard and risk, one after the other
    """
    oq = readinput.get_oqparam(job_haz, kw=params)
    hcalc = base.calculators(oq, calc_id)
    hcalc.run(concurrent_tasks=concurrent_tasks, pdb=pdb, exports=exports)
    hcalc.datastore.close()
    hc_id = hcalc.datastore.calc_id
    rcalc_id = logs.init(level=getattr(logging, loglevel.upper()))
    params['hazard_calculation_id'] = str(hc_id)
    oq = readinput.get_oqparam(job_risk, kw=params)
    rcalc = base.calculators(oq, rcalc_id)
    if reuse_input:  # enable caching
        oq.cachedir = datastore.get_datadir()
    rcalc.run(pdb=pdb, exports=exports)
    return rcalc
Example #19
def submit_job(job_ini, username, hazard_job_id=None):
    """
    Create a job object from the given job.ini file in the job directory
    and run it in a new process. Returns the job ID and PID.
    """
    job_id = logs.init('job')
    oq = engine.job_from_file(
        job_ini, job_id, username, hazard_calculation_id=hazard_job_id)
    pik = pickle.dumps(oq, protocol=0)  # human readable protocol
    code = RUNCALC % dict(job_id=job_id, hazard_job_id=hazard_job_id, pik=pik,
                          username=username)
    tmp_py = gettemp(code, suffix='.py')
    # print(code, tmp_py)  # useful when debugging
    devnull = subprocess.DEVNULL
    popen = subprocess.Popen([sys.executable, tmp_py],
                             stdin=devnull, stdout=devnull, stderr=devnull)
    threading.Thread(target=popen.wait).start()
    logs.dbcmd('update_job', job_id, {'pid': popen.pid})
    return job_id, popen.pid
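
Protocol 0 is chosen above because it serializes to printable ASCII, so the pickle can be embedded as text in the generated script; a standalone check:

import pickle

pik = pickle.dumps({'calculation_mode': 'classical'}, protocol=0)
print(pik.decode('ascii'))   # printable text, safe to interpolate into source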
Example #20
    def __init__(self):
        try:
            from matplotlib import pyplot
            self.plt = pyplot
            self.fig, self.ax = pyplot.subplots()
        except Exception:  # for instance, no Tkinter
            pass
        self.lookfor = partial(numpy.lookfor, module='openquake')
        self.extract = extract
        self.read = read
        self.nrml = nrml
        self.get_oqparam = readinput.get_oqparam
        self.get_site_collection = readinput.get_site_collection
        self.get_composite_source_model = readinput.get_composite_source_model
        self.get_exposure = readinput.get_exposure
        self.get_calc = lambda job_ini: get_calc(job_ini, logs.init())
        self.make_hmap = calc.make_hmap
        self.geodetic_distance = geodetic_distance
Example #21
def recompute_losses(calc_id, aggregate_by):
    """Re-run the postprocessing after an event based risk calculation"""
    parent = util.read(calc_id)
    oqp = parent['oqparam']
    aggby = aggregate_by.split(',')
    for tagname in aggby:
        if tagname not in oqp.aggregate_by:
            raise ValueError('%r not in %s' % (tagname, oqp.aggregate_by))
    job_id = logs.init('job', level=logging.INFO)
    if os.environ.get('OQ_DISTRIBUTE') not in ('no', 'processpool'):
        os.environ['OQ_DISTRIBUTE'] = 'processpool'
    with logs.handle(job_id, logging.INFO):
        oqp.hazard_calculation_id = calc_id
        parallel.Starmap.init()
        prc = PostRiskCalculator(oqp, job_id)
        try:
            prc.run(aggregate_by=aggby)
        finally:
            parallel.Starmap.shutdown()
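
A hedged invocation with placeholder arguments; each tag must belong to the aggregate_by list of the parent calculation, otherwise a ValueError is raised:

recompute_losses(42, 'NAME_1')   # re-aggregate calculation 42 by a single tag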
Example #22
def main(job_ini_or_zip_or_nrmls):
    if os.environ.get('OQ_DISTRIBUTE') not in ('no', 'processpool'):
        os.environ['OQ_DISTRIBUTE'] = 'processpool'
    for job_ini_or_zip_or_nrml in job_ini_or_zip_or_nrmls:
        if job_ini_or_zip_or_nrml.endswith('.xml'):
            try:
                node = nrml.to_python(job_ini_or_zip_or_nrml)
                if node.tag.endswith('exposureModel'):
                    err = Exposure.check(job_ini_or_zip_or_nrml)
                    if err:
                        logging.warning(err)
                else:
                    logging.info('Checked %s', job_ini_or_zip_or_nrml)
            except Exception as exc:
                sys.exit(exc)
        else:
            with logs.init('calc', job_ini_or_zip_or_nrml) as log:
                calc = base.calculators(log.get_oqparam(), log.calc_id)
                base.BaseCalculator.gzip_inputs = lambda self: None  # disable
                with mock.patch.dict(os.environ, {'OQ_CHECK_INPUT': '1'}):
                    calc.read_inputs()
Example #23
    def test_recompute(self):
        # test recomputing aggregate loss curves with post_risk
        # this is starting from a ruptures.csv file
        out = self.run_calc(recompute.__file__, 'job.ini', exports='csv')
        [fname] = out['agg_losses-rlzs', 'csv']
        self.assertEqualFiles('expected/agg_losses.csv', fname, delta=1E-5)
        [fname] = out['agg_curves-rlzs', 'csv']
        self.assertEqualFiles('expected/agg_curves.csv', fname, delta=1E-5)

        parent = self.calc.datastore
        # the parent has aggregate_by = NAME_1, NAME_2, taxonomy
        oq = parent['oqparam']
        oq.__dict__['aggregate_by'] = ['NAME_1']
        job_id = logs.init('nojob', logging.INFO)  # requires the DbServer
        prc = PostRiskCalculator(oq, job_id)
        oq.hazard_calculation_id = parent.calc_id
        with mock.patch.dict(os.environ, {'OQ_DISTRIBUTE': 'no'}):
            prc.run()
        [fname] = export(('agg_losses-rlzs', 'csv'), prc.datastore)
        self.assertEqualFiles('expected/recomputed_losses.csv', fname,
                              delta=1E-5)
Example #24
def _run(job_inis, concurrent_tasks, pdb, loglevel, hc, exports, params):
    global calc_path
    assert len(job_inis) in (1, 2), job_inis
    # set the logs first of all
    calc_id = logs.init(level=getattr(logging, loglevel.upper()))
    with performance.Monitor('total runtime', measuremem=True) as monitor:
        if len(job_inis) == 1:  # run hazard or risk
            if hc:
                hc_id = hc[0]
                rlz_ids = hc[1:]
            else:
                hc_id = None
                rlz_ids = ()
            oqparam = readinput.get_oqparam(job_inis[0], hc_id=hc_id)
            vars(oqparam).update(params)
            if hc_id and hc_id < 0:  # interpret negative calculation ids
                calc_ids = datastore.get_calc_ids()
                try:
                    hc_id = calc_ids[hc_id]
                except IndexError:
                    raise SystemExit('There are %d old calculations, cannot '
                                     'retrieve the %s' %
                                     (len(calc_ids), hc_id))
            calc = base.calculators(oqparam, calc_id)
            calc.run(concurrent_tasks=concurrent_tasks,
                     pdb=pdb,
                     exports=exports,
                     hazard_calculation_id=hc_id,
                     rlz_ids=rlz_ids)
        else:  # run hazard + risk
            calc = run2(job_inis[0], job_inis[1], calc_id, concurrent_tasks,
                        pdb, loglevel, exports, params)

    logging.info('Total time spent: %s s', monitor.duration)
    logging.info('Memory allocated: %s', general.humansize(monitor.mem))
    print('See the output with hdfview %s' % calc.datastore.hdf5path)
    calc_path, _ = os.path.splitext(calc.datastore.hdf5path)  # used below
    return calc
Example #25
    def test_recompute(self):
        # test recomputing aggregate loss curves with post_risk
        # this is starting from a ruptures.csv file
        out = self.run_calc(recompute.__file__, 'job.ini', exports='csv')
        [fname] = out['aggrisk', 'csv']
        self.assertEqualFiles('expected/agg_losses.csv', fname, delta=1E-5)
        [fname] = out['aggcurves', 'csv']
        self.assertEqualFiles('expected/aggcurves.csv', fname, delta=1E-5)

        parent = self.calc.datastore
        # the parent has aggregate_by = NAME_1, NAME_2, taxonomy
        oq = parent['oqparam']
        oq.__dict__['aggregate_by'] = ['NAME_1']
        log = logs.init('job', {'calculation_mode': 'post_risk',
                                'description': 'test recompute'})
        prc = PostRiskCalculator(oq, log.calc_id)
        prc.assetcol = self.calc.assetcol
        oq.hazard_calculation_id = parent.calc_id
        with mock.patch.dict(os.environ, {'OQ_DISTRIBUTE': 'no'}), log:
            prc.run()
        [fname] = export(('aggrisk', 'csv'), prc.datastore)
        self.assertEqualFiles('expected/recomputed_losses.csv', fname,
                              delta=1E-5)
Example #26
def _run(job_inis, concurrent_tasks, pdb, loglevel, hc, exports, params):
    global calc_path
    assert len(job_inis) in (1, 2), job_inis
    # set the logs first of all
    calc_id = logs.init(level=getattr(logging, loglevel.upper()))
    with performance.Monitor('total runtime', measuremem=True) as monitor:
        if len(job_inis) == 1:  # run hazard or risk
            if hc:
                hc_id = hc[0]
                rlz_ids = hc[1:]
            else:
                hc_id = None
                rlz_ids = ()
            oqparam = readinput.get_oqparam(job_inis[0], hc_id=hc_id)
            vars(oqparam).update(params)
            if hc_id and hc_id < 0:  # interpret negative calculation ids
                calc_ids = datastore.get_calc_ids()
                try:
                    hc_id = calc_ids[hc_id]
                except IndexError:
                    raise SystemExit(
                        'There are %d old calculations, cannot '
                        'retrieve the %s' % (len(calc_ids), hc_id))
            calc = base.calculators(oqparam, calc_id)
            calc.run(concurrent_tasks=concurrent_tasks, pdb=pdb,
                     exports=exports, hazard_calculation_id=hc_id,
                     rlz_ids=rlz_ids)
        else:  # run hazard + risk
            calc = run2(
                job_inis[0], job_inis[1], calc_id, concurrent_tasks, pdb,
                loglevel, exports, params)

    logging.info('Total time spent: %s s', monitor.duration)
    logging.info('Memory allocated: %s', general.humansize(monitor.mem))
    print('See the output with silx view %s' % calc.datastore.filename)
    calc_path, _ = os.path.splitext(calc.datastore.filename)  # used below
    return calc
Example #27
    def get_calc(self, job_ini):
        log = logs.init("job", job_ini)
        log.__enter__()
        return calculators(log.get_oqparam(), log.calc_id)
Example #28
def engine(log_file, no_distribute, yes, config_file, make_html_report,
           upgrade_db, db_version, what_if_I_upgrade, run,
           list_hazard_calculations, list_risk_calculations,
           delete_calculation, delete_uncompleted_calculations,
           hazard_calculation_id, list_outputs, show_log,
           export_output, export_outputs, exports='',
           log_level='info', reuse_hazard=False):
    """
    Run a calculation using the traditional command line API
    """
    if not run:
        # configure a basic logging
        logs.init()

    if config_file:
        config.read(os.path.abspath(os.path.expanduser(config_file)),
                    soft_mem_limit=int, hard_mem_limit=int, port=int,
                    multi_user=valid.boolean, multi_node=valid.boolean)

    if no_distribute:
        os.environ['OQ_DISTRIBUTE'] = 'no'

    # check if the datadir exists
    datadir = datastore.get_datadir()
    if not os.path.exists(datadir):
        os.makedirs(datadir)

    dbserver.ensure_on()
    # check if we are talking to the right server
    err = dbserver.check_foreign()
    if err:
        sys.exit(err)

    if upgrade_db:
        msg = logs.dbcmd('what_if_I_upgrade', 'read_scripts')
        if msg.startswith('Your database is already updated'):
            pass
        elif yes or confirm('Proceed? (y/n) '):
            logs.dbcmd('upgrade_db')
        sys.exit(0)

    if db_version:
        safeprint(logs.dbcmd('db_version'))
        sys.exit(0)

    if what_if_I_upgrade:
        safeprint(logs.dbcmd('what_if_I_upgrade', 'extract_upgrade_scripts'))
        sys.exit(0)

    # check if the db is outdated
    outdated = logs.dbcmd('check_outdated')
    if outdated:
        sys.exit(outdated)

    # hazard or hazard+risk
    if hazard_calculation_id == -1:
        # get the latest calculation of the current user
        hc_id = get_job_id(hazard_calculation_id, getpass.getuser())
    elif hazard_calculation_id:
        # make it possible to use calculations made by another user
        hc_id = get_job_id(hazard_calculation_id)
    else:
        hc_id = None
    if run:
        log_file = os.path.expanduser(log_file) \
            if log_file is not None else None
        job_inis = [os.path.expanduser(f) for f in run]
        if len(job_inis) == 1 and not hc_id:
            # init logs before calling get_oqparam
            logs.init('nojob', getattr(logging, log_level.upper()))
            # not using logs.handle that logs on the db
            oq = readinput.get_oqparam(job_inis[0])
            smart_run(job_inis[0], oq, log_level, log_file,
                      exports, reuse_hazard)
            return
        for i, job_ini in enumerate(job_inis):
            open(job_ini, 'rb').read()  # IOError if the file does not exist
            job_id = run_job(job_ini, log_level, log_file,
                             exports, hazard_calculation_id=hc_id)
            if not hc_id:  # use the first calculation as base for the others
                hc_id = job_id
    # hazard
    elif list_hazard_calculations:
        for line in logs.dbcmd(
                'list_calculations', 'hazard', getpass.getuser()):
            safeprint(line)
    elif delete_calculation is not None:
        del_calculation(delete_calculation, yes)
    # risk
    elif list_risk_calculations:
        for line in logs.dbcmd('list_calculations', 'risk', getpass.getuser()):
            safeprint(line)

    # export
    elif make_html_report:
        safeprint('Written %s' % make_report(make_html_report))
        sys.exit(0)

    elif list_outputs is not None:
        hc_id = get_job_id(list_outputs)
        for line in logs.dbcmd('list_outputs', hc_id):
            safeprint(line)
    elif show_log is not None:
        hc_id = get_job_id(show_log)
        for line in logs.dbcmd('get_log', hc_id):
            safeprint(line)

    elif export_output is not None:
        output_id, target_dir = export_output
        dskey, calc_id, datadir = logs.dbcmd('get_output', int(output_id))
        for line in core.export_output(
                dskey, calc_id, datadir, os.path.expanduser(target_dir),
                exports or 'csv,xml'):
            safeprint(line)

    elif export_outputs is not None:
        job_id, target_dir = export_outputs
        hc_id = get_job_id(job_id)
        for line in core.export_outputs(
                hc_id, os.path.expanduser(target_dir), exports or 'csv,xml'):
            safeprint(line)

    elif delete_uncompleted_calculations:
        logs.dbcmd('delete_uncompleted_calculations', getpass.getuser())

    else:
        engine.parentparser.prog = 'oq engine'
        engine.parentparser.print_usage()
Example #29
def _init_logs(dic, lvl):
    if '_job_id' in dic:
        logs.init(dic['_job_id'], lvl)
    else:
        dic['_job_id'] = logs.init('job', lvl)
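
The helper either reuses a job id already stored in the dictionary or creates a new one and stores it, so repeated calls with the same dictionary stay attached to a single job; a hedged illustration (a running DbServer is assumed):

dic = {}
_init_logs(dic, logging.INFO)   # creates a job id and stores it in dic['_job_id']
_init_logs(dic, logging.INFO)   # reuses dic['_job_id']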
Example #30
    def post_execute(self, result):
        oq = self.oqparam
        if not oq.ground_motion_fields:
            return
        N = len(self.sitecol.complete)
        L = len(oq.imtls.array)
        if result and oq.hazard_curves_from_gmfs:
            rlzs = self.rlzs_assoc.realizations
            # compute and save statistics; this is done in process and can
            # be very slow if there are thousands of realizations
            weights = [rlz.weight for rlz in rlzs]
            # NB: in the future we may want to save the individual hazard
            # curves if oq.individual_curves is set; for the moment we
            # save the statistical curves only
            hstats = oq.hazard_stats()
            S = len(hstats)
            pmaps = list(result.values())
            R = len(weights)
            if len(pmaps) != R:
                # this should never happen, unless I break the
                # logic tree reduction mechanism during refactoring
                raise AssertionError('Expected %d pmaps, got %d' %
                                     (len(weights), len(pmaps)))
            if oq.individual_curves:
                logging.info('Saving individual hazard curves')
                self.datastore.create_dset('hcurves-rlzs', F32, (N, R, L))
                self.datastore.set_attrs('hcurves-rlzs', nbytes=N * R * L * 4)
                if oq.poes:
                    P = len(oq.poes)
                    M = len(oq.imtls)
                    ds = self.datastore.create_dset(
                        'hmaps-rlzs', F32, (N, R, M, P))
                    self.datastore.set_attrs(
                        'hmaps-rlzs', nbytes=N * R * P * M * 4)
                for r, pmap in enumerate(pmaps):
                    arr = numpy.zeros((N, L), F32)
                    for sid in pmap:
                        arr[sid] = pmap[sid].array[:, 0]
                    self.datastore['hcurves-rlzs'][:, r] = arr
                    if oq.poes:
                        hmap = calc.make_hmap(pmap, oq.imtls, oq.poes)
                        for sid in hmap:
                            ds[sid, r] = hmap[sid].array

            if S:
                logging.info('Computing statistical hazard curves')
                self.datastore.create_dset('hcurves-stats', F32, (N, S, L))
                self.datastore.set_attrs('hcurves-stats', nbytes=N * S * L * 4)
                if oq.poes:
                    P = len(oq.poes)
                    M = len(oq.imtls)
                    ds = self.datastore.create_dset(
                        'hmaps-stats', F32, (N, S, M, P))
                    self.datastore.set_attrs(
                        'hmaps-stats', nbytes=N * S * P * M * 4)
                for s, stat in enumerate(hstats):
                    pmap = compute_pmap_stats(
                        pmaps, [hstats[stat]], weights, oq.imtls)
                    arr = numpy.zeros((N, L), F32)
                    for sid in pmap:
                        arr[sid] = pmap[sid].array[:, 0]
                    self.datastore['hcurves-stats'][:, s] = arr
                    if oq.poes:
                        hmap = calc.make_hmap(pmap, oq.imtls, oq.poes)
                        for sid in hmap:
                            ds[sid, s] = hmap[sid].array

        if self.datastore.parent:
            self.datastore.parent.open('r')
        if oq.compare_with_classical:  # compute classical curves
            export_dir = os.path.join(oq.export_dir, 'cl')
            if not os.path.exists(export_dir):
                os.makedirs(export_dir)
            oq.export_dir = export_dir
            job_id = logs.init('job')
            self.cl = ClassicalCalculator(oq, job_id)
            # TODO: perhaps it is possible to avoid reprocessing the source
            # model; however, usually this is quite fast and does not dominate
            # the computation
            self.cl.run(close=False)
            engine.expose_outputs(self.cl.datastore)
            cl_mean_curves = get_mean_curves(self.cl.datastore)
            eb_mean_curves = get_mean_curves(self.datastore)
            self.rdiff, index = util.max_rel_diff_index(
                cl_mean_curves, eb_mean_curves)
            logging.warning('Relative difference with the classical '
                            'mean curves: %d%% at site index %d',
                            self.rdiff * 100, index)
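
The nbytes attributes set above follow from the dataset shape times the 4-byte float32 item size; a standalone check with made-up sizes:

import numpy

N, R, L = 1000, 20, 60                      # sites, realizations, intensity levels
arr = numpy.zeros((N, R, L), numpy.float32)
assert arr.nbytes == N * R * L * 4          # 4 bytes per float32 element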
Example #31
def main(no_distribute=False,
         yes=False,
         upgrade_db=False,
         db_version=False,
         what_if_I_upgrade=False,
         list_hazard_calculations=False,
         list_risk_calculations=False,
         delete_uncompleted_calculations=False,
         multi=False,
         reuse_input=False,
         *,
         log_file=None,
         make_html_report=None,
         run=None,
         delete_calculation: int = None,
         hazard_calculation_id: int = None,
         list_outputs: int = None,
         show_log=None,
         export_output=None,
         export_outputs=None,
         param='',
         config_file=None,
         exports='',
         log_level='info'):
    """
    Run a calculation using the traditional command line API
    """
    if not run:
        # configure a basic logging
        logs.init()

    if config_file:
        config.read(os.path.abspath(os.path.expanduser(config_file)),
                    soft_mem_limit=int,
                    hard_mem_limit=int,
                    port=int,
                    multi_user=valid.boolean,
                    serialize_jobs=valid.boolean,
                    strict=valid.boolean,
                    code=exec)

    if no_distribute:
        os.environ['OQ_DISTRIBUTE'] = 'no'

    # check if the datadir exists
    datadir = datastore.get_datadir()
    if not os.path.exists(datadir):
        os.makedirs(datadir)

    dbserver.ensure_on()
    # check if we are talking to the right server
    err = dbserver.check_foreign()
    if err:
        sys.exit(err)

    if upgrade_db:
        msg = logs.dbcmd('what_if_I_upgrade', 'read_scripts')
        if msg.startswith('Your database is already updated'):
            pass
        elif yes or confirm('Proceed? (y/n) '):
            logs.dbcmd('upgrade_db')
        sys.exit(0)

    if db_version:
        safeprint(logs.dbcmd('db_version'))
        sys.exit(0)

    if what_if_I_upgrade:
        safeprint(logs.dbcmd('what_if_I_upgrade', 'extract_upgrade_scripts'))
        sys.exit(0)

    # check if the db is outdated
    outdated = logs.dbcmd('check_outdated')
    if outdated:
        sys.exit(outdated)

    # hazard or hazard+risk
    if hazard_calculation_id == -1:
        # get the latest calculation of the current user
        hc_id = get_job_id(hazard_calculation_id, getpass.getuser())
    elif hazard_calculation_id:
        # make it possible to use calculations made by another user
        hc_id = get_job_id(hazard_calculation_id)
    else:
        hc_id = None
    if run:
        pars = dict(p.split('=', 1) for p in param.split(',')) if param else {}
        if reuse_input:
            pars['cachedir'] = datadir
        if hc_id:
            pars['hazard_calculation_id'] = str(hc_id)
        log_file = os.path.expanduser(log_file) \
            if log_file is not None else None
        job_inis = [os.path.expanduser(f) for f in run]
        pars['multi'] = multi
        run_jobs(job_inis, log_level, log_file, exports, **pars)

    # hazard
    elif list_hazard_calculations:
        for line in logs.dbcmd('list_calculations', 'hazard',
                               getpass.getuser()):
            safeprint(line)
    elif delete_calculation is not None:
        del_calculation(delete_calculation, yes)
    # risk
    elif list_risk_calculations:
        for line in logs.dbcmd('list_calculations', 'risk', getpass.getuser()):
            safeprint(line)

    # export
    elif make_html_report:
        safeprint('Written %s' % make_report(make_html_report))
        sys.exit(0)

    elif list_outputs is not None:
        hc_id = get_job_id(list_outputs)
        for line in logs.dbcmd('list_outputs', hc_id):
            safeprint(line)
    elif show_log is not None:
        hc_id = get_job_id(show_log)
        for line in logs.dbcmd('get_log', hc_id):
            safeprint(line)

    elif export_output is not None:
        output_id, target_dir = export_output
        dskey, calc_id, datadir = logs.dbcmd('get_output', int(output_id))
        for line in core.export_output(dskey, calc_id, datadir,
                                       os.path.expanduser(target_dir), exports
                                       or DEFAULT_EXPORTS):
            safeprint(line)

    elif export_outputs is not None:
        job_id, target_dir = export_outputs
        hc_id = get_job_id(job_id)
        for line in core.export_outputs(hc_id, os.path.expanduser(target_dir),
                                        exports or DEFAULT_EXPORTS):
            safeprint(line)

    elif delete_uncompleted_calculations:
        logs.dbcmd('delete_uncompleted_calculations', getpass.getuser())
    else:
        print("Please pass some option, see oq engine --help")
Example #32
def _init_logs(dic, lvl):
    if '_job_id' in dic:  # reuse job_id
        logs.init(dic['_job_id'], lvl)
    else:  # create a new job_id
        dic['_job_id'] = logs.init('job', lvl)
Example #33
def run_jobs(job_inis, log_level='info', log_file=None, exports='',
             username=getpass.getuser(), **kw):
    """
    Run jobs using the specified config file and other options.

    :param job_inis:
        A list of paths to .ini files.
    :param str log_level:
        'debug', 'info', 'warn', 'error', or 'critical'
    :param str log_file:
        Path to log file.
    :param exports:
        A comma-separated string of export types requested by the user.
    :param username:
        Name of the user running the job
    :param kw:
        Extra parameters like hazard_calculation_id and calculation_mode
    """
    dist = parallel.oq_distribute()
    jobparams = []
    for job_ini in job_inis:
        # NB: the logs must be initialized BEFORE everything
        job_id = logs.init('job', getattr(logging, log_level.upper()))
        with logs.handle(job_id, log_level, log_file):
            oqparam = eng.job_from_file(os.path.abspath(job_ini), job_id,
                                        username, **kw)
        if (not jobparams and 'csm_cache' not in kw
                and 'hazard_calculation_id' not in kw):
            kw['hazard_calculation_id'] = job_id
        jobparams.append((job_id, oqparam))
    jobarray = len(jobparams) > 1 and 'csm_cache' in kw
    try:
        eng.poll_queue(job_id, poll_time=15)
        # wait for an empty slot or a CTRL-C
    except BaseException:
        # the job aborted even before starting
        for job_id, oqparam in jobparams:
            logs.dbcmd('finish', job_id, 'aborted')
        return jobparams
    else:
        for job_id, oqparam in jobparams:
            dic = {'status': 'executing', 'pid': eng._PID}
            if jobarray:
                dic['hazard_calculation_id'] = jobparams[0][0]
            logs.dbcmd('update_job', job_id, dic)
    try:
        if dist == 'zmq' and config.zworkers['host_cores']:
            logging.info('Asking the DbServer to start the workers')
            logs.dbcmd('zmq_start')  # start the zworkers
            logs.dbcmd('zmq_wait')  # wait for them to go up
        allargs = [(job_id, oqparam, exports, log_level, log_file)
                   for job_id, oqparam in jobparams]
        if jobarray:
            with start_many(eng.run_calc, allargs):
                pass
        else:
            for args in allargs:
                eng.run_calc(*args)
    finally:
        if dist == 'zmq' and config.zworkers['host_cores']:
            logging.info('Stopping the zworkers')
            logs.dbcmd('zmq_stop')
        elif dist.startswith('celery'):
            eng.celery_cleanup(config.distribution.terminate_workers_on_revoke)
    return jobparams
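
When several .ini files are passed without an explicit hazard_calculation_id, the first job becomes the hazard input of the following ones; a hedged two-file invocation with placeholder paths:

run_jobs(['/path/to/job_haz.ini', '/path/to/job_risk.ini'], exports='csv')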
Example #34
    def post_execute(self, result):
        oq = self.oqparam
        if not oq.ground_motion_fields and not oq.hazard_curves_from_gmfs:
            return
        N = len(self.sitecol.complete)
        M = len(oq.imtls)
        L = len(oq.imtls.array)
        L1 = L // M
        if result and oq.hazard_curves_from_gmfs:
            rlzs = self.datastore['full_lt'].get_realizations()
            # compute and save statistics; this is done in process and can
            # be very slow if there are thousands of realizations
            weights = [rlz.weight for rlz in rlzs]
            # NB: in the future we may want to save the individual hazard
            # curves if oq.individual_curves is set; for the moment we
            # save the statistical curves only
            hstats = oq.hazard_stats()
            S = len(hstats)
            pmaps = list(result.values())
            R = len(weights)
            if len(pmaps) != R:
                # this should never happen, unless I break the
                # logic tree reduction mechanism during refactoring
                raise AssertionError('Expected %d pmaps, got %d' %
                                     (len(weights), len(pmaps)))
            if oq.individual_curves:
                logging.info('Saving individual hazard curves')
                self.datastore.create_dset('hcurves-rlzs', F32, (N, R, M, L1))
                self.datastore.set_shape_attrs('hcurves-rlzs',
                                               site_id=N,
                                               rlz_id=R,
                                               imt=list(oq.imtls),
                                               lvl=numpy.arange(L1))
                if oq.poes:
                    P = len(oq.poes)
                    M = len(oq.imtls)
                    ds = self.datastore.create_dset('hmaps-rlzs', F32,
                                                    (N, R, M, P))
                    self.datastore.set_shape_attrs('hmaps-rlzs',
                                                   site_id=N,
                                                   rlz_id=R,
                                                   imt=list(oq.imtls),
                                                   poe=oq.poes)
                for r, pmap in enumerate(pmaps):
                    arr = numpy.zeros((N, M, L1), F32)
                    for sid in pmap:
                        arr[sid] = pmap[sid].array.reshape(M, L1)
                    self.datastore['hcurves-rlzs'][:, r] = arr
                    if oq.poes:
                        hmap = calc.make_hmap(pmap, oq.imtls, oq.poes)
                        for sid in hmap:
                            ds[sid, r] = hmap[sid].array

            if S:
                logging.info('Computing statistical hazard curves')
                self.datastore.create_dset('hcurves-stats', F32, (N, S, M, L1))
                self.datastore.set_shape_attrs('hcurves-stats',
                                               site_id=N,
                                               stat=list(hstats),
                                               imt=list(oq.imtls),
                                               lvl=numpy.arange(L1))
                if oq.poes:
                    P = len(oq.poes)
                    M = len(oq.imtls)
                    ds = self.datastore.create_dset('hmaps-stats', F32,
                                                    (N, S, M, P))
                    self.datastore.set_shape_attrs('hmaps-stats',
                                                   site_id=N,
                                                   stat=list(hstats),
                                                   imt=list(oq.imtls),
                                                   poe=oq.poes)
                for s, stat in enumerate(hstats):
                    pmap = compute_pmap_stats(pmaps, [hstats[stat]], weights,
                                              oq.imtls)
                    arr = numpy.zeros((N, M, L1), F32)
                    for sid in pmap:
                        arr[sid] = pmap[sid].array.reshape(M, L1)
                    self.datastore['hcurves-stats'][:, s] = arr
                    if oq.poes:
                        hmap = calc.make_hmap(pmap, oq.imtls, oq.poes)
                        for sid in hmap:
                            ds[sid, s] = hmap[sid].array

        if self.datastore.parent:
            self.datastore.parent.open('r')
        if oq.compare_with_classical:  # compute classical curves
            export_dir = os.path.join(oq.export_dir, 'cl')
            if not os.path.exists(export_dir):
                os.makedirs(export_dir)
            oq.export_dir = export_dir
            job_id = logs.init('job')
            oq.calculation_mode = 'classical'
            self.cl = ClassicalCalculator(oq, job_id)
            # TODO: perhaps it is possible to avoid reprocessing the source
            # model; however, usually this is quite fast and does not dominate
            # the computation
            self.cl.run()
            engine.expose_outputs(self.cl.datastore)
            for imt in oq.imtls:
                cl_mean_curves = get_mean_curves(self.cl.datastore, imt)
                eb_mean_curves = get_mean_curves(self.datastore, imt)
                self.rdiff, index = util.max_rel_diff_index(
                    cl_mean_curves, eb_mean_curves)
                logging.warning(
                    'Relative difference with the classical '
                    'mean curves: %d%% at site index %d, imt=%s',
                    self.rdiff * 100, index, imt)