Example #1
def _run(job_ini, concurrent_tasks, pdb, reuse_input, loglevel, exports,
         params):
    global calc_path
    if 'hazard_calculation_id' in params:
        hc_id = int(params['hazard_calculation_id'])
        if hc_id < 0:  # interpret negative calculation ids
            calc_ids = datastore.get_calc_ids()
            try:
                params['hazard_calculation_id'] = calc_ids[hc_id]
            except IndexError:
                raise SystemExit('There are %d old calculations, cannot '
                                 'retrieve the %s' % (len(calc_ids), hc_id))
        else:
            params['hazard_calculation_id'] = hc_id
    dic = readinput.get_params(job_ini, params)
    # set the logs first of all
    log = logs.init("job", dic, getattr(logging, loglevel.upper()))

    # disable gzip_input
    base.BaseCalculator.gzip_inputs = lambda self: None
    with log, performance.Monitor('total runtime', measuremem=True) as monitor:
        calc = base.calculators(log.get_oqparam(), log.calc_id)
        if reuse_input:  # enable caching
            calc.oqparam.cachedir = datastore.get_datadir()
        calc.run(concurrent_tasks=concurrent_tasks, pdb=pdb, exports=exports)

    logging.info('Total time spent: %s s', monitor.duration)
    logging.info('Memory allocated: %s', general.humansize(monitor.mem))
    print('See the output with silx view %s' % calc.datastore.filename)
    calc_path, _ = os.path.splitext(calc.datastore.filename)  # used below
    return calc
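
Note the negative-id convention handled above: it is plain Python negative indexing over the list returned by datastore.get_calc_ids(), which (assuming the engine returns the ids in ascending order) makes -1 the most recent calculation. A minimal sketch:

calc_ids = [41, 42, 43]   # what datastore.get_calc_ids() might return
hc_id = -1                # conventional "latest calculation"
print(calc_ids[hc_id])    # -> 43, the most recent calc_id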
Example #2
def main(calc_id):
    """
    Import a remote calculation into the local database. Server, username
    and password must be specified in the openquake.cfg file.
    NB: calc_id can be a local pathname to a datastore not already
    present in the database: in that case it is imported into the db.
    """
    dbserver.ensure_on()
    try:
        calc_id = int(calc_id)
    except ValueError:  # assume calc_id is a pathname
        remote = False
    else:
        remote = True
        job = logs.dbcmd('get_job', calc_id)
        if job is not None:
            sys.exit('There is already a job #%d in the local db' % calc_id)
    if remote:
        datadir = datastore.get_datadir()
        webex = WebExtractor(calc_id)
        hc_id = webex.oqparam.hazard_calculation_id
        if hc_id:
            sys.exit('The job has a parent (#%d) and cannot be '
                     'downloaded' % hc_id)
        webex.dump('%s/calc_%d.hdf5' % (datadir, calc_id))
        webex.close()
    with datastore.read(calc_id) as dstore:
        pprint.pprint(dstore.get_attrs('/'))
        engine.expose_outputs(dstore, status='complete')
    logging.info('Imported calculation %s successfully', calc_id)
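
A hedged usage sketch of the importer above: the integer form assumes a reachable server configured in openquake.cfg, and the pathname below is a placeholder for a local datastore file.

main(1234)                    # fetch calc 1234 from the remote server
main('/tmp/calc_1234.hdf5')   # import a local datastore into the local db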
Example #3
def main(what='contents', calc_id: str_or_int = -1, extra=()):
    """
    Show the content of a datastore (by default the last one).
    """
    datadir = datastore.get_datadir()
    if what == 'all':  # show all
        if not os.path.exists(datadir):
            return
        rows = []
        for calc_id in datastore.get_calc_ids(datadir):
            try:
                ds = datastore.read(calc_id)
                oq = ds['oqparam']
                cmode, descr = oq.calculation_mode, oq.description
            except Exception:
                # invalid datastore file, or missing calculation_mode
                # and description attributes, perhaps due to a manual kill
                f = os.path.join(datadir, 'calc_%s.hdf5' % calc_id)
                logging.warning('Unreadable datastore %s', f)
                continue
            else:
                rows.append((calc_id, cmode, descr.encode('utf-8')))
        for row in sorted(rows, key=lambda row: row[0]):  # by calc_id
            print('#%d %s: %s' % row)
        return

    ds = datastore.read(calc_id)

    # this part is experimental
    if view.keyfunc(what) in view:
        print(view(what, ds))
    elif what.split('/', 1)[0] in extract:
        obj = extract(ds, what, *extra)
        if isinstance(obj, hdf5.ArrayWrapper):
            print_(obj)
        elif hasattr(obj, 'dtype') and obj.dtype.names:
            print(writers.write_csv(io.StringIO(), obj))
        else:
            print(obj)
    elif what in ds:
        obj = ds.getitem(what)
        if '__pdcolumns__' in obj.attrs:
            df = ds.read_df(what)
            print(df.sort_values(df.columns[0]))
        elif hasattr(obj, 'items'):  # is a group of datasets
            print(obj)
        else:  # is a single dataset
            obj.refresh()  # for SWMR mode
            print_(hdf5.ArrayWrapper.from_(obj))
    else:
        print('%s not found' % what)

    ds.close()
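
Called directly, the function above covers three lookup paths: a registered view, an extract key, or a raw dataset name. A minimal usage sketch, assuming at least one calculation exists in the datadir:

main('all')            # one summary line per calculation in the datadir
main('contents')       # datasets of the latest datastore (calc_id=-1)
main('oqparam', 1234)  # a single dataset from calculation 1234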
Example #4
def init(calc_id='nojob', level=logging.INFO):
    """
    1. initialize the root logger (if not already initialized)
    2. set the format of the root handlers (if any)
    3. return a new calculation ID candidate if calc_id is 'job' or 'nojob'
       (with 'nojob' the calculation ID is not stored in the database)
    """
    if not logging.root.handlers:  # first time
        logging.basicConfig(level=level)
    if calc_id == 'job':  # produce a calc_id by creating a job in the db
        calc_id = dbcmd('create_job', datastore.get_datadir())
    elif calc_id == 'nojob':  # produce a calc_id without creating a job
        calc_id = datastore.get_last_calc_id() + 1
    else:
        calc_id = int(calc_id)
        path = os.path.join(datastore.get_datadir(), 'calc_%d.hdf5' % calc_id)
        if os.path.exists(path):
            raise OSError('%s already exists' % path)
    fmt = '[%(asctime)s #{} %(levelname)s] %(message)s'.format(calc_id)
    for handler in logging.root.handlers:
        f = logging.Formatter(fmt, datefmt='%Y-%m-%d %H:%M:%S')
        handler.setFormatter(f)
    return calc_id
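
A short sketch of the three calc_id modes accepted above:

calc_id = init()        # 'nojob': next free id, nothing stored in the db
calc_id = init('job')   # create a db job and log against its id
calc_id = init(42)      # reuse id 42; raises OSError if calc_42.hdf5 exists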
Example #5
def run2(job_haz, job_risk, calc_id, concurrent_tasks, pdb, reuse_input,
         loglevel, exports, params):
    """
    Run both hazard and risk, one after the other
    """
    oq = readinput.get_oqparam(job_haz, kw=params)
    hcalc = base.calculators(oq, calc_id)
    hcalc.run(concurrent_tasks=concurrent_tasks, pdb=pdb, exports=exports)
    hcalc.datastore.close()
    hc_id = hcalc.datastore.calc_id
    rcalc_id = logs.init('job', level=getattr(logging, loglevel.upper()))
    params['hazard_calculation_id'] = str(hc_id)
    oq = readinput.get_oqparam(job_risk, kw=params)
    rcalc = base.calculators(oq, rcalc_id)
    if reuse_input:  # enable caching
        oq.cachedir = datastore.get_datadir()
    rcalc.run(pdb=pdb, exports=exports)
    return rcalc
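
The hand-off from hazard to risk above is carried by a single parameter: the finished hazard calc_id is injected as hazard_calculation_id before the risk oqparam is built. A minimal sketch of the same wiring (the .ini path is a placeholder):

hc_id = 1234                                     # calc_id of a finished hazard run
params = {'hazard_calculation_id': str(hc_id)}   # the risk job reads hazard from it
oq = readinput.get_oqparam('job_risk.ini', kw=params)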
Example #6
def _run(job_inis, concurrent_tasks, calc_id, pdb, reuse_input, loglevel,
         exports, params):
    global calc_path
    assert len(job_inis) in (1, 2), job_inis
    # set the logs first of all
    calc_id = logs.init(calc_id, getattr(logging, loglevel.upper()))
    # disable gzip_input
    base.BaseCalculator.gzip_inputs = lambda self: None
    with performance.Monitor('total runtime', measuremem=True) as monitor:
        if os.environ.get('OQ_DISTRIBUTE') not in ('no', 'processpool'):
            os.environ['OQ_DISTRIBUTE'] = 'processpool'
        if len(job_inis) == 1:  # run hazard or risk
            if 'hazard_calculation_id' in params:
                hc_id = int(params['hazard_calculation_id'])
            else:
                hc_id = None
            if hc_id and hc_id < 0:  # interpret negative calculation ids
                calc_ids = datastore.get_calc_ids()
                try:
                    params['hazard_calculation_id'] = str(calc_ids[hc_id])
                except IndexError:
                    raise SystemExit(
                        'There are %d old calculations, cannot '
                        'retrieve the %s' % (len(calc_ids), hc_id))
            oqparam = readinput.get_oqparam(job_inis[0], kw=params)
            calc = base.calculators(oqparam, calc_id)
            if reuse_input:  # enable caching
                oqparam.cachedir = datastore.get_datadir()
            calc.run(concurrent_tasks=concurrent_tasks, pdb=pdb,
                     exports=exports)
        else:  # run hazard + risk
            calc = run2(
                job_inis[0], job_inis[1], calc_id, concurrent_tasks, pdb,
                reuse_input, loglevel, exports, params)

    logging.info('Total time spent: %s s', monitor.duration)
    logging.info('Memory allocated: %s', general.humansize(monitor.mem))
    print('See the output with silx view %s' % calc.datastore.filename)
    calc_path, _ = os.path.splitext(calc.datastore.filename)  # used below
    return calc
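
The OQ_DISTRIBUTE handling above defaults the runner to a process pool; forcing serial execution, as Example #8 does for --no-distribute, is the usual way to make pdb debugging usable:

import os
os.environ['OQ_DISTRIBUTE'] = 'no'   # run all tasks in-process, no pool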
Example #7
"""
Determine the total number of ruptures in all the calculations in oqdata
"""
import glob
from openquake.commonlib.datastore import get_datadir, read
from openquake.calculators.views import text_table


def main(datadir):
    lst = []
    for fname in glob.glob(datadir + '/calc_*.hdf5'):
        try:
            dstore = read(fname)
        except OSError:  # already open
            continue
        with dstore:
            try:
                descr = dstore['oqparam'].description
            except (KeyError, AttributeError):  # not a calculation
                continue
            try:
                tot_ruptures = dstore['full_lt/sg_data']['totrup'].sum()
            except KeyError:  # datastore without rupture data
                tot_ruptures = 0
            lst.append((descr, tot_ruptures))
    print(text_table(lst, ['calculation', 'total number of ruptures']))


if __name__ == '__main__':
    main(get_datadir())
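
The same totrup aggregation can be done interactively on a single datastore; a sketch, reusing the read(-1) convention from Example #3 to open the latest calculation:

from openquake.commonlib.datastore import read

with read(-1) as dstore:   # -1 opens the most recent calculation
    print(dstore['full_lt/sg_data']['totrup'].sum())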
Example #8
def main(no_distribute=False,
         yes=False,
         upgrade_db=False,
         db_version=False,
         what_if_I_upgrade=False,
         list_hazard_calculations=False,
         list_risk_calculations=False,
         delete_uncompleted_calculations=False,
         multi=False,
         reuse_input=False,
         *,
         log_file=None,
         make_html_report=None,
         run=None,
         delete_calculation: int = None,
         hazard_calculation_id: int = None,
         list_outputs: int = None,
         show_log=None,
         export_output=None,
         export_outputs=None,
         param='',
         config_file=None,
         exports='',
         log_level='info'):
    """
    Run a calculation using the traditional command line API
    """
    user_name = getpass.getuser()

    if not run:
        # configure basic logging
        logging.basicConfig(level=logging.INFO)

    if config_file:
        config.read(os.path.abspath(os.path.expanduser(config_file)),
                    limit=int,
                    soft_mem_limit=int,
                    hard_mem_limit=int,
                    port=int,
                    multi_user=valid.boolean,
                    serialize_jobs=valid.boolean,
                    strict=valid.boolean,
                    code=exec)

    if no_distribute:
        os.environ['OQ_DISTRIBUTE'] = 'no'

    # check if the datadir exists
    datadir = datastore.get_datadir()
    if not os.path.exists(datadir):
        os.makedirs(datadir)

    dbserver.ensure_on()
    # check if we are talking to the right server
    err = dbserver.check_foreign()
    if err:
        sys.exit(err)

    if upgrade_db:
        msg = logs.dbcmd('what_if_I_upgrade', 'read_scripts')
        if msg.startswith('Your database is already updated'):
            pass
        elif yes or confirm('Proceed? (y/n) '):
            logs.dbcmd('upgrade_db')
        sys.exit(0)

    if db_version:
        safeprint(logs.dbcmd('db_version'))
        sys.exit(0)

    if what_if_I_upgrade:
        safeprint(logs.dbcmd('what_if_I_upgrade', 'extract_upgrade_scripts'))
        sys.exit(0)

    # check if the db is outdated
    outdated = logs.dbcmd('check_outdated')
    if outdated:
        sys.exit(outdated)

    # hazard or hazard+risk
    if hazard_calculation_id == -1:
        # get the latest calculation of the current user
        hc_id = get_job_id(hazard_calculation_id, user_name)
    elif hazard_calculation_id:
        # make it possible to use calculations made by another user
        hc_id = get_job_id(hazard_calculation_id)
    else:
        hc_id = None
    if run:
        pars = dict(p.split('=', 1) for p in param.split(',')) if param else {}
        if reuse_input:
            pars['cachedir'] = datadir
        log_file = os.path.expanduser(log_file) \
            if log_file is not None else None
        job_inis = [os.path.expanduser(f) for f in run]
        jobs = create_jobs(job_inis, log_level, log_file, user_name, hc_id,
                           multi)
        for job in jobs:
            job.params.update(pars)
            job.params['exports'] = exports
        run_jobs(jobs)

    # hazard
    elif list_hazard_calculations:
        for line in logs.dbcmd('list_calculations', 'hazard',
                               getpass.getuser()):
            safeprint(line)
    elif delete_calculation is not None:
        del_calculation(delete_calculation, yes)
    # risk
    elif list_risk_calculations:
        for line in logs.dbcmd('list_calculations', 'risk', getpass.getuser()):
            safeprint(line)

    # export
    elif make_html_report:
        safeprint('Written %s' % make_report(make_html_report))
        sys.exit(0)

    elif list_outputs is not None:
        hc_id = get_job_id(list_outputs)
        for line in logs.dbcmd('list_outputs', hc_id):
            safeprint(line)
    elif show_log is not None:
        hc_id = get_job_id(show_log)
        for line in logs.dbcmd('get_log', hc_id):
            safeprint(line)

    elif export_output is not None:
        output_id, target_dir = export_output
        dskey, calc_id, datadir = logs.dbcmd('get_output', int(output_id))
        for line in core.export_output(dskey, calc_id, datadir,
                                       os.path.expanduser(target_dir), exports
                                       or DEFAULT_EXPORTS):
            safeprint(line)

    elif export_outputs is not None:
        job_id, target_dir = export_outputs
        hc_id = get_job_id(job_id)
        for line in core.export_outputs(hc_id, os.path.expanduser(target_dir),
                                        exports or DEFAULT_EXPORTS):
            safeprint(line)

    elif delete_uncompleted_calculations:
        logs.dbcmd('delete_uncompleted_calculations', getpass.getuser())
    else:
        print("Please pass some option, see oq engine --help")
Example #9
# A server name can be specified to customize the WebUI in case multiple
# installations of the Engine are available. This helps avoid confusion
# between different installations when the WebUI is used
SERVER_NAME = socket.gethostname()

# Expose the WebUI interface, otherwise only the REST API will be available
WEBUI = True

# OpenQuake Standalone tools (IPT, Taxtweb, Taxonomy Glossary)
if STANDALONE and WEBUI:
    INSTALLED_APPS += ('openquakeplatform', )

    INSTALLED_APPS += STANDALONE_APPS

    FILE_PATH_FIELD_DIRECTORY = datastore.get_datadir()

    CONTEXT_PROCESSORS = TEMPLATES[0]['OPTIONS']['context_processors']
    CONTEXT_PROCESSORS.insert(0, 'django.template.context_processors.request')
    CONTEXT_PROCESSORS.append('openquakeplatform.utils.oq_context_processor')

try:
    # Try to load a local_settings.py from the current folder; this is useful
    # when packages are used. A custom local_settings.py can be placed in
    # /usr/share/openquake/engine, avoiding changes inside the python package
    from local_settings import *
except ImportError:
    # If no local_settings.py is available in the current folder let's try to
    # load it from openquake/server/local_settings.py
    try:
        from openquake.server.local_settings import *
    except ImportError:
        # no local settings at all: keep the defaults defined above
        pass
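
A minimal, hypothetical local_settings.py that either fallback import above would pick up; the names override the module-level defaults shown earlier:

# local_settings.py (hypothetical override)
SERVER_NAME = 'engine-prod-01'   # label this installation in the WebUI
WEBUI = False                    # expose only the REST API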
Example #10
import os
import sys
import time
import getpass
import subprocess
from openquake.baselib import config
from openquake.server import dbserver
from openquake.calculators import base
from openquake.commonlib import logs, datastore, readinput

datadir = datastore.get_datadir()


def purge_one(calc_id, user, force):
    """
    Remove one calculation ID from the database and remove its datastore
    """
    logs.dbcmd('del_calc', calc_id, user, force)
    f1 = os.path.join(datadir, 'calc_%s.hdf5' % calc_id)
    f2 = os.path.join(datadir, 'calc_%s_tmp.hdf5' % calc_id)
    for f in [f1, f2]:
        if os.path.exists(f):  # not removed yet
            os.remove(f)
            print('Removed %s' % f)
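
A hedged usage sketch of the helper above:

purge_one(42, getpass.getuser(), force=False)   # drop job #42 and its HDF5 files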

class FetchOpenQuake:
    # NB: the class statement was missing from this excerpt; the name is
    # inferred from the print message at the end of __init__
    def __init__(self, job_ini, event_info, no_distribute=False):
        """
        Initialize a calculation (reinvented from openquake.engine.engine)

        :param job_ini:
            Path to configuration file/archive or
            dictionary of parameters with at least a key "calculation_mode"
        """

        user_name = getpass.getuser()

        if no_distribute:
            os.environ['OQ_DISTRIBUTE'] = 'no'

        # check if the datadir exists
        datadir = datastore.get_datadir()
        if not os.path.exists(datadir):
            os.makedirs(datadir)

        #dbserver.ensure_on()
        if dbserver.get_status() == 'not-running':
            if config.dbserver.multi_user:
                sys.exit('Please start the DbServer: '
                         'see the documentation for details')
            # otherwise start the DbServer automatically; NB: I tried to use
            # multiprocessing.Process(target=run_server).start() and apparently
            # it works, but then run-demos.sh hangs after the end of the first
            # calculation, but only if the DbServer is started by oq engine (!?)
            # Here is a trick to activate OpenQuake's dbserver: cd to the
            # openquake directory, start the dbserver in a subprocess, then
            # cd back to the original working directory
            owd = os.getcwd()
            os.chdir(os.path.dirname(os.path.realpath(__file__)))
            subprocess.Popen([
                sys.executable, '-m', 'openquake.commands', 'dbserver', 'start'
            ])
            os.chdir(owd)

            # wait for the dbserver to start
            waiting_seconds = 30
            while dbserver.get_status() == 'not-running':
                if waiting_seconds == 0:
                    sys.exit(
                        'The DbServer cannot be started after 30 seconds. '
                        'Please check the configuration')
                time.sleep(1)
                waiting_seconds -= 1

        # check if we are talking to the right server
        err = dbserver.check_foreign()
        if err:
            sys.exit(err)

        # Copy the event_info
        self.event_info = event_info

        # Create a job
        #self.job = logs.init("job", job_ini, logging.INFO, None, None, None)
        dic = readinput.get_params(job_ini)
        #dic['hazard_calculation_id'] = self.job.calc_id

        # Create the job log
        self.log = logs.init('job', dic, logging.INFO, None, None, None)

        # Get openquake parameters
        self.oqparam = self.log.get_oqparam()

        # Create the calculator
        self.calculator = base.calculators(self.oqparam, self.log.calc_id)
        self.calculator.from_engine = True

        print('FetchOpenQuake: OpenQuake Hazard Calculator initiated.')
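
A hypothetical driver for the class above (the name is inferred from the final print); run() mirrors the calc.run(...) calls in the earlier examples:

calc = FetchOpenQuake('job.ini', event_info={}, no_distribute=True)
calc.calculator.run()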