Example #1
def export_from_datastore(output_key, output, target):
    """
    :param output_key: a pair (ds_key, fmt)
    :param output: an Output instance
    :param target: a directory, temporary when called from the engine server
    """
    ds_key, fmt = output_key
    assert ds_key == output.ds_key, (ds_key, output.ds_key)
    datadir = os.path.dirname(output.oq_job.ds_calc_dir)
    dstore = datastore.read(output.oq_job.id, datadir=datadir)
    parent_id = dstore['oqparam'].hazard_calculation_id
    if parent_id:
        dstore.set_parent(datastore.read(parent_id, datadir=datadir))
    dstore.export_dir = target
    try:
        exported = ds_export((output.ds_key, fmt), dstore)
    except KeyError:
        raise DataStoreExportError(
            'Could not export %s in %s' % (output.ds_key, fmt))
    if not exported:
        raise DataStoreExportError(
            'Nothing to export for %s' % output.ds_key)
    elif len(exported) > 1:
        # NB: I am hiding the archive by starting its name with a '.',
        # to avoid confusing the users, since the unzipped files are
        # already in the target directory; the archive is used internally
        # by the WebUI, so it must be there; it would be nice not to
        # generate it when not using the Web UI, but I will leave that
        # feature for after the removal of the old calculators
        archname = '.' + output.ds_key + '-' + fmt + '.zip'
        zipfiles(exported, os.path.join(target, archname))
        return os.path.join(target, archname)
    else:  # single file
        return exported[0]
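A note on the pattern above: when more than one file is exported, the files are zipped into an archive whose name starts with a dot, so it stays hidden next to the unzipped files. A self-contained sketch of the naming convention, with made-up values for ds_key, fmt and target:

import os

ds_key, fmt, target = 'hcurves', 'csv', '/tmp/export'  # hypothetical values
archname = '.' + ds_key + '-' + fmt + '.zip'  # leading '.' hides the archive
print(os.path.join(target, archname))  # /tmp/export/.hcurves-csv.zip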
Example #2
def plot(calc_id, other_id=None, sites='0'):
    """
    Hazard curves plotter.
    """
    # read the hazard data
    haz = datastore.read(calc_id)
    other = datastore.read(other_id) if other_id else None
    oq = haz['oqparam']
    indices = list(map(int, sites.split(',')))
    n_sites = len(haz['sitemesh'])
    if not set(indices) <= set(range(n_sites)):
        invalid = sorted(set(indices) - set(range(n_sites)))
        print('The indices %s are invalid: no graph for them' % invalid)
    valid = sorted(set(range(n_sites)) & set(indices))
    print('Found %d site(s); plotting %d of them' % (n_sites, len(valid)))
    if other is None:
        curves_by_rlz, mean_curves = get_hcurves_and_means(haz)
        single_curve = len(curves_by_rlz) == 1 or not getattr(
            oq, 'individual_curves', True)
        plt = make_figure(valid, oq.imtls, mean_curves,
                          {} if single_curve else curves_by_rlz, 'mean')
    else:
        mean1 = haz['hcurves/mean']
        mean2 = other['hcurves/mean']
        plt = make_figure(valid, oq.imtls, mean1, {'mean': mean2}, 'reference')
    plt.show()
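The site-index validation above is plain set arithmetic; here is a standalone illustration with a fake number of sites in place of the real sitemesh:

n_sites = 5                                    # pretend the mesh has 5 sites
indices = list(map(int, '0,3,7'.split(',')))   # parsed from the sites string
invalid = sorted(set(indices) - set(range(n_sites)))   # [7]
valid = sorted(set(range(n_sites)) & set(indices))     # [0, 3]
print('Found %d site(s); plotting %d of them' % (n_sites, len(valid)))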
Example #3
def plot(calc_id, other_id=None, sites='0'):
    """
    Hazard curves plotter.

    :param calc_id: calculation numeric ID
    :param other_id: ID of another calculation (optional)
    :param sites: comma-separated string with the site indices
    """
    # read the hazard data
    haz = datastore.read(calc_id)
    other = datastore.read(other_id) if other_id else None
    oq = haz['oqparam']
    indices = list(map(int, sites.split(',')))
    n_sites = len(haz['sitemesh'])
    if not set(indices) <= set(range(n_sites)):
        invalid = sorted(set(indices) - set(range(n_sites)))
        print('The indices %s are invalid: no graph for them' % invalid)
    valid = sorted(set(range(n_sites)) & set(indices))
    print('Found %d site(s); plotting %d of them' % (n_sites, len(valid)))
    curves_by_rlz, mean_curves = get_hcurves_and_means(haz)
    if other is None:
        single_curve = len(curves_by_rlz) == 1 or not getattr(
            oq, 'individual_curves', True)
        plt = make_figure(valid, oq.imtls, mean_curves,
                          {} if single_curve else curves_by_rlz, 'mean')
    else:
        _, mean1 = get_hcurves_and_means(haz)
        _, mean2 = get_hcurves_and_means(other)
        plt = make_figure(valid, oq.imtls, mean1, {'mean': mean2}, 'reference')
    plt.show()
Example #4
def plot(calc_id, other_id=None, sites='0'):
    """
    Hazard curves plotter.
    """
    # read the hazard data
    haz = datastore.read(calc_id)
    other = datastore.read(other_id) if other_id else None
    oq = haz['oqparam']
    indices = numpy.array(list(map(int, sites.split(','))))
    n_sites = len(haz['sitecol'])
    if not set(indices) <= set(range(n_sites)):
        invalid = sorted(set(indices) - set(range(n_sites)))
        print('The indices %s are invalid: no graph for them' % invalid)
    valid = sorted(set(range(n_sites)) & set(indices))
    print('Found %d site(s); plotting %d of them' % (n_sites, len(valid)))
    if other is None:
        mean_curves, pmaps = get_pmaps(haz, indices)
        single_curve = len(pmaps) == 1
        plt = make_figure(valid, n_sites, oq.imtls, mean_curves,
                          [] if single_curve else pmaps, 'mean')
    else:
        mean1, _ = get_pmaps(haz, indices)
        mean2, _ = get_pmaps(other, indices)
        plt = make_figure(valid, n_sites, oq.imtls, mean1, [mean2],
                          'reference')
    plt.show()
Example #5
def export_from_datastore(output_key, output, target):
    """
    :param output_key: a pair (ds_key, fmt)
    :param output: an Output instance
    :param target: a directory, temporary when called from the engine server
    """
    ds_key, fmt = output_key
    assert ds_key == output.ds_key, (ds_key, output.ds_key)
    datadir = os.path.dirname(output.oq_job.ds_calc_dir)
    dstore = datastore.read(output.oq_job.id, datadir=datadir)
    parent_id = dstore['oqparam'].hazard_calculation_id
    if parent_id:
        dstore.set_parent(datastore.read(parent_id, datadir=datadir))
    dstore.export_dir = target
    try:
        exported = ds_export((output.ds_key, fmt), dstore)
    except KeyError:
        raise DataStoreExportError('Could not export %s in %s' %
                                   (output.ds_key, fmt))
    if not exported:
        raise DataStoreExportError('Nothing to export for %s' % output.ds_key)
    elif len(exported) > 1:
        # NB: I am hiding the archive by starting its name with a '.',
        # to avoid confusing the users, since the unzipped files are
        # already in the target directory; the archive is used internally
        # by the WebUI, so it must be there; it would be nice not to
        # generate it when not using the Web UI, but I will leave that
        # feature for after the removal of the old calculators
        archname = '.' + output.ds_key + '-' + fmt + '.zip'
        zipfiles(exported, os.path.join(target, archname))
        return os.path.join(target, archname)
    else:  # single file
        return exported[0]
Example #6
def show(what, calc_id=-1):
    """
    Show the content of a datastore.

    :param what: key or view of the datastore
    :param calc_id: numeric calculation ID; if -1, show the last calculation
    """
    if what == 'all':  # show all
        if not os.path.exists(datastore.DATADIR):
            return
        rows = []
        for calc_id in datastore.get_calc_ids(datastore.DATADIR):
            try:
                ds = datastore.read(calc_id)
                oq = ds['oqparam']
                cmode, descr = oq.calculation_mode, oq.description
            except Exception:
                # invalid datastore file, or missing calculation_mode
                # and description attributes, perhaps due to a manual kill
                f = os.path.join(datastore.DATADIR, 'calc_%s.hdf5' % calc_id)
                logging.warning('Unreadable datastore %s', f)
                continue
            else:
                rows.append((calc_id, cmode, descr.encode('utf-8')))
                ds.close()
        for row in sorted(rows, key=lambda row: row[0]):  # by calc_id
            print('#%d %s: %s' % row)
        return
    elif what == 'views':
        for name in sorted(datastore.view):
            print(name)
        return

    ds = datastore.read(calc_id)

    # this part is experimental
    if what == 'rlzs' and 'hcurves' in ds:
        min_value = 0.01  # used in rmsep
        curves_by_rlz, mean_curves = get_hcurves_and_means(ds)
        dists = []
        for rlz, curves in curves_by_rlz.items():
            dist = sum(
                rmsep(mean_curves[imt], curves[imt], min_value)
                for imt in mean_curves.dtype.fields)
            dists.append((dist, rlz))
        print('Realizations in order of distance from the mean curves')
        for dist, rlz in sorted(dists):
            print('%s: rmsep=%s' % (rlz, dist))
    elif what in datastore.view:
        print(datastore.view(what, ds))
    else:
        obj = ds[what]
        if hasattr(obj, 'value'):  # an array
            print(write_csv(io.StringIO(), obj.value))
        else:
            print(obj)
Example #7
def show(what, calc_id=-1):
    """
    Show the content of a datastore.

    :param what: key or view of the datastore
    :param calc_id: numeric calculation ID; if -1, show the last calculation
    """
    if what == 'all':  # show all
        if not os.path.exists(datastore.DATADIR):
            return
        rows = []
        for calc_id in datastore.get_calc_ids(datastore.DATADIR):
            try:
                ds = datastore.read(calc_id)
                oq = ds['oqparam']
                cmode, descr = oq.calculation_mode, oq.description
            except Exception:
                # invalid datastore file, or missing calculation_mode
                # and description attributes, perhaps due to a manual kill
                f = os.path.join(datastore.DATADIR, 'calc_%s.hdf5' % calc_id)
                logging.warning('Unreadable datastore %s', f)
                continue
            else:
                rows.append((calc_id, cmode, descr.encode('utf-8')))
                ds.close()
        for row in sorted(rows, key=lambda row: row[0]):  # by calc_id
            print('#%d %s: %s' % row)
        return
    elif what == 'views':
        for name in sorted(datastore.view):
            print(name)
        return

    ds = datastore.read(calc_id)

    # this part is experimental
    if what == 'rlzs' and 'hcurves' in ds:
        min_value = 0.01  # used in rmsep
        curves_by_rlz, mean_curves = get_hcurves_and_means(ds)
        dists = []
        for rlz, curves in curves_by_rlz.items():
            dist = sum(rmsep(mean_curves[imt], curves[imt], min_value)
                       for imt in mean_curves.dtype.fields)
            dists.append((dist, rlz))
        print('Realizations in order of distance from the mean curves')
        for dist, rlz in sorted(dists):
            print('%s: rmsep=%s' % (rlz, dist))
    elif what in datastore.view:
        print(datastore.view(what, ds))
    else:
        obj = ds[what]
        if hasattr(obj, 'value'):  # an array
            print(write_csv(io.StringIO(), obj.value))
        else:
            print(obj)
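The rmsep helper is not shown in these snippets; a plausible sketch, assuming it computes a root-mean-square difference restricted to points where the reference curve exceeds min_value (an assumption, not the engine's actual code):

import numpy

def rmsep(reference, curve, min_value):
    # assumed behavior: RMS difference where the reference is significant
    ok = reference > min_value
    if not ok.any():
        return 0.0
    diff = reference[ok] - curve[ok]
    return numpy.sqrt((diff ** 2).mean())

print(rmsep(numpy.array([.1, .2]), numpy.array([.1, .3]), .01))  # ~0.0707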
Example #8
def main(what='contents', calc_id: str_or_int = -1, extra=()):
    """
    Show the content of a datastore (by default the last one).
    """
    datadir = datastore.get_datadir()
    if what == 'all':  # show all
        if not os.path.exists(datadir):
            return
        rows = []
        for calc_id in datastore.get_calc_ids(datadir):
            try:
                ds = datastore.read(calc_id)
                oq = ds['oqparam']
                cmode, descr = oq.calculation_mode, oq.description
            except Exception:
                # invalid datastore file, or missing calculation_mode
                # and description attributes, perhaps due to a manual kill
                f = os.path.join(datadir, 'calc_%s.hdf5' % calc_id)
                logging.warning('Unreadable datastore %s', f)
                continue
            else:
                rows.append((calc_id, cmode, descr.encode('utf-8')))
        for row in sorted(rows, key=lambda row: row[0]):  # by calc_id
            print('#%d %s: %s' % row)
        return

    ds = datastore.read(calc_id)

    # this part is experimental
    if view.keyfunc(what) in view:
        print(view(what, ds))
    elif what.split('/', 1)[0] in extract:
        obj = extract(ds, what, *extra)
        if isinstance(obj, hdf5.ArrayWrapper):
            print_(obj)
        elif hasattr(obj, 'dtype') and obj.dtype.names:
            print(writers.write_csv(io.StringIO(), obj))
        else:
            print(obj)
    elif what in ds:
        obj = ds.getitem(what)
        if '__pdcolumns__' in obj.attrs:
            df = ds.read_df(what)
            print(df.sort_values(df.columns[0]))
        elif hasattr(obj, 'items'):  # is a group of datasets
            print(obj)
        else:  # is a single dataset
            obj.refresh()  # for SWMR mode
            print_(hdf5.ArrayWrapper.from_(obj))
    else:
        print('%s not found' % what)

    ds.close()
Example #9
def compare_rups(calc_1: int, calc_2: int):
    """
    Compare the ruptures of two calculations as pandas DataFrames
    """
    with datastore.read(calc_1) as ds1, datastore.read(calc_2) as ds2:
        df1 = ds1.read_df('rup').sort_values(['src_id', 'mag'])
        df2 = ds2.read_df('rup').sort_values(['src_id', 'mag'])
    cols = [col for col in df1.columns if col not in
            {'probs_occur_', 'clon_', 'clat_'}]
    for col in cols:
        a1 = df1[col].to_numpy()
        a2 = df2[col].to_numpy()
        assert len(a1) == len(a2), (len(a1), len(a2))
        _print_diff(a1, a2, df1.index, df2.index, col)
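_print_diff is not part of the snippet; a hypothetical version consistent with its call site (two arrays, two indices, a column name) might look like this:

import numpy

def _print_diff(a1, a2, idx1, idx2, col):
    # hypothetical helper: report how many rows of the column differ;
    # idx1/idx2 could be used to label the differing rows
    if a1.dtype.kind == 'f':
        differ = ~numpy.isclose(a1, a2)
    else:
        differ = a1 != a2
    if differ.any():
        print('%s: %d/%d rows differ' % (col, differ.sum(), len(a1)))

_print_diff(numpy.array([1., 2.]), numpy.array([1., 2.5]),
            None, None, 'mag')  # mag: 1/2 rows differ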
Example #10
 def read_ruptures(self, calc_id, field):
     dstore = datastore.read(calc_id)
     lst = []
     for name, dset in dstore.items():
         if name.startswith('rup_'):
             lst.append(dset[field][:])
     return numpy.concatenate(lst)
Example #11
def expose_outputs(job_id):
    """
    Build a correspondence between the outputs in the datastore and the
    ones in the database.

    :param job_id: job ID
    """
    exportable = set(ekey[0] for ekey in export.export)
    job = models.OqJob.objects.get(pk=job_id)
    with datastore.read(job.id,
                        datadir=os.path.dirname(job.ds_calc_dir)) as dstore:
        # small hack: remove the sescollection outputs from scenario
        # calculators, as requested by Vitor
        calcmode = job.calculation_mode
        if 'scenario' in calcmode and 'sescollection' in exportable:
            exportable.remove('sescollection')
        uhs = dstore['oqparam'].uniform_hazard_spectra
        if uhs and 'hmaps' in dstore:
            models.Output.objects.create_output(job, 'uhs', ds_key='uhs')
        for key in dstore:
            if key in exportable:
                if key == 'realizations' and len(dstore['realizations']) == 1:
                    continue  # do not export a single realization
                models.Output.objects.create_output(job,
                                                    DISPLAY_NAME.get(key, key),
                                                    ds_key=key)
Example #12
def main(calc_id: int, aggregate_by):
    """
    Re-run the postprocessing after an event based risk calculation
    """
    parent = datastore.read(calc_id)
    oqp = parent['oqparam']
    aggby = aggregate_by.split(',')
    for tagname in aggby:
        if tagname not in oqp.aggregate_by:
            raise ValueError('%r not in %s' % (tagname, oqp.aggregate_by))
    dic = dict(calculation_mode='reaggregate',
               description=oqp.description +
               '[aggregate_by=%s]' % aggregate_by,
               user_name=getpass.getuser(),
               is_running=1,
               status='executing',
               pid=os.getpid(),
               hazard_calculation_id=calc_id)
    log = logs.init('job', dic, logging.INFO)
    if os.environ.get('OQ_DISTRIBUTE') not in ('no', 'processpool'):
        os.environ['OQ_DISTRIBUTE'] = 'processpool'
    with log:
        oqp.hazard_calculation_id = parent.calc_id
        parallel.Starmap.init()
        prc = PostRiskCalculator(oqp, log.calc_id)
        prc.run(aggregate_by=aggby)
        engine.expose_outputs(prc.datastore)
Example #13
def main(calc_id: int):
    """
    Reduce the source model of the given (pre)calculation by discarding all
    sources that do not contribute to the hazard.
    """
    if os.environ.get('OQ_DISTRIBUTE') not in ('no', 'processpool'):
        os.environ['OQ_DISTRIBUTE'] = 'processpool'
    with datastore.read(calc_id) as dstore:
        oqparam = dstore['oqparam']
        info = dstore['source_info'][()]
    src_ids = info['source_id']
    num_ids = len(src_ids)
    bad_ids = info[info['eff_ruptures'] == 0]['source_id']
    logging.info('Found %d far away sources', len(bad_ids))
    bad_ids = set(
        src_id.split(';')[0] for src_id in python3compat.decode(bad_ids))
    bad_dupl = bad_ids & get_dupl(python3compat.decode(src_ids))
    if bad_dupl:
        logging.info('Duplicates %s not removed' % bad_dupl)
    ok = info['eff_ruptures'] > 0
    if ok.sum() == 0:
        raise RuntimeError('All sources were filtered away!')
    ok_ids = general.group_array(info[ok][['source_id', 'code']], 'source_id')
    with performance.Monitor() as mon:
        good, total = readinput.reduce_source_model(
            oqparam.inputs['source_model_logic_tree'], ok_ids)
    logging.info('Removed %d/%d sources', total - good, num_ids)
    print(mon)
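get_dupl is referenced but not defined here; a sketch under the assumption that it returns the source IDs occurring more than once:

import collections

def get_dupl(src_ids):
    # assumed behavior: the set of duplicated source IDs
    counts = collections.Counter(src_ids)
    return {src_id for src_id, n in counts.items() if n > 1}

print(get_dupl(['a', 'b', 'a']))  # {'a'}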
Example #14
def export_from_datastore(output_key, calc_id, datadir, target):
    """
    :param output_key: a pair (ds_key, fmt)
    :param calc_id: calculation ID
    :param datadir: directory containing the datastore
    :param target: directory, temporary when called from the engine server
    """
    makedirs(target)
    ds_key, fmt = output_key
    dstore = datastore.read(calc_id, datadir=datadir)
    dstore.export_dir = target
    try:
        exported = export(output_key, dstore)
    except KeyError:
        raise DataStoreExportError(
            'Could not export %s in %s' % output_key)
    if not exported:
        raise DataStoreExportError(
            'Nothing to export for %s' % ds_key)
    elif len(exported) > 1:
        # NB: I am hiding the archive by starting its name with a '.',
        # to avoid confusing the users, since the unzipped files are
        # already in the target directory; the archive is used internally
        # by the WebUI, so it must be there; it would be nice not to
        # generate it when not using the Web UI, but I will leave that
        # feature for after the removal of the old calculators
        archname = '.' + ds_key + '-' + fmt + '.zip'
        zipfiles(exported, os.path.join(target, archname))
        return os.path.join(target, archname)
    else:  # single file
        return exported[0]
Example #15
def main(calc_id):
    """
    Import a remote calculation into the local database. Server, username
    and password must be specified in the openquake.cfg file.
    NB: calc_id can be a local pathname to a datastore not already
    present in the database: in that case it is imported in the db.
    """
    dbserver.ensure_on()
    try:
        calc_id = int(calc_id)
    except ValueError:  # assume calc_id is a pathname
        remote = False
    else:
        remote = True
        job = logs.dbcmd('get_job', calc_id)
        if job is not None:
            sys.exit('There is already a job #%d in the local db' % calc_id)
    if remote:
        datadir = datastore.get_datadir()
        webex = WebExtractor(calc_id)
        hc_id = webex.oqparam.hazard_calculation_id
        if hc_id:
            sys.exit('The job has a parent (#%d) and cannot be '
                     'downloaded' % hc_id)
        webex.dump('%s/calc_%d.hdf5' % (datadir, calc_id))
        webex.close()
    with datastore.read(calc_id) as dstore:
        pprint.pprint(dstore.get_attrs('/'))
        engine.expose_outputs(dstore, status='complete')
    logging.info('Imported calculation %s successfully', calc_id)
Example #16
 def get_oqparam(self):
     """
     Return an OqParam object as read from the database
     """
     datadir = os.path.dirname(self.ds_calc_dir)
     dstore = datastore.read(self.id, datadir=datadir)
     return dstore['oqparam']
Example #17
def plot_sites(calc_id):
    """
    Plot the hazard sites of a calculation with one or more bounding boxes
    (a box for each tile). If point sources are present, plot them too as
    circles of radius `integration_distance + max rupture radius`.
    """
    import matplotlib.pyplot as p
    from matplotlib.patches import Rectangle
    dstore = datastore.read(calc_id)
    sitecol = dstore['sitecol']
    csm = dstore['composite_source_model']
    oq = dstore['oqparam']
    fig = p.figure()
    ax = fig.add_subplot(111)
    ax.grid(True)
    tiles = make_tiles(sitecol, oq.sites_per_tile, oq.maximum_distance)
    print('There are %d tiles' % len(tiles))
    for tile in tiles:
        xs = []
        ys = []
        area = []
        for src in csm.get_sources():
            if src in tile and getattr(src, 'location', None):
                xs.append(src.location.x)
                ys.append(src.location.y)
                radius = src._get_max_rupture_projection_radius()
                r = (tile.maximum_distance[src.tectonic_region_type] +
                     radius) / tile.KM_ONE_DEGREE
                a = numpy.pi * r**2
                area.append(a)
        ax.add_patch(Rectangle(*tile.get_rectangle(), fill=False))
        p.scatter(tile.fix_lons(xs), ys, marker='o', s=area)
        p.scatter(tile.fix_lons(sitecol.lons), sitecol.lats, marker='+')
    p.show()
Example #18
    def run_calc(self, testfile, job_ini, **kw):
        """
        Return the outputs of the calculation as a dictionary
        """
        inis = job_ini.split(',')
        assert len(inis) in (1, 2), inis
        self.calc = self.get_calc(testfile, inis[0], **kw)
        self.edir = tempfile.mkdtemp()
        with self.calc._monitor:
            result = self.calc.run(export_dir=self.edir,
                                   exports=kw.get('exports', ''))
        self.calc.datastore.close()
        duration = {inis[0]: self.calc._monitor.duration}
        if len(inis) == 2:
            hc_id = self.calc.datastore.calc_id
            calc = self.get_calc(testfile,
                                 inis[1],
                                 hazard_calculation_id=str(hc_id),
                                 **kw)
            with calc._monitor:
                exported = calc.run(export_dir=self.edir,
                                    exports=kw.get('exports', ''))
                result.update(exported)
            duration[inis[1]] = calc._monitor.duration
            self.calc = calc

        # reopen datastore, since some tests need to export from it
        dstore = datastore.read(self.calc.datastore.calc_id)
        self.calc.datastore = dstore
        self.__class__.duration += duration
        return result
Example #19
 def get_rupdict(self):  # used in extract_rupture_info
     """
     :returns: a dictionary with the parameters of the rupture
     """
     assert len(self.proxies) == 1, 'Please specify a slice of length 1'
     dic = {'trt': self.trt}
     with datastore.read(self.filename) as dstore:
         rupgeoms = dstore['rupgeoms']
         rec = self.proxies[0].rec
         geom = rupgeoms[rec['id']]
         num_surfaces = int(geom[0])
         start = 2 * num_surfaces + 1
         dic['lons'], dic['lats'], dic['deps'] = [], [], []
         for i in range(1, num_surfaces * 2, 2):
             s1, s2 = int(geom[i]), int(geom[i + 1])
             size = s1 * s2 * 3
             arr = geom[start:start + size].reshape(3, -1)
             dic['lons'].append(arr[0])
             dic['lats'].append(arr[1])
             dic['deps'].append(arr[2])
             start += size
         rupclass, surclass = code2cls[rec['code']]
         dic['rupture_class'] = rupclass.__name__
         dic['surface_class'] = surclass.__name__
         dic['hypo'] = rec['hypo']
         dic['occurrence_rate'] = rec['occurrence_rate']
         dic['et_id'] = rec['et_id']
         dic['n_occ'] = rec['n_occ']
         dic['seed'] = rec['seed']
         dic['mag'] = rec['mag']
         dic['srcid'] = rec['source_id']
     return dic
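The indexing above implies a packed layout for each geometry record: the number of surfaces first, then an (s1, s2) shape pair per surface, then the flattened lon/lat/depth triples. A toy round-trip under that assumption, for a single 2x2 surface:

import numpy

geom = numpy.concatenate([[1, 2, 2], numpy.arange(12.)])  # made-up record
num_surfaces = int(geom[0])
s1, s2 = int(geom[1]), int(geom[2])
start = 2 * num_surfaces + 1  # first coordinate after the header
arr = geom[start:start + s1 * s2 * 3].reshape(3, -1)
lons, lats, deps = arr[0], arr[1], arr[2]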
Example #20
 def get_oqparam(self):
     """
     Return an OqParam object as read from the database
     """
     datadir = os.path.dirname(self.ds_calc_dir)
     dstore = datastore.read(self.id, datadir=datadir)
     return dstore['oqparam']
Example #21
def plot_sites(calc_id):
    """
    Plot the hazard sites of a calculation with one or more bounding boxes
    (a box for each tile). If point sources are present, plot them too as
    circles of radius `integration_distance + max rupture radius`.
    """
    import matplotlib.pyplot as p
    from matplotlib.patches import Rectangle
    dstore = datastore.read(calc_id)
    sitecol = dstore['sitecol']
    csm = dstore['composite_source_model']
    oq = dstore['oqparam']
    fig = p.figure()
    ax = fig.add_subplot(111)
    ax.grid(True)
    tiles = make_tiles(sitecol, oq.sites_per_tile, oq.maximum_distance)
    print('There are %d tiles' % len(tiles))
    for tile in tiles:
        xs = []
        ys = []
        area = []
        for src in csm.get_sources():
            if src in tile and getattr(src, 'location', None):
                xs.append(src.location.x)
                ys.append(src.location.y)
                radius = src._get_max_rupture_projection_radius()
                r = (tile.maximum_distance[src.tectonic_region_type] +
                     radius) / tile.KM_ONE_DEGREE
                a = numpy.pi * r ** 2
                area.append(a)
        ax.add_patch(Rectangle(*tile.get_rectangle(), fill=False))
        p.scatter(tile.fix_lons(xs), ys, marker='o', s=area)
        p.scatter(tile.fix_lons(sitecol.lons), sitecol.lats, marker='+')
    p.show()
Example #22
def web_engine_get_outputs(request, calc_id, **kwargs):
    job = logs.dbcmd('get_job', calc_id)
    with datastore.read(job.ds_calc_dir + '.hdf5') as ds:
        hmaps = 'png' in ds
    size_mb = '?' if job.size_mb is None else '%.2f' % job.size_mb
    return render(request, "engine/get_outputs.html",
                  dict(calc_id=calc_id, size_mb=size_mb, hmaps=hmaps))
Example #23
def expose_outputs(job_id):
    """
    Build a correspondence between the outputs in the datastore and the
    ones in the database.

    :param job_id: job ID
    """
    exportable = set(ekey[0] for ekey in export.export)
    job = models.OqJob.objects.get(pk=job_id)
    with datastore.read(
            job.id, datadir=os.path.dirname(job.ds_calc_dir)) as dstore:
        # small hack: remove the sescollection outputs from scenario
        # calculators, as requested by Vitor
        calcmode = job.calculation_mode
        if 'scenario' in calcmode and 'sescollection' in exportable:
            exportable.remove('sescollection')
        uhs = dstore['oqparam'].uniform_hazard_spectra
        if uhs and 'hmaps' in dstore:
            models.Output.objects.create_output(job, 'uhs', ds_key='uhs')
        for key in dstore:
            if key in exportable:
                if key == 'realizations' and len(dstore['realizations']) == 1:
                    continue  # do not export a single realization
                models.Output.objects.create_output(
                    job, DISPLAY_NAME.get(key, key), ds_key=key)
Example #24
def export_from_datastore(output_key, calc_id, datadir, target):
    """
    :param output_key: a pair (ds_key, fmt)
    :param calc_id: calculation ID
    :param datadir: directory containing the datastore
    :param target: directory, temporary when called from the engine server
    """
    makedirs(target)
    ds_key, fmt = output_key
    dstore = datastore.read(calc_id, datadir=datadir)
    dstore.export_dir = target
    try:
        exported = export(output_key, dstore)
    except KeyError:
        raise DataStoreExportError('Could not export %s in %s' % output_key)
    if not exported:
        raise DataStoreExportError('Nothing to export for %s' % ds_key)
    elif len(exported) > 1:
        # NB: I am hiding the archive by starting its name with a '.',
        # to avoid confusing the users, since the unzipped files are
        # already in the target directory; the archive is used internally
        # by the WebUI, so it must be there; it would be nice not to
        # generate it when not using the Web UI, but I will leave that
        # feature for after the removal of the old calculators
        archname = '.' + ds_key + '-' + fmt + '.zip'
        zipfiles(exported, os.path.join(target, archname))
        return os.path.join(target, archname)
    else:  # single file
        return exported[0]
Example #25
    def pre_execute(self):
        """
        Check if there is a pre_calculator or a previous calculation ID.
        If yes, read the inputs by invoking the precalculator or by retrieving
        the previous calculation; if not, read the inputs directly.
        """
        job_info = hdf5.LiteralAttrs()
        if self.pre_calculator is not None:
            # the parameter hazard_calculation_id is only meaningful if
            # there is a precalculator
            precalc_id = self.oqparam.hazard_calculation_id
            if precalc_id is None:  # recompute everything
                precalc = calculators[self.pre_calculator](
                    self.oqparam, self.monitor("precalculator"), self.datastore.calc_id
                )
                precalc.run()
                if "scenario" not in self.oqparam.calculation_mode:
                    self.csm = precalc.csm
                pre_attrs = vars(precalc)
                for name in ("riskmodel", "assets_by_site"):
                    if name in pre_attrs:
                        setattr(self, name, getattr(precalc, name))
            else:  # read previously computed data
                parent = datastore.read(precalc_id)
                self.datastore.set_parent(parent)
                # copy missing parameters from the parent
                params = {
                    name: value for name, value in vars(parent["oqparam"]).items() if name not in vars(self.oqparam)
                }
                self.save_params(**params)
                self.read_risk_data()
            self.init()
        else:  # we are in a basic calculator
            self.read_risk_data()
            if "source" in self.oqparam.inputs:
                with self.monitor("reading composite source model", autoflush=True):
                    self.csm = readinput.get_composite_source_model(self.oqparam)
                    self.datastore["csm_info"] = self.csm.info
                    self.rup_data = {}

                    # we could manage limits here
                    vars(job_info).update(readinput.get_job_info(self.oqparam, self.csm, self.sitecol))
                    logging.info("Expected output size=%s", job_info.hazard["output_weight"])
                    logging.info("Total weight of the sources=%s", job_info.hazard["input_weight"])
            self.init()
            if "source" in self.oqparam.inputs:
                with self.monitor("managing sources", autoflush=True):
                    self.send_sources()
                self.manager.store_source_info(self.datastore)
                attrs = self.datastore.hdf5["composite_source_model"].attrs
                attrs["weight"] = self.csm.weight
                attrs["filtered_weight"] = self.csm.filtered_weight
                attrs["maxweight"] = self.csm.maxweight

        job_info.hostname = socket.gethostname()
        if hasattr(self, "riskmodel"):
            job_info.require_epsilons = bool(self.riskmodel.covs)
        self.job_info = job_info
        self.datastore.flush()
Example #26
def event_based_damage(df, param, monitor):
    """
    :param df: a DataFrame of GMFs with fields sid, eid, gmv_X, ...
    :param param: a dictionary of parameters coming from the job.ini
    :param monitor: a Monitor instance
    :returns: (damages (eid, kid) -> LDc plus damages (A, Dc))
    """
    mon_risk = monitor('computing risk', measuremem=False)
    dstore = datastore.read(param['hdf5path'])
    K = param['K']
    with monitor('reading data'):
        if hasattr(df, 'start'):  # it is actually a slice
            df = dstore.read_df('gmf_data', slc=df)
        assets_df = dstore.read_df('assetcol/array', 'ordinal')
        kids = (dstore['assetcol/kids'][:] if K else numpy.zeros(
            len(assets_df), U16))
        crmodel = monitor.read('crmodel')
    dmg_csq = crmodel.get_dmg_csq()
    ci = {dc: i + 1 for i, dc in enumerate(dmg_csq)}
    dmgcsq = zero_dmgcsq(assets_df, crmodel)  # shape (A, L, Dc)
    A, L, Dc = dmgcsq.shape
    D = len(crmodel.damage_states)
    loss_names = crmodel.oqparam.loss_names
    with mon_risk:
        dddict = general.AccumDict(accum=numpy.zeros((L, Dc), F32))  # eid, kid
        for sid, asset_df in assets_df.groupby('site_id'):
            # working one site at a time
            gmf_df = df[df.sid == sid]
            if len(gmf_df) == 0:
                continue
            eids = gmf_df.eid.to_numpy()
            for taxo, adf in asset_df.groupby('taxonomy'):
                out = crmodel.get_output(taxo, adf, gmf_df)
                aids = adf.index.to_numpy()
                for lti, lt in enumerate(loss_names):
                    fractions = out[lt]
                    Asid, E, D = fractions.shape
                    ddd = numpy.zeros((Asid, E, Dc), F32)
                    ddd[:, :, :D] = fractions
                    for a, asset in enumerate(adf.to_records()):
                        # NB: uncomment the lines below to see the performance
                        # disaster of scenario_damage.bin_ddd; for instance
                        # the Messina test in oq-risk-tests becomes 10x
                        # slower even if it has only 25_736 assets:
                        # scenario_damage.bin_ddd(
                        #     fractions[a], asset['value-number'],
                        #     param['master_seed'] + a)
                        ddd[a] *= asset['value-number']
                        csq = crmodel.compute_csq(asset, fractions[a], lt)
                        for name, values in csq.items():
                            ddd[a, :, ci[name]] = values
                    dmgcsq[aids, lti] += ddd.sum(axis=1)  # sum on the events
                    tot = ddd.sum(axis=0)  # sum on the assets
                    for e, eid in enumerate(eids):
                        dddict[eid, K][lti] += tot[e]
                        if K:
                            for a, aid in enumerate(aids):
                                dddict[eid, kids[aid]][lti] += ddd[a, e]
    return to_dframe(dddict, ci, L), dmgcsq
Example #27
def export(datastore_key, export_dir='.', calc_id=-1, exports='csv'):
    """
    Export an output from the datastore.
    """
    logging.basicConfig(level=logging.INFO)
    dstore = datastore.read(calc_id)
    parent_id = dstore['oqparam'].hazard_calculation_id
    if parent_id:
        dstore.set_parent(datastore.read(parent_id))
    dstore.export_dir = export_dir
    with performance.Monitor('export', measuremem=True) as mon:
        for fmt in exports.split(','):
            fnames = export_((datastore_key, fmt), dstore)
            nbytes = sum(os.path.getsize(f) for f in fnames)
            print('Exported %s in %s' % (general.humansize(nbytes), fnames))
    if mon.duration > 1:
        print(mon)
Example #28
def main(datastore_key, calc_id: int = -1, *, exports='csv', export_dir='.'):
    """
    Export an output from the datastore. To see the available datastore
    keys, use the command `oq info exports`.
    """
    dstore = datastore.read(calc_id)
    parent_id = dstore['oqparam'].hazard_calculation_id
    if parent_id:
        dstore.parent = datastore.read(parent_id)
    dstore.export_dir = export_dir
    with performance.Monitor('export', measuremem=True) as mon:
        for fmt in exports.split(','):
            fnames = export_((datastore_key, fmt), dstore)
            nbytes = sum(os.path.getsize(f) for f in fnames)
            print('Exported %s in %s' % (general.humansize(nbytes), fnames))
    if mon.duration > 1:
        print(mon)
    dstore.close()
Example #29
def plot_ac(calc_id):
    """
    Aggregate loss curves plotter.
    """
    # read the hazard data
    dstore = datastore.read(calc_id)
    agg_curve = dstore['agg_curve-rlzs']
    plt = make_figure(agg_curve)
    plt.show()
Example #30
 def read_previous(self, precalc_id):
     parent = datastore.read(precalc_id)
     self.datastore.set_parent(parent)
     # copy missing parameters from the parent
     params = {name: value for name, value in
               vars(parent['oqparam']).items()
               if name not in vars(self.oqparam)}
     self.save_params(**params)
     self.read_risk_data()
Example #31
def plot_ac(calc_id):
    """
    Aggregate loss curves plotter.
    """
    # read the hazard data
    dstore = datastore.read(calc_id)
    agg_curve = dstore['agg_curve-rlzs']
    plt = make_figure(agg_curve)
    plt.show()
Example #32
def plot_lc(calc_id, aid):
    """
    Plot loss curves given a calculation id and an asset ordinal.
    """
    # read the hazard data
    dstore = datastore.read(calc_id)
    oq = dstore['oqparam']
    asset = dstore['assetcol'][aid]
    plt = make_figure(asset, oq.loss_ratios, dstore['rcurves-stats'][aid])
    plt.show()
Example #33
def plot_lc(calc_id, aid):
    """
    Plot loss curves given a calculation id and an asset ordinal.
    """
    # read the hazard data
    dstore = datastore.read(calc_id)
    oq = dstore['oqparam']
    asset = dstore['assetcol'][aid]
    plt = make_figure(asset, oq.loss_ratios, dstore['rcurves-stats'][aid])
    plt.show()
Example #34
    def execute(self):
        oq = self.oqparam
        self.set_param()
        self.offset = 0
        if oq.hazard_calculation_id:  # from ruptures
            self.datastore.parent = datastore.read(oq.hazard_calculation_id)
        elif hasattr(self, 'csm'):  # from sources
            self.build_events_from_sources()
            if (oq.ground_motion_fields is False
                    and oq.hazard_curves_from_gmfs is False):
                return {}
        elif 'rupture_model' not in oq.inputs:
            logging.warning(
                'There is no rupture_model, the calculator will just '
                'import data without performing any calculation')
            fake = logictree.FullLogicTree.fake()
            self.datastore['full_lt'] = fake  # needed to expose the outputs
            self.datastore['weights'] = [1.]
            return {}
        else:  # scenario
            self._read_scenario_ruptures()
            if (oq.ground_motion_fields is False
                    and oq.hazard_curves_from_gmfs is False):
                return {}

        if oq.ground_motion_fields:
            imts = oq.get_primary_imtls()
            nrups = len(self.datastore['ruptures'])
            base.create_gmf_data(self.datastore, imts, oq.get_sec_imts())
            self.datastore.create_dset('gmf_data/sigma_epsilon',
                                       sig_eps_dt(oq.imtls))
            self.datastore.create_dset('gmf_data/time_by_rup',
                                       time_dt, (nrups, ),
                                       fillvalue=None)

        # compute_gmfs in parallel
        nr = len(self.datastore['ruptures'])
        logging.info('Reading {:_d} ruptures'.format(nr))
        allargs = [(rgetter, self.param) for rgetter in gen_rupture_getters(
            self.datastore, oq.concurrent_tasks)]
        # reading the args is fast since we are not prefiltering the ruptures,
        # nor reading the geometries; using an iterator would cause the usual
        # damned h5py error, last seen on macos
        self.datastore.swmr_on()
        smap = parallel.Starmap(self.core_task.__func__,
                                allargs,
                                h5=self.datastore.hdf5)
        smap.monitor.save('srcfilter', self.srcfilter)
        acc = smap.reduce(self.agg_dicts, self.acc0())
        if 'gmf_data' not in self.datastore:
            return acc
        if oq.ground_motion_fields:
            with self.monitor('saving avg_gmf', measuremem=True):
                self.save_avg_gmf()
        return acc
Example #35
def get_oqparam(request, job_id):
    """
    Return the calculation parameters as a JSON
    """
    try:
        job = logs.dbcmd('get_job', int(job_id), getpass.getuser())
    except dbapi.NotFound:
        return HttpResponseNotFound()
    with datastore.read(job.ds_calc_dir + '.hdf5') as ds:
        oq = ds['oqparam']
    return HttpResponse(content=json.dumps(vars(oq)), content_type=JSON)
Example #36
 def read_previous(self, precalc_id):
     parent = datastore.read(precalc_id)
     self.datastore.set_parent(parent)
     # copy missing parameters from the parent
     params = {
         name: value
         for name, value in vars(parent['oqparam']).items()
         if name not in vars(self.oqparam)
     }
     self.save_params(**params)
     self.read_risk_data()
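The dict comprehension above implements parameter inheritance: any oqparam attribute set on the parent but missing on the child is copied over. A toy illustration with plain dictionaries standing in for the two oqparam objects:

parent_params = {'investigation_time': 50, 'description': 'parent job'}
child_params = {'investigation_time': 1}
missing = {name: value for name, value in parent_params.items()
           if name not in child_params}
print(missing)  # {'description': 'parent job'}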
Example #37
def make_report(isodate='today'):
    """
    Build a HTML report with the computations performed at the given isodate.
    Return the name of the report, which is saved in the current directory.
    """
    if isodate == 'today':
        isodate = date.today()
    else:
        isodate = date(*time.strptime(isodate, '%Y-%m-%d')[:3])
    isodate1 = isodate + timedelta(1)  # +1 day

    tag_ids = []
    tag_status = []
    tag_contents = []

    # the fetcher returns a header which is stripped with [1:]
    jobs = dbcmd(
        'fetch', ALL_JOBS, isodate.isoformat(), isodate1.isoformat())[1:]
    page = '<h2>%d job(s) finished before midnight of %s</h2>' % (
        len(jobs), isodate)
    for job_id, user, status, ds_calc in jobs:
        tag_ids.append(job_id)
        tag_status.append(status)
        stats = dbcmd('fetch', JOB_STATS, job_id)
        if not stats[1:]:
            continue
        (job_id, user, start_time, stop_time, status) = stats[1]
        try:
            ds = read(job_id, datadir=os.path.dirname(ds_calc))
            txt = view_fullreport('fullreport', ds).decode('utf-8')
            report = html_parts(txt)
        except Exception as exc:
            report = dict(
                html_title='Could not generate report: %s' % cgi.escape(
                    str(exc), quote=True),
                fragment='')

        page = report['html_title']

        add_duration(stats)
        page += html(stats)

        page += report['fragment']

        tag_contents.append(page)

    page = make_tabs(tag_ids, tag_status, tag_contents) + (
        'Report last updated: %s' % datetime.now())
    fname = 'jobs-%s.html' % isodate
    with open(fname, 'w') as f:
        f.write(PAGE_TEMPLATE % page.encode('utf-8'))
    return fname
Example #38
def purge(calc_id):
    """
    Remove the given calculation. If calc_id is 0, remove all calculations.
    """
    if not calc_id:
        for fname in os.listdir(datastore.DATADIR):
            if re.match(r'calc_\d+\.hdf5', fname):
                os.remove(os.path.join(datastore.DATADIR, fname))
                print('Removed %s' % fname)
    else:
        hdf5path = datastore.read(calc_id).hdf5path
        os.remove(hdf5path)
        print('Removed %s' % hdf5path)
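Note that the filename pattern has to be a raw string, since '\d' in a plain string literal is an invalid escape in recent Python versions. A quick sanity check of the pattern:

import re

assert re.match(r'calc_\d+\.hdf5', 'calc_42.hdf5')
assert re.match(r'calc_\d+\.hdf5', 'notes.txt') is None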
Example #39
 def read_previous(self, precalc_id):
     parent = datastore.read(precalc_id)
     check_precalc_consistency(self.oqparam.calculation_mode,
                               parent['oqparam'].calculation_mode)
     self.datastore.parent = parent
     # copy missing parameters from the parent
     params = {
         name: value
         for name, value in vars(parent['oqparam']).items()
         if name not in vars(self.oqparam)
     }
     self.save_params(**params)
     self.read_risk_data()
Example #40
 def get_proxies(self, min_mag=0):
     """
     :returns: a list of RuptureProxies
     """
     proxies = []
     with datastore.read(self.filename) as dstore:
         rupgeoms = dstore['rupgeoms']
         for proxy in self.proxies:
             if proxy['mag'] < min_mag:
                 continue
             proxy.geom = rupgeoms[proxy['geom_id']]
             proxies.append(proxy)
     return proxies
Example #41
def event_based_damage(df, param, monitor):
    """
    :param df: a DataFrame of GMFs with fields sid, eid, gmv_...
    :param param: a dictionary of parameters coming from the job.ini
    :param monitor: a Monitor instance
    :returns: (damages (eid, kid) -> LDc plus damages (A, Dc))
    """
    mon_risk = monitor('computing risk', measuremem=False)
    dstore = datastore.read(param['hdf5path'])
    K = param['K']
    with monitor('reading data'):
        if hasattr(df, 'start'):  # it is actually a slice
            df = dstore.read_df('gmf_data', slc=df)
        assets_df = dstore.read_df('assetcol/array', 'ordinal')
        kids = (dstore['assetcol/kids'][:] if K else numpy.zeros(
            len(assets_df), U16))
        crmodel = monitor.read('crmodel')
    dmg_csq = crmodel.get_dmg_csq()
    ci = {dc: i + 1 for i, dc in enumerate(dmg_csq)}
    dmgcsq = zero_dmgcsq(assets_df, crmodel)
    A, L, Dc = dmgcsq.shape
    D = len(crmodel.damage_states)
    with mon_risk:
        dddict = general.AccumDict(accum=numpy.zeros((L, Dc), F32))  # eid, kid
        for taxo, asset_df in assets_df.groupby('taxonomy'):
            for sid, adf in asset_df.groupby('site_id'):
                gmf_df = df[df.sid == sid]
                if len(gmf_df) == 0:
                    continue
                # working one site at a time
                out = crmodel.get_output(taxo, adf, gmf_df)
                eids = out['eids']
                aids = out['assets']['ordinal']
                for lti, lt in enumerate(out['loss_types']):
                    fractions = out[lt]
                    Asid, E, D = fractions.shape
                    ddd = numpy.zeros((Asid, E, Dc), F32)
                    ddd[:, :, :D] = fractions
                    for a, asset in enumerate(out['assets']):
                        ddd[a] *= asset['number']
                        csq = crmodel.compute_csq(asset, fractions[a], lt)
                        for name, values in csq.items():
                            ddd[a, :, ci[name]] = values
                    dmgcsq[aids, lti] += ddd.sum(axis=1)  # sum on the events
                    tot = ddd.sum(axis=0)  # sum on the assets
                    for e, eid in enumerate(eids):
                        dddict[eid, K][lti] += tot[e]
                        if K:
                            for a, aid in enumerate(aids):
                                dddict[eid, kids[aid]][lti] += ddd[a, e]
    return to_dframe(dddict, ci, L), dmgcsq
Example #42
def make_report(conn, isodate='today'):
    """
    Build a HTML report with the computations performed at the given isodate.
    Return the name of the report, which is saved in the current directory.
    """
    if isodate == 'today':
        isodate = datetime.date.today().isoformat()
    curs = conn.cursor()
    fetcher = Fetcher(curs)
    tag_ids = []
    tag_status = []
    tag_contents = []

    jobs = fetcher.query(ALL_JOBS, isodate, isodate)[1:]
    page = '<h2>%d job(s) finished before midnight of %s</h2>' % (
        len(jobs), isodate)
    for job_id, user, status, ds_calc in jobs:
        tag_ids.append(job_id)
        tag_status.append(status)
        stats = fetcher.query(JOB_STATS, job_id)[1:]
        if not stats:
            continue
        (job_id, user, start_time, stop_time, status, duration) = stats[0]
        try:
            ds = read(job_id, datadir=os.path.dirname(ds_calc))
            txt = view_fullreport('fullreport', ds).decode('utf-8')
            report = html_parts(txt)
        except Exception as exc:
            report = dict(
                html_title='Could not generate report: %s' % cgi.escape(
                    str(exc), quote=True),
                fragment='')

        page = report['html_title']

        job_stats = html(fetcher.query(JOB_STATS, job_id))
        page += job_stats

        page += report['fragment']

        tag_contents.append(page)

    page = make_tabs(tag_ids, tag_status, tag_contents) + (
        'Report last updated: %s' % datetime.datetime.now())
    fname = 'jobs-%s.html' % isodate
    with open(fname, 'w') as f:
        f.write(PAGE_TEMPLATE % page.encode('utf-8'))
    return fname
Example #43
def show_attrs(key, calc_id=-1):
    """
    Show the attributes of a HDF5 dataset in the datastore.
    """
    ds = datastore.read(calc_id)
    try:
        attrs = h5py.File.__getitem__(ds.hdf5, key).attrs
    except KeyError:
        print('%r is not in %s' % (key, ds))
    else:
        if len(attrs) == 0:
            print('%s has no attributes' % key)
        for name, value in attrs.items():
            print(name, value)
    finally:
        ds.close()
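The same attribute access works on any HDF5 file; a self-contained demonstration on an in-memory file (h5py accepts file-like objects since version 2.9):

import io

import h5py
import numpy

bio = io.BytesIO()
with h5py.File(bio, 'w') as f:
    dset = f.create_dataset('x', data=numpy.zeros(3))
    dset.attrs['units'] = 'g'
with h5py.File(bio, 'r') as f:
    for name, value in f['x'].attrs.items():
        print(name, value)  # units g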
Example #44
def get_mesh(oqparam):
    """
    Extract the mesh of points to compute from the sites,
    the sites_csv, or the region.

    :param oqparam:
        an :class:`openquake.commonlib.oqvalidation.OqParam` instance
    """
    if oqparam.sites:
        lons, lats = zip(*sorted(oqparam.sites))
        return geo.Mesh(numpy.array(lons), numpy.array(lats))
    elif 'sites' in oqparam.inputs:
        csv_data = open(oqparam.inputs['sites']).read()
        coords = valid.coordinates(
            csv_data.strip().replace(',', ' ').replace('\n', ','))
        lons, lats = zip(*sorted(coords))
        return geo.Mesh(numpy.array(lons), numpy.array(lats))
    elif oqparam.region:
        # close the linear polygon ring by appending the first
        # point to the end
        firstpoint = geo.Point(*oqparam.region[0])
        points = [geo.Point(*xy) for xy in oqparam.region] + [firstpoint]
        try:
            mesh = geo.Polygon(points).discretize(oqparam.region_grid_spacing)
            lons, lats = zip(*sorted(zip(mesh.lons, mesh.lats)))
            return geo.Mesh(numpy.array(lons), numpy.array(lats))
        except Exception:
            raise ValueError(
                'Could not discretize region %(region)s with grid spacing '
                '%(region_grid_spacing)s' % vars(oqparam))
    elif 'gmfs' in oqparam.inputs:
        return get_gmfs(oqparam)[0].mesh
    elif oqparam.hazard_calculation_id:
        sitecol = datastore.read(oqparam.hazard_calculation_id)['sitecol']
        return geo.Mesh(sitecol.lons, sitecol.lats)
    elif 'exposure' in oqparam.inputs:
        # the mesh is extracted from get_sitecol_assets
        return
    elif 'site_model' in oqparam.inputs:
        coords = [(param.lon, param.lat) for param in get_site_model(oqparam)]
        lons, lats = zip(*sorted(coords))
        return geo.Mesh(numpy.array(lons), numpy.array(lats))
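Two small idioms above are worth isolating: the region ring is closed by repeating the first vertex, and the (lon, lat) pairs are sorted before being unzipped, so the resulting mesh has a deterministic ordering. With made-up coordinates:

import numpy

region = [(10.0, 45.0), (11.0, 45.0), (11.0, 46.0)]  # hypothetical region
ring = region + [region[0]]                          # close the linear ring
lons, lats = zip(*sorted(region))                    # deterministic order
mesh_lons, mesh_lats = numpy.array(lons), numpy.array(lats)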
Example #45
 def run_calc(self, testfile, job_ini, **kw):
     """
     Return the outputs of the calculation as a dictionary
     """
     inis = job_ini.split(',')
     assert len(inis) in (1, 2), inis
     self.calc = self.get_calc(testfile, inis[0], **kw)
     with self.calc.monitor:
         result = self.calc.run()
     if len(inis) == 2:
         hc_id = self.calc.datastore.calc_id
         self.calc = self.get_calc(
             testfile, inis[1], hazard_calculation_id=str(hc_id), **kw)
         with self.calc.monitor:
             result.update(self.calc.run())
     # reopen datastore, since some tests need to export from it
     dstore = datastore.read(self.calc.datastore.calc_id)
     dstore.export_dir = dstore['oqparam'].export_dir
     self.calc.datastore = dstore
     return result
Example #46
def plot_sites(calc_id):
    """
    Plot the sites and the bounding boxes of the sources, enlarged by
    the maximum distance
    """
    import matplotlib.pyplot as p
    from matplotlib.patches import Rectangle
    dstore = datastore.read(calc_id)
    sitecol = dstore['sitecol']
    csm = dstore['composite_source_model']
    oq = dstore['oqparam']
    rfilter = RtreeFilter(sitecol, oq.maximum_distance)
    fig = p.figure()
    ax = fig.add_subplot(111)
    ax.grid(True)
    for src in csm.get_sources():
        llcorner, width, height = rfilter.get_rectangle(src)
        ax.add_patch(Rectangle(llcorner, width, height, fill=False))
    p.scatter(sitecol.lons, sitecol.lats, marker='+')
    p.show()
Example #47
 def test_read(self):
     # case of a non-existing directory
     with self.assertRaises(IOError):
         read(42, datadir="/fake/directory")
     # case of a non-existing file
     with self.assertRaises(IOError):
         read(42, datadir="/tmp")
     # case of no read permission
     tmp = tempfile.mkdtemp()
     fname = os.path.join(tmp, "calc_42.hdf5")
     open(fname, "w").write("")
     os.chmod(fname, 0)
     with self.assertRaises(IOError) as ctx:
         read(42, datadir=tmp)
     self.assertIn("Permission denied:", str(ctx.exception))
Example #48
 def test_read(self):
     # case of a non-existing directory
     with self.assertRaises(IOError):
         read(42, datadir='/fake/directory')
     # case of a non-existing file
     with self.assertRaises(IOError):
         read(42, datadir='/tmp')
     # case of no read permission
     tmp = tempfile.mkdtemp()
     fname = os.path.join(tmp, 'calc_42.hdf5')
     open(fname, 'w').write('')
     os.chmod(fname, 0)
     with self.assertRaises(IOError) as ctx:
         read(42, datadir=tmp)
     self.assertIn('Permission denied:', str(ctx.exception))
Example #49
    def test_spatial_correlation(self):
        expected = {sc1: [0.99, 0.41],
                    sc2: [0.99, 0.64],
                    sc3: [0.99, 0.22]}

        for case in expected:
            self.run_calc(case.__file__, 'job.ini')
            oq = self.calc.oqparam
            self.assertEqual(list(oq.imtls), ['PGA'])
            dstore = read(self.calc.datastore.calc_id)
            gmf = group_array(dstore['gmf_data/0000'], 'sid')
            gmvs_site_0 = gmf[0]['gmv']
            gmvs_site_1 = gmf[1]['gmv']
            joint_prob_0_5 = joint_prob_of_occurrence(
                gmvs_site_0, gmvs_site_1, 0.5, oq.investigation_time,
                oq.ses_per_logic_tree_path)
            joint_prob_1_0 = joint_prob_of_occurrence(
                gmvs_site_0, gmvs_site_1, 1.0, oq.investigation_time,
                oq.ses_per_logic_tree_path)

            p05, p10 = expected[case]
            numpy.testing.assert_almost_equal(joint_prob_0_5, p05, decimal=1)
            numpy.testing.assert_almost_equal(joint_prob_1_0, p10, decimal=1)
Example #50
def main():
    arg_parser = set_up_arg_parser()

    args = arg_parser.parse_args()

    exports = args.exports or 'xml,csv'

    if args.version:
        print(__version__)
        sys.exit(0)

    if args.run or args.run_hazard or args.run_risk:
        # the logging will be configured in engine.py
        pass
    else:
        # configure a basic logging
        logging.basicConfig(level=logging.INFO)

    if args.config_file:
        os.environ[utils.config.OQ_CONFIG_FILE_VAR] = \
            abspath(expanduser(args.config_file))
        utils.config.refresh()

    if args.no_distribute:
        os.environ[openquake.engine.NO_DISTRIBUTE_VAR] = '1'

    if args.upgrade_db:
        logs.set_level('info')
        msg = upgrade_manager.what_if_I_upgrade(
            conn, extract_scripts='read_scripts')
        print(msg)
        if msg.startswith('Your database is already updated'):
            pass
        elif args.yes or utils.confirm('Proceed? (y/n) '):
            upgrade_manager.upgrade_db(conn)
        sys.exit(0)

    if args.version_db:
        print(upgrade_manager.version_db(conn))
        sys.exit(0)

    if args.what_if_I_upgrade:
        print(upgrade_manager.what_if_I_upgrade(conn))
        sys.exit(0)

    # check if the db is outdated
    outdated = dbcmd('check_outdated')
    if outdated:
        sys.exit(outdated)

    # hazard or hazard+risk
    hc_id = args.hazard_calculation_id
    if hc_id and int(hc_id) < 0:
        # make it possible to use commands like `oq-engine --run job_risk.ini --hc -1`
        hc_id = dbcmd('get_hc_id', int(hc_id))
    if args.run:
        job_inis = list(map(expanduser, args.run.split(',')))
        if len(job_inis) not in (1, 2):
            sys.exit('%s should be a .ini filename or a pair of filenames '
                     'separated by a comma' % args.run)
        for job_ini in job_inis:
            open(job_ini).read()  # raise an IOError if the file does not exist
        log_file = expanduser(args.log_file) \
            if args.log_file is not None else None

        if len(job_inis) == 2:
            # run hazard
            job_id = run_job(job_inis[0], args.log_level,
                             log_file, args.exports)
            # run risk
            run_job(job_inis[1], args.log_level, log_file,
                    args.exports, hazard_calculation_id=job_id)
        else:
            run_job(
                expanduser(args.run), args.log_level, log_file,
                args.exports, hazard_calculation_id=hc_id)
    # hazard
    elif args.list_hazard_calculations:
        dbcmd('list_calculations', 'hazard')
    elif args.run_hazard is not None:
        log_file = expanduser(args.log_file) \
            if args.log_file is not None else None
        run_job(expanduser(args.run_hazard), args.log_level,
                log_file, args.exports)
    elif args.delete_calculation is not None:
        dbcmd('delete_calculation', args.delete_calculation, args.yes)
    # risk
    elif args.list_risk_calculations:
        dbcmd('list_calculations', 'risk')
    elif args.run_risk is not None:
        if args.hazard_calculation_id is None:
            sys.exit(MISSING_HAZARD_MSG)
        log_file = expanduser(args.log_file) \
            if args.log_file is not None else None
        run_job(
            expanduser(args.run_risk),
            args.log_level, log_file, args.exports,
            hazard_calculation_id=hc_id)

    # export
    elif args.make_html_report:
        print('Written', make_report(conn, args.make_html_report))
        sys.exit(0)

    elif args.list_outputs is not None:
        hc_id = dbcmd('get_hc_id', args.list_outputs)
        dbcmd('list_outputs', hc_id)
    elif args.show_view is not None:
        job_id, view_name = args.show_view
        print(views.view(view_name, datastore.read(int(job_id))))
    elif args.show_log is not None:
        hc_id = dbcmd('get_hc_id', args.show_log[0])
        print(dbcmd('get_log', hc_id))

    elif args.export_output is not None:
        output_id, target_dir = args.export_output
        dbcmd('export_output', int(output_id), expanduser(target_dir),
              exports)

    elif args.export_outputs is not None:
        job_id, target_dir = args.export_outputs
        hc_id = dbcmd('get_hc_id', job_id)
        dbcmd('export_outputs', hc_id, expanduser(target_dir), exports)

    elif args.delete_uncompleted_calculations:
        dbcmd('delete_uncompleted_calculations')
    else:
        arg_parser.print_usage()
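
set_up_arg_parser, conn, dbcmd, run_job and the other helpers above are defined elsewhere in the module. As a rough sketch of the command-line surface this dispatcher relies on, here is what a plain-argparse set_up_arg_parser might register; the option spellings are assumptions, only the attribute names read by main() are taken from the code above, and only a subset of the flags is shown:

import argparse

def set_up_arg_parser():
    p = argparse.ArgumentParser(prog='oq-engine')
    p.add_argument('--version', action='store_true')
    p.add_argument('--run', help='job.ini, or hazard.ini,risk.ini')
    p.add_argument('--run-hazard', dest='run_hazard')
    p.add_argument('--run-risk', dest='run_risk')
    p.add_argument('--hc', dest='hazard_calculation_id')
    p.add_argument('--exports')
    p.add_argument('--log-file', dest='log_file')
    p.add_argument('--log-level', dest='log_level', default='info')
    p.add_argument('-y', '--yes', action='store_true')
    p.add_argument('--upgrade-db', dest='upgrade_db', action='store_true')
    # ...one flag per remaining branch of the dispatcher...
    return p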
Example #51
def main():
    arg_parser = set_up_arg_parser()

    args = arg_parser.parse_args()

    exports = args.exports or 'xml,csv'

    if args.version:
        print(openquake.engine.__version__)
        sys.exit(0)

    if args.run or args.run_hazard or args.run_risk:
        # the logging will be configured in engine.py
        pass
    else:
        # configure basic logging
        logging.basicConfig(level=logging.INFO)

    if args.config_file:
        os.environ[config.OQ_CONFIG_FILE_VAR] = \
            abspath(expanduser(args.config_file))
        config.refresh()

    if args.no_distribute:
        os.environ['OQ_DISTRIBUTE'] = 'no'

    # check if the DbServer is up
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        err = sock.connect_ex(config.DBS_ADDRESS)
    finally:
        sock.close()
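    # connect_ex returns 0 on success and an errno code on failure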
    if err:
        multi_user = valid.boolean(config.get('dbserver', 'multi_user'))
        if multi_user:
            sys.exit('Please start the DbServer: '
                     'see the documentation for details')
        # otherwise start the DbServer automatically
        dblog = os.path.expanduser('~/oq-dbserver.log')
        subprocess.Popen([sys.executable, '-m', 'openquake.server.dbserver',
                          '-l', 'INFO'], stderr=open(dblog, 'w'))
    if args.upgrade_db:
        logs.set_level('info')
        msg = logs.dbcmd('what_if_I_upgrade', 'read_scripts')
        print(msg)  # show what the upgrade would change before confirming
        if msg.startswith('Your database is already updated'):
            pass
        elif args.yes or confirm('Proceed? (y/n) '):
            logs.dbcmd('upgrade_db')
        sys.exit(0)

    if args.version_db:
        print(logs.dbcmd('version_db'))
        sys.exit(0)

    if args.what_if_I_upgrade:
        print(logs.dbcmd('what_if_I_upgrade', 'extract_upgrade_scripts'))
        sys.exit(0)

    # check if the db is outdated
    outdated = logs.dbcmd('check_outdated')
    if outdated:
        sys.exit(outdated)

    # hazard or hazard+risk
    if args.hazard_calculation_id:
        hc_id = get_job_id(args.hazard_calculation_id)
    else:
        hc_id = None
    if args.run:
        job_inis = list(map(expanduser, args.run.split(',')))
        if len(job_inis) not in (1, 2):
            sys.exit('%s should be a .ini filename or a pair of filenames '
                     'separated by a comma' % args.run)
        for job_ini in job_inis:
            open(job_ini).read()  # raise an IOError if the file does not exist
        log_file = expanduser(args.log_file) \
            if args.log_file is not None else None

        if len(job_inis) == 2:
            # run hazard
            job_id = run_job(job_inis[0], args.log_level,
                             log_file, args.exports)
            # run risk
            run_job(job_inis[1], args.log_level, log_file,
                    args.exports, hazard_calculation_id=job_id)
        else:
            run_job(
                expanduser(args.run), args.log_level, log_file,
                args.exports, hazard_calculation_id=hc_id)
    # hazard
    elif args.list_hazard_calculations:
        for line in logs.dbcmd(
                'list_calculations', 'hazard', getpass.getuser()):
            print(line)
    elif args.run_hazard is not None:
        log_file = expanduser(args.log_file) \
            if args.log_file is not None else None
        run_job(expanduser(args.run_hazard), args.log_level,
                log_file, args.exports)
    elif args.delete_calculation is not None:
        delete_calculation(args.delete_calculation, args.yes)
    # risk
    elif args.list_risk_calculations:
        for line in logs.dbcmd('list_calculations', 'risk', getpass.getuser()):
            print(line)
    elif args.run_risk is not None:
        if args.hazard_calculation_id is None:
            sys.exit(MISSING_HAZARD_MSG)
        log_file = expanduser(args.log_file) \
            if args.log_file is not None else None
        run_job(
            expanduser(args.run_risk),
            args.log_level, log_file, args.exports,
            hazard_calculation_id=hc_id)

    # export
    elif args.make_html_report:
        print('Written', make_report(args.make_html_report))
        sys.exit(0)

    elif args.list_outputs is not None:
        hc_id = get_job_id(args.list_outputs)
        for line in logs.dbcmd('list_outputs', hc_id):
            print(line)
    elif args.show_view is not None:
        job_id, view_name = args.show_view
        print(views.view(view_name, datastore.read(int(job_id))))
    elif args.show_log is not None:
        hc_id = get_job_id(args.show_log[0])
        for line in logs.dbcmd('get_log', hc_id):
            print(line)

    elif args.export_output is not None:
        output_id, target_dir = args.export_output
        dskey, calc_id, datadir = logs.dbcmd('get_output', int(output_id))
        for line in core.export_output(
                dskey, calc_id, datadir, expanduser(target_dir), exports):
            print(line)

    elif args.export_outputs is not None:
        job_id, target_dir = args.export_outputs
        hc_id = get_job_id(job_id)
        for line in core.export_outputs(
                hc_id, expanduser(target_dir), exports):
            print(line)

    elif args.delete_uncompleted_calculations:
        logs.dbcmd('delete_uncompleted_calculations', getpass.getuser())
    else:
        arg_parser.print_usage()
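
get_job_id, used throughout this variant, is likewise defined elsewhere. A minimal sketch, assuming it resolves a (possibly negative, i.e. relative) calculation ID through the same 'get_job' database command used in Example #52 below; the error handling is an assumption:

def get_job_id(job_id, username=None):
    # hypothetical: ask the DbServer for the job record and fail
    # loudly if the calculation does not belong to this user
    username = username or getpass.getuser()
    job = logs.dbcmd('get_job', int(job_id), username)
    if job is None:
        sys.exit('Calculation %s not found' % job_id)
    return job.id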
Example #52
def read(calc_id):
    """
    Open the datastore of the given calculation, resolving the data
    directory from the job record stored in the engine database.
    """
    job = logs.dbcmd('get_job', calc_id, getpass.getuser())
    datadir = os.path.dirname(job.ds_calc_dir)
    return datastore.read(job.id, datadir=datadir)
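
For illustration, a hypothetical interactive session with this helper; the calculation ID and the inspected key are made up:

dstore = read(42)                     # opens calc_42.hdf5 from the job's datadir
print(dstore['oqparam'].description)  # e.g. inspect the job description
dstore.close()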