Example #1
def export_hcurves_csv(ekey, dstore):
    """
    Exports the hazard curves into several .csv files

    :param ekey: export key, i.e. a pair (datastore key, fmt)
    :param dstore: datastore object
    """
    oq = dstore['oqparam']
    rlzs_assoc = dstore['csm_info'].get_rlzs_assoc()
    sitecol = dstore['sitecol']
    sitemesh = get_mesh(sitecol)
    key, fmt = ekey
    fnames = []
    items = dstore['hmaps' if key == 'uhs' else key].items()
    for kind, hcurves in sorted(items):
        fname = hazard_curve_name(dstore, ekey, kind, rlzs_assoc)
        if key == 'uhs':
            uhs_curves = calc.make_uhs(hcurves, oq.imtls, oq.poes)
            write_csv(fname, util.compose_arrays(sitemesh, uhs_curves))
            fnames.append(fname)
        elif key == 'hmaps':
            write_csv(fname, util.compose_arrays(sitemesh, hcurves))
            fnames.append(fname)
        else:
            if export.from_db:  # called by export_from_db
                fnames.extend(
                    export_hcurves_by_imt_csv(
                        ekey, fname, sitecol, hcurves, oq.imtls))
            else:  # when exporting directly from the datastore
                fnames.extend(
                    export_hazard_curves_csv(
                        ekey, fname, sitecol, hcurves, oq.imtls))

    return sorted(fnames)
Example #2
def export_ses_csv(ekey, dstore):
    """
    :param ekey: export key, i.e. a pair (datastore key, fmt)
    :param dstore: datastore object
    """
    if 'events' not in dstore:  # scenario
        return []
    dest = dstore.export_path('ruptures.csv')
    header = ('id mag centroid_lon centroid_lat centroid_depth trt '
              'strike dip rake boundary').split()
    csm_info = dstore['csm_info']
    grp_trt = csm_info.grp_trt()
    sm_by_grp = csm_info.get_sm_by_grp()
    rows = []
    for grp_id, trt in sorted(grp_trt.items()):
        sm = 'sm-%04d' % sm_by_grp[grp_id]
        etags = build_etags(dstore['events/' + sm])
        dic = groupby(etags, util.get_serial)
        for r in dstore['rup_data/grp-%02d' % grp_id]:
            for etag in dic[r['rupserial']]:
                boundary = 'MULTIPOLYGON(%s)' % r['boundary']
                rows.append(
                    (etag, r['mag'], r['lon'], r['lat'], r['depth'],
                     trt, r['strike'], r['dip'], r['rake'], boundary))
    rows.sort(key=operator.itemgetter(0))
    writers.write_csv(dest, rows, header=header)
    return [dest]
Example #3
def export_ebr(ekey, dstore):
    assets = get_assets_sites(dstore)
    outs = extract_outputs(ekey[0], dstore, ekey[1])
    for out in outs:
        writers.write_csv(
            out.path, compose_arrays(assets, out.array), fmt='%9.7E')
    return [out.path for out in outs]
Example #4
def export_hcurves_csv(ekey, dstore):
    """
    Exports the hazard curves into several .csv files

    :param ekey: export key, i.e. a pair (datastore key, fmt)
    :param dstore: datastore object
    """
    oq = dstore['oqparam']
    rlzs_assoc = dstore['csm_info'].get_rlzs_assoc()
    sitecol = dstore['sitecol']
    sitemesh = dstore['sitemesh']
    key, fmt = ekey
    fnames = []
    items = dstore['hmaps' if key == 'uhs' else key].items()
    for kind, hcurves in sorted(items):
        fname = hazard_curve_name(
            dstore, ekey, kind, rlzs_assoc, oq.number_of_logic_tree_samples)
        if key == 'uhs':
            uhs_curves = calc.make_uhs(hcurves, oq.imtls, oq.poes)
            write_csv(fname, util.compose_arrays(sitemesh, uhs_curves))
        elif key == 'hmaps':
            write_csv(fname, util.compose_arrays(sitemesh, hcurves))
        else:
            export_hazard_curves_csv(ekey, fname, sitecol, hcurves, oq.imtls)
        fnames.append(fname)
    return sorted(fnames)
Example #5
def export_ruptures_csv(ekey, dstore):
    """
    :param ekey: export key, i.e. a pair (datastore key, fmt)
    :param dstore: datastore object
    """
    oq = dstore['oqparam']
    if 'scenario' in oq.calculation_mode:
        return []
    dest = dstore.export_path('ruptures.csv')
    header = ('rupid multiplicity mag centroid_lon centroid_lat '
              'centroid_depth trt strike dip rake boundary').split()
    rows = []
    for rgetter in gen_rupture_getters(dstore):
        rups = rgetter.get_ruptures()
        rup_data = calc.RuptureData(rgetter.trt, rgetter.rlzs_by_gsim)
        for r in rup_data.to_array(rups):
            rows.append(
                (r['rup_id'], r['multiplicity'], r['mag'],
                 r['lon'], r['lat'], r['depth'],
                 rgetter.trt, r['strike'], r['dip'], r['rake'],
                 r['boundary']))
    rows.sort()  # by rupture serial
    comment = dstore.metadata
    comment.update(investigation_time=oq.investigation_time,
                   ses_per_logic_tree_path=oq.ses_per_logic_tree_path)
    writers.write_csv(dest, rows, header=header, sep='\t', comment=comment)
    return [dest]
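
The exporters above all converge on the same writer. As a hedged sketch of the call pattern (the header, sep, fmt and comment keyword arguments are inferred from the usages collected on this page; the import path is an assumption and varies across engine versions):

# Sketch only: mirrors the write_csv call pattern seen in these
# examples; the import path and accepted keywords are assumptions,
# not taken from a specific engine version.
from openquake.baselib import writers

rows = [(1, 5.5, 'Active Shallow Crust'),
        (2, 6.0, 'Stable Continental')]
writers.write_csv('/tmp/ruptures.csv', rows,
                  header=['rupid', 'mag', 'trt'], sep='\t',
                  comment={'investigation_time': 50.0})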
Example #6
def save_disagg_to_csv(metadata, matrices):
    """
    Save disaggregation matrices to multiple .csv files.
    """
    skip_keys = ('Mag', 'Dist', 'Lon', 'Lat', 'Eps', 'TRT')
    com = {key: value for key, value in metadata.items()
           if value is not None and key not in skip_keys}
    for disag_tup, (poe, iml, matrix, fname) in matrices.items():
        com.update(poe='%.7f' % poe, iml='%.7e' % iml)
        if disag_tup == ('Mag', 'Lon', 'Lat'):
            matrix = numpy.swapaxes(matrix, 0, 1)
            matrix = numpy.swapaxes(matrix, 1, 2)
            disag_tup = ('Lon', 'Lat', 'Mag')

        axis = [metadata[v] for v in disag_tup]
        com.update(key=','.join(v for v in disag_tup))
        # compute axis mid points
        axis = [(ax[: -1] + ax[1:]) / 2. if ax.dtype == float
                else ax for ax in axis]
        values = None
        if len(axis) == 1:
            values = numpy.array([axis[0], matrix.flatten()]).T
        else:
            grids = numpy.meshgrid(*axis, indexing='ij')
            values = [g.flatten() for g in grids]
            values.append(matrix.flatten())
            values = numpy.array(values).T
        writers.write_csv(fname, values, comment=com, fmt='%.5E')
Example #7
def export_gmf_csv(key, export_dir, fname, sitecol, ruptures, gmfs, rlz,
                   investigation_time):
    """
    :param key: output_type and export_type
    :param export_dir: the directory where to export
    :param fname: name of the exported file
    :param sitecol: the full site collection
    :param ruptures: an ordered list of ruptures
    :param gmfs: an ordered list of ground motion fields
    :param rlz: a realization object
    :param investigation_time: investigation time (None for scenario)
    """
    dest = os.path.join(export_dir, fname)
    imts = list(gmfs[0].dtype.fields)
    # the csv file has the form
    # tag,indices,gmvs_imt_1,...,gmvs_imt_N
    rows = []
    for rupture, gmf in zip(ruptures, gmfs):
        try:
            indices = rupture.indices
        except AttributeError:
            indices = sitecol.indices
        if indices is None:
            indices = list(range(len(sitecol)))
        row = [rupture.tag, ' '.join(map(str, indices))] + \
              [gmf[imt] for imt in imts]
        rows.append(row)
    write_csv(dest, rows)
    return {key: [dest]}
Example #8
    def test_read_written(self):
        dtype = numpy.dtype([('a', float), ('b', float, 2)])
        written = numpy.array([(.1, [.2, .3]), (.3, [.4, .5])], dtype)
        dest = tempfile.NamedTemporaryFile().name
        write_csv(dest, written)
        read = read_composite_array(dest)
        numpy.testing.assert_equal(read, written)
Example #9
def export_realizations(ekey, dstore):
    rlzs = dstore[ekey[0]]
    data = [['ordinal', 'uid', 'weight']]
    for i, rlz in enumerate(rlzs):
        data.append([i, rlz['uid'], rlz['weight']])
    path = dstore.export_path('realizations.csv')
    writers.write_csv(path, data, fmt='%s', sep='\t')
    return [path]
Example #10
def export_ebr_specific(ekey, dstore):
    all_assets = get_assets_sites(dstore)
    spec_assets = all_assets[dstore['spec_indices'].value]
    outs = extract_outputs(ekey[0], dstore, ekey[1])
    for out in outs:
        arr = compose_arrays(spec_assets, out.array)
        writers.write_csv(out.path, arr, fmt='%9.7E')
    return [out.path for out in outs]
Example #11
def export_agg_losses(ekey, dstore):
    tags = dstore['tags']
    outs = extract_outputs(ekey[0], dstore, ekey[1])
    header = ['rupture_tag', 'aggregate_loss', 'insured_loss']
    for out in outs:
        data = [[tags[rec['rup_id']], rec['loss'][0], rec['loss'][1]]
                for rec in out.array]
        writers.write_csv(out.path, sorted(data), fmt='%9.7E', header=header)
    return [out.path for out in outs]
Example #12
def export_event_loss_csv(key, output, target):
    """
    Export Event Loss Table in CSV format
    """
    dest = _get_result_export_dest(target, output)
    rows = models.EventLossData.objects.filter(event_loss__output=output)
    data = sorted((row.rupture.tag, row.aggregate_loss) for row in rows)
    writers.write_csv(
        dest, [['Rupture', 'Aggregate Loss']] + data, fmt='%10.6E')
    return dest
Example #13
def export_sourcegroups(ekey, dstore):
    csm_info = dstore['csm_info']
    data = [['grp_id', 'trt', 'eff_ruptures']]
    for i, sm in enumerate(csm_info.source_models):
        for src_group in sm.src_groups:
            trt = source.capitalize(src_group.trt)
            er = src_group.eff_ruptures
            data.append((src_group.id, trt, er))
    path = dstore.export_path('sourcegroups.csv')
    writers.write_csv(path, data, fmt='%s')
    return [path]
Example #14
def export_csq_total_csv(ekey, dstore):
    rlzs = dstore['rlzs_assoc'].realizations
    R = len(rlzs)
    value = dstore[ekey[0]].value
    fnames = []
    for rlz, values in zip(rlzs, value):
        suffix = '.csv' if R == 1 else '-gsimltp_%s.csv' % rlz.uid
        fname = dstore.export_path(ekey[0] + suffix)
        writers.write_csv(fname, numpy.array([values], value.dtype))
        fnames.append(fname)
    return fnames
Example #15
def export_loss_map_csv(key, output, target):
    """
    Export `output` to `target` in CSV format
    """
    dest = _get_result_export_dest(target, output, file_ext=key[1])
    data = []
    for row in output.loss_map.lossmapdata_set.order_by('asset_ref'):
        data.append(LossMapPerAsset(row.asset_ref, row.value))
    header = [data[0]._fields]
    writers.write_csv(dest, header + data, fmt='%10.6E')
    return dest
Example #16
def export_csq_csv(ekey, dstore):
    rlzs = dstore['rlzs_assoc'].realizations
    R = len(rlzs)
    value = dstore[ekey[0]].value  # matrix N x R or T x R
    fnames = []
    for rlz, values in zip(rlzs, value.T):
        suffix = '.csv' if R == 1 else '-gsimltp_%s.csv' % rlz.uid
        fname = dstore.export_path(ekey[0] + suffix)
        writers.write_csv(fname, values)
        fnames.append(fname)
    return fnames
Example #17
def export_ebr_curves(ekey, dstore):
    rlzs = dstore['rlzs_assoc'].realizations
    assets = get_assets_sites(dstore)
    curves = dstore[ekey[0]]
    paths = []
    name = ekey[0].split('-')[0]  # rcurves, icurves
    for rlz in rlzs:
        array = compose_arrays(assets, curves[:, rlz.ordinal])
        path = dstore.export_path('%s-%s.csv' % (name, rlz.uid))
        writers.write_csv(path, array, fmt='%9.7E')
        paths.append(path)
    return paths
Example #18
def _export_curves_csv(name, assets, curves, export_dir, prefix, columns=None):
    fnames = []
    for loss_type in curves.dtype.fields:
        if assets is None:
            data = curves[loss_type]
        else:
            data = compose_arrays(assets, curves[loss_type])
        dest = os.path.join(
            export_dir, '%s-%s-%s.csv' % (prefix, loss_type, name))
        writers.write_csv(dest, data, fmt='%10.6E', header=columns)
        fnames.append(dest)
    return fnames
Example #19
def export_sitecol_csv(ekey, dstore):
    """
    :param ekey: export key, i.e. a pair (datastore key, fmt)
    :param dstore: datastore object
    """
    dest = dstore.export_path(*ekey)
    rows = []
    for site in dstore['sitecol']:
        rows.append([site.id, site.location.x, site.location.y, site.vs30,
                     site.vs30measured, site.z1pt0, site.z2pt5, site.backarc])
    write_csv(dest, sorted(rows, key=operator.itemgetter(0)))
    return [dest]
Example #20
def export_hmaps_csv(key, dest, sitemesh, array, comment):
    """
    Export the hazard maps of the given realization into CSV.

    :param key: output_type and export_type
    :param dest: name of the exported file
    :param sitemesh: site collection
    :param array: a composite array of dtype hmap_dt
    :param comment: comment to use as header of the exported CSV file
    """
    curves = util.compose_arrays(sitemesh, array)
    writers.write_csv(dest, curves, comment=comment)
    return [dest]
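
Several exporters pair the site mesh with result values through util.compose_arrays, joining two structured arrays of equal length row by row. A toy reimplementation of the idea in plain numpy (the engine's actual helper is assumed to handle more cases, such as nested field shapes):

import numpy

def compose_arrays_sketch(a1, a2):
    # Row-wise join of two structured arrays: the output carries the
    # fields of a1 followed by the fields of a2.
    assert len(a1) == len(a2)
    fields = ([(n, a1.dtype[n]) for n in a1.dtype.names] +
              [(n, a2.dtype[n]) for n in a2.dtype.names])
    out = numpy.zeros(len(a1), numpy.dtype(fields))
    for name in a1.dtype.names:
        out[name] = a1[name]
    for name in a2.dtype.names:
        out[name] = a2[name]
    return out

sitemesh = numpy.array([(0.0, 0.0), (0.1, 0.0)],
                       [('lon', float), ('lat', float)])
values = numpy.array([(0.5,), (0.7,)], [('PGA-0.1', float)])
print(compose_arrays_sketch(sitemesh, values))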
Example #21
def export_loss_curve_csv(key, output, target):
    """
    Export `output` to `target` in CSV format
    """
    dest = _get_result_export_dest(target, output)[:-3] + 'csv'
    data = []
    for row in output.loss_curve.losscurvedata_set.all().order_by('asset_ref'):
        lca = LossCurvePerAsset(
            row.asset_ref, row.losses, row.poes, row.average_loss)
        data.append(lca)
    header = [lca._fields]
    writers.write_csv(dest, header + data, fmt='%10.6E')
    return dest
Example #22
def export_avgloss_csv(key, output, target):
    """
    Export `output` to `target` in csv format for a given loss type
    """
    dest = _get_result_export_dest(target, output)[:-3] + 'csv'
    data = output.loss_curve.losscurvedata_set.all().order_by('asset_ref')
    header = ['lon', 'lat', 'asset_ref', 'asset_value', 'average_loss',
              'stddev_loss', 'loss_type']
    rows = [(c.location.x, c.location.y, c.asset_ref, c.asset_value,
             c.average_loss, c.stddev_loss or '', c.loss_curve.loss_type)
            for c in data]
    writers.write_csv(dest, [header] + rows)
    return dest
Example #23
    def test(self):
        job = self._run_test()
        data = [[curve.asset_ref, curve.location.x, curve.location.y,
                 curve.average_loss, numpy.nan]
                for curve in models.LossCurveData.objects.filter(
                    loss_curve__output__oq_job=job).order_by('asset_ref')]
        fd, fname = tempfile.mkstemp(suffix='.csv')
        os.close(fd)
        writers.write_csv(
            fname, [['asset_ref', 'lon', 'lat', 'avg_loss~structural',
                     'ins_loss~structural']] + data, fmt='%10.6E')
        expected = self._test_path('expected/rlz-000-avg_loss.csv')
        self.assertEqual(open(fname).read(), open(expected).read())
Example #24
def export_event_loss(ekey, dstore):
    """
    :param ekey: export key, i.e. a pair (datastore key, fmt)
    :param dstore: datastore object
    """
    name, fmt = ekey
    fnames = []
    for i, data in enumerate(dstore[ekey[0]]):
        for loss_type in data:
            dest = os.path.join(
                dstore.export_dir, 'rlz-%03d-%s-%s.csv' % (i, loss_type, name))
            writers.write_csv(dest, sorted(data[loss_type]), fmt='%10.6E')
            fnames.append(dest)
    return fnames
Example #25
def export_event_loss_asset_csv(key, output, target):
    """
    Export Event Loss Per Asset in CSV format
    """
    dest = _get_result_export_dest(target, output)
    rows = []
    for event_loss in models.EventLossAsset.objects.filter(
            event_loss__output=output).select_related().order_by(
            'rupture__tag', 'asset__asset_ref'):
        rows.append([event_loss.rupture.tag,
                     event_loss.asset.asset_ref,
                     event_loss.loss])
    writers.write_csv(dest, rows)
    return dest
Example #26
def export_uhs_csv(key, dest, sitecol, hmaps):
    """
    Export the scalar outputs.

    :param key: output_type and export_type
    :param dest: file name
    :param sitecol: site collection
    :param hmaps:
        an array N x I x P where N is the number of sites,
        I the number of IMTs of SA type, and P the number of poes
    """
    rows = [[[lon, lat]] + list(row)
            for lon, lat, row in zip(sitecol.lons, sitecol.lats, hmaps)]
    write_csv(dest, rows)
    return {dest: dest}
Example #27
    def test_get_stat_curves(self):
        tempdir = tempfile.mkdtemp()
        curves, ins_curves, maps = scientific.get_stat_curves(self.stats)

        actual = os.path.join(tempdir, 'expected_loss_curves.csv')
        writers.write_csv(actual, curves, fmt='%05.2f')

        tests.check_equal(__file__, 'expected_loss_curves.csv', actual)

        actual = os.path.join(tempdir, 'expected_loss_maps.csv')
        writers.write_csv(actual, maps, fmt='%05.2f')
        tests.check_equal(__file__, 'expected_loss_maps.csv', actual)

        # remove only if the test passes
        shutil.rmtree(tempdir)
Example #28
    def test_get_stat_curves_maps(self):
        tempdir = tempfile.mkdtemp()
        curves, maps = self.builder.get_curves_maps(self.stats)
        # expecting arrays of shape (Q1, N) with Q1=3, N=4
        actual = os.path.join(tempdir, 'expected_loss_curves.csv')
        writers.write_csv(actual, curves, fmt='%05.2f')

        tests.check_equal(__file__, 'expected_loss_curves.csv', actual)

        actual = os.path.join(tempdir, 'expected_loss_maps.csv')
        writers.write_csv(actual, maps, fmt='%05.2f')
        tests.check_equal(__file__, 'expected_loss_maps.csv', actual)

        # remove only if the test passes
        shutil.rmtree(tempdir)
Example #29
def export_disagg_by_src_csv(ekey, dstore):
    paths = []
    srcdata = dstore['disagg_by_grp'][()]
    header = ['source_id', 'poe']
    by_poe = operator.itemgetter(1)
    for name in dstore['disagg_by_src']:
        probs = dstore['disagg_by_src/' + name][()]
        ok = probs > 0
        src = srcdata[ok]
        data = [header] + sorted(zip(add_quotes(src['grp_name']), probs[ok]),
                                 key=by_poe, reverse=True)
        path = dstore.export_path(name + '_Src.csv')
        writers.write_csv(path, data, fmt='%.7e')
        paths.append(path)
    return paths
Example #30
def export_hazard_csv(key, dest, sitemesh, pmap,
                      imtls, comment):
    """
    Export the curves of the given realization into CSV.

    :param key: output_type and export_type
    :param dest: name of the exported file
    :param sitemesh: site collection
    :param pmap: a ProbabilityMap
    :param dict imtls: intensity measure types and levels
    :param comment: comment to use as header of the exported CSV file
    """
    curves = convert_to_array(pmap, sitemesh, imtls)
    writers.write_csv(dest, curves, comment=comment)
    return [dest]
Example #31
def export_loss_csv(ekey, dstore, data, suffix):
    """
    Export (aggregate) losses in CSV.

    :param key: per_asset_loss|asset-ins
    :param dstore: the datastore
    :param data: a list [(loss_type, unit, asset_ref, mean, stddev), ...]
    :param suffix: a suffix specifying the GSIM realization
    """
    dest = dstore.export_path('%s%s.%s' % (ekey[0], suffix, ekey[1]))
    if ekey[0] in ('agg', 'ins'):  # aggregate
        header = ['LossType', 'Unit', 'Mean', 'Standard Deviation']
    else:  # loss_map
        header = ['LossType', 'Unit', 'Asset', 'Mean', 'Standard Deviation']
        data.sort(key=operator.itemgetter(2))  # order by asset_ref
    writers.write_csv(dest, [header] + data, fmt='%11.7E')
    return dest
Example #32
def view_extreme(token, dstore):
    """
    Show sites where the mean hazard map reaches maximum values
    """
    mean = dstore.sel('hmaps-stats', stat='mean')[:, 0, 0, -1]  # shape N1MP
    site_ids, = numpy.where(mean == mean.max())
    arr = dstore['sitecol'][site_ids]
    return write_csv(io.BytesIO(), arr).decode('utf8')
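
Note the in-memory idiom: handing write_csv an io.BytesIO instead of a path and decoding the returned bytes, so the CSV text can be returned directly without touching the filesystem (later examples use the same trick). A minimal sketch, assuming a write_csv that returns the buffer contents when given a file-like object, as it does above:

import io
import numpy
# Import path is an assumption; the examples on this page import
# write_csv from the engine's writers module in various ways.
from openquake.baselib.writers import write_csv

arr = numpy.array([(0.0, 0.0, 760.0)],
                  [('lon', float), ('lat', float), ('vs30', float)])
print(write_csv(io.BytesIO(), arr).decode('utf8'))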
Example #33
def export_agglosses(ekey, dstore):
    oq = dstore['oqparam']
    loss_dt = oq.loss_dt()
    cc = dstore['assetcol/cost_calculator']
    unit_by_lt = cc.units
    unit_by_lt['occupants'] = 'people'
    agglosses = dstore[ekey[0]]
    losses = []
    header = ['loss_type', 'unit', 'mean', 'stddev']
    for l, lt in enumerate(loss_dt.names):
        unit = unit_by_lt[lt.replace('_ins', '')]
        mean = agglosses[l]['mean']
        stddev = agglosses[l]['stddev']
        losses.append((lt, unit, mean, stddev))
    dest = dstore.build_fname('agglosses', '', 'csv')
    writers.write_csv(dest, losses, header=header)
    return [dest]
Example #34
def export_hcurves_csv(ekey, dstore):
    """
    Exports the hazard curves into several .csv files

    :param ekey: export key, i.e. a pair (datastore key, fmt)
    :param dstore: datastore object
    """
    oq = dstore['oqparam']
    rlzs_assoc = dstore['csm_info'].get_rlzs_assoc()
    sitecol = dstore['sitecol']
    sitemesh = get_mesh(sitecol)
    key, fmt = ekey
    if '/' in key:
        key, kind = key.rsplit('/', 1)
        ekey = (key, fmt)
    else:
        kind = ''
    fnames = []
    if oq.poes:
        pdic = DictArray({imt: oq.poes for imt in oq.imtls})
    for kind, hcurves in calc.PmapGetter(dstore).items(kind):
        fname = hazard_curve_name(dstore, ekey, kind, rlzs_assoc)
        comment = _comment(rlzs_assoc, kind, oq.investigation_time)
        if key == 'uhs' and oq.poes and oq.uniform_hazard_spectra:
            uhs_curves = calc.make_uhs(hcurves, oq.imtls, oq.poes,
                                       len(sitemesh))
            writers.write_csv(fname,
                              util.compose_arrays(sitemesh, uhs_curves),
                              comment=comment)
            fnames.append(fname)
        elif key == 'hmaps' and oq.poes and oq.hazard_maps:
            hmap = calc.make_hmap(hcurves, oq.imtls, oq.poes)
            fnames.extend(
                export_hazard_csv(ekey, fname, sitemesh, hmap, pdic, comment))
        elif key == 'hcurves':
            if export.from_db:  # called by export_from_db
                fnames.extend(
                    export_hcurves_by_imt_csv(ekey, kind, rlzs_assoc, fname,
                                              sitecol, hcurves, oq))
            else:  # when exporting directly from the datastore
                fnames.extend(
                    export_hazard_csv(ekey, fname, sitemesh, hcurves, oq.imtls,
                                      comment))

    return sorted(fnames)
Example #35
def export_disagg_csv(ekey, dstore):
    oq = dstore['oqparam']
    disagg_outputs = oq.disagg_outputs or disagg.pmf_map
    rlzs = dstore['csm_info'].get_rlzs_assoc().realizations
    group = dstore[ekey[0]]
    fnames = []
    skip_keys = ('Mag', 'Dist', 'Lon', 'Lat', 'Eps', 'TRT')
    for key in group:
        attrs = group[key].attrs
        iml = attrs['iml']
        try:
            rlz = rlzs[attrs['rlzi']]
        except TypeError:  # for stats
            rlz = attrs['rlzi']
        try:
            poes = [attrs['poe']] * len(disagg_outputs)
        except Exception:  # no poes_disagg were given
            poes = attrs['poe_agg']
        imt = from_string(attrs['imt'])
        lon, lat = attrs['location']
        metadata = {}
        # Loads "disaggMatrices" nodes
        if hasattr(rlz, 'sm_lt_path'):
            metadata['smlt_path'] = '_'.join(rlz.sm_lt_path)
            metadata['gsimlt_path'] = rlz.gsim_rlz.uid
        metadata['imt'] = imt.name
        metadata['investigation_time'] = oq.investigation_time
        metadata['lon'] = lon
        metadata['lat'] = lat
        metadata['Mag'] = attrs['mag_bin_edges']
        metadata['Dist'] = attrs['dist_bin_edges']
        metadata['Lon'] = attrs['lon_bin_edges']
        metadata['Lat'] = attrs['lat_bin_edges']
        metadata['Eps'] = attrs['eps_bin_edges']
        metadata['TRT'] = attrs['trt_bin_edges']
        for poe, label in zip(poes, disagg_outputs):
            com = {key: value for key, value in metadata.items()
                   if value is not None and key not in skip_keys}
            com.update(poe='%.7f' % poe, iml='%.7e' % iml)
            com.update(key=','.join(label.split('_')))
            fname = dstore.export_path(key + '_%s.csv' % label)
            values = extract(dstore, 'disagg/%s?by=%s' % (key, label))
            writers.write_csv(fname, values, comment=com, fmt='%.5E')
            fnames.append(fname)
    return fnames
Example #36
def export_avg_losses(ekey, dstore):
    """
    :param ekey: export key, i.e. a pair (datastore key, fmt)
    :param dstore: datastore object
    """
    avg_losses = dstore[ekey[0] + '/rlzs']
    rlzs = dstore['rlzs_assoc'].realizations
    assets = get_assets(dstore)
    columns = 'asset_ref lon lat avg_loss~structural ins_loss~structural' \
        .split()
    fnames = []
    for rlz, losses in zip(rlzs, avg_losses):
        dest = os.path.join(dstore.export_dir,
                            'rlz-%03d-avg_loss.csv' % rlz.ordinal)
        data = compose_arrays(assets, losses)
        writers.write_csv(dest, data, fmt='%10.6E', header=columns)
        fnames.append(dest)
    return fnames
Example #37
def print_(aw):
    if hasattr(aw, 'json'):
        try:
            attrs = hdf5.get_shape_descr(aw.json)
        except KeyError:  # no shape_descr, for instance for oqparam
            print(json.dumps(json.loads(aw.json), indent=2))
            return
        vars(aw).update(attrs)
    if hasattr(aw, 'shape_descr'):
        print(rst_table(aw.to_dframe()))
    elif hasattr(aw, 'array') and aw.dtype.names:
        sio = io.StringIO()
        write_csv(sio, aw.array)
        print(sio.getvalue())
    elif hasattr(aw, 'array'):
        print(aw.array)
    else:
        print(aw)
Example #38
    def test_case_23(self):
        # case with implicit grid and site model on a larger grid
        out = self.run_calc(case_23.__file__, 'job.ini', exports='csv')
        [fname] = out['ruptures', 'csv']
        self.assertEqualFiles('expected/%s' % strip_calc_id(fname), fname,
                              delta=1E-6)
        arr = self.calc.datastore.getitem('sitecol')
        tmp = gettemp(write_csv(io.BytesIO(), arr).decode('utf8'))
        self.assertEqualFiles('expected/sitecol.csv', tmp)
Example #39
def show(calc_id, key=None, rlzs=None):
    """
    Show the content of a datastore.

    :param calc_id: numeric calculation ID; if 0, show all calculations
    :param key: key of the datastore
    :param rlzs: flag; if given, print out the realizations in order
    """
    if not calc_id:
        if not os.path.exists(datastore.DATADIR):
            return
        rows = []
        for calc_id in datastore.get_calc_ids(datastore.DATADIR):
            try:
                oq = OqParam.from_(datastore.DataStore(calc_id).attrs)
                cmode, descr = oq.calculation_mode, oq.description
            except:  # invalid datastore directory
                logging.warn('Removed invalid calculation %d', calc_id)
                shutil.rmtree(
                    os.path.join(datastore.DATADIR, 'calc_%s' % calc_id))
            else:
                rows.append((calc_id, cmode, descr))
        for row in sorted(rows, key=lambda row: row[0]):  # by calc_id
            print('#%d %s: %s' % row)
        return
    ds = datastore.DataStore(calc_id)
    if key:
        if key in datastore.view:
            print(datastore.view(key, ds))
            return
        obj = ds[key]
        if hasattr(obj, 'value'):  # an array
            print(write_csv(io.StringIO(), obj.value))
        else:
            print(obj)
        return
    # print all keys
    oq = OqParam.from_(ds.attrs)
    print(
        oq.calculation_mode, 'calculation (%r) saved in %s contains:' %
        (oq.description, ds.calc_dir))
    for key in ds:
        print(key, humansize(ds.getsize(key)))

    # this part is experimental and not tested on purpose
    if rlzs and 'curves_by_trt_gsim' in ds:
        min_value = 0.01  # used in rmsep
        curves_by_rlz, mean_curves = combined_curves(ds)
        dists = []
        for rlz in sorted(curves_by_rlz):
            curves = curves_by_rlz[rlz]
            dist = sum(
                rmsep(mean_curves[imt], curves[imt], min_value)
                for imt in mean_curves.dtype.fields)
            dists.append((dist, rlz))
        for dist, rlz in sorted(dists):
            print('rlz=%s, rmsep=%s' % (rlz, dist))
Example #40
    def test_ok(self):
        fname = os.path.join(DATADIR, 'gmfdata.xml')
        sitecol, eids, gmfa = readinput.get_scenario_from_nrml(
            self.oqparam, fname)
        coords = list(zip(sitecol.mesh.lons, sitecol.mesh.lats))
        self.assertEqual(
            writers.write_csv(StringIO(), coords), '''\
0.000000E+00,0.000000E+00
0.000000E+00,1.000000E-01
0.000000E+00,2.000000E-01''')
        assert_allclose(eids, range(5))
        self.assertEqual(
            writers.write_csv(StringIO(), gmfa), '''\
PGA:float32,PGV:float32
6.824957E-01 3.656627E-01 8.700833E-01 3.279292E-01 6.968687E-01,6.824957E-01 3.656627E-01 8.700833E-01 3.279292E-01 6.968687E-01
1.270898E-01 2.561812E-01 2.106384E-01 2.357551E-01 2.581405E-01,1.270898E-01 2.561812E-01 2.106384E-01 2.357551E-01 2.581405E-01
1.603097E-01 1.106853E-01 2.232175E-01 1.781143E-01 1.351649E-01,1.603097E-01 1.106853E-01 2.232175E-01 1.781143E-01 1.351649E-01'''
        )
Example #41
def export_hazard_csv(key, dest, sitemesh, pmap, pdic, comment):
    """
    Export the hazard maps of the given realization into CSV.

    :param key: output_type and export_type
    :param dest: name of the exported file
    :param sitemesh: site collection
    :param pmap: a ProbabilityMap
    :param pdic: intensity measure types and levels
    :param comment: comment to use as header of the exported CSV file
    """
    if isinstance(pmap, dict):  # old format
        array = calc.convert_to_array(pmap, len(sitemesh), pdic)
    else:  # new format for engine >= 3.2
        array = pmap
    curves = util.compose_arrays(sitemesh, array)
    writers.write_csv(dest, curves, comment=comment)
    return [dest]
Example #42
def export_agglosses(ekey, dstore):
    oq = dstore['oqparam']
    loss_dt = oq.loss_dt()
    cc = dstore['cost_calculator']
    unit_by_lt = cc.units
    unit_by_lt['occupants'] = 'people'
    agglosses = dstore[ekey[0]]
    losses = []
    header = ['rlz_id', 'loss_type', 'unit', 'mean', 'stddev']
    for r in range(len(agglosses)):
        for l, lt in enumerate(loss_dt.names):
            unit = unit_by_lt[lt]
            mean = agglosses[r, l]['mean']
            stddev = agglosses[r, l]['stddev']
            losses.append((r, lt, unit, mean, stddev))
    dest = dstore.build_fname('agglosses', '', 'csv')
    writers.write_csv(dest, losses, header=header, comment=dstore.metadata)
    return [dest]
Example #43
def export_hmaps_csv(key, dest, sitemesh, array, pdic, comment):
    """
    Export the hazard maps of the given realization into CSV.

    :param key: output_type and export_type
    :param dest: name of the exported file
    :param sitemesh: site collection
    :param array: an array of shape (N, P * I)
    :param pdic: intensity measure types and levels
    :param comment: comment to use as header of the exported CSV file
    """
    imts = list(pdic)
    poes = pdic[imts[0]]
    dt = numpy.dtype([('%s-%s' % (imt, poe), F32) for imt in imts
                      for poe in poes])
    curves = util.compose_arrays(sitemesh, array.view(dt)[:, 0])
    writers.write_csv(dest, curves, comment=comment)
    return [dest]
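
The view trick above deserves a closer look: a flat (N, P * I) float array is reinterpreted through a composite dtype so that every column gets an 'IMT-poe' name before being composed with the site mesh. A self-contained sketch of just that step (the pdic contents here are made-up values for illustration):

import numpy

F32 = numpy.float32
pdic = {'PGA': [0.1, 0.02], 'SA(1.0)': [0.1, 0.02]}  # hypothetical
imts = list(pdic)
poes = pdic[imts[0]]
dt = numpy.dtype([('%s-%s' % (imt, poe), F32)
                  for imt in imts for poe in poes])
array = numpy.zeros((3, len(dt.names)), F32)  # one row per site
# each 16-byte row collapses into a single named record
named = array.view(dt)[:, 0]
print(named.dtype.names)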
Example #44
def show(what='contents', calc_id=-1, extra=()):
    """
    Show the content of a datastore (by default the last one).
    """
    datadir = datastore.get_datadir()
    if what == 'all':  # show all
        if not os.path.exists(datadir):
            return
        rows = []
        for calc_id in datastore.get_calc_ids(datadir):
            try:
                ds = read(calc_id)
                oq = ds['oqparam']
                cmode, descr = oq.calculation_mode, oq.description
            except:
                # invalid datastore file, or missing calculation_mode
                # and description attributes, perhaps due to a manual kill
                f = os.path.join(datadir, 'calc_%s.hdf5' % calc_id)
                logging.warn('Unreadable datastore %s', f)
                continue
            else:
                rows.append((calc_id, cmode, descr.encode('utf-8')))
        for row in sorted(rows, key=lambda row: row[0]):  # by calc_id
            print('#%d %s: %s' % row)
        return

    ds = read(calc_id)

    # this part is experimental
    if what == 'rlzs' and 'poes' in ds:
        min_value = 0.01  # used in rmsep
        getter = getters.PmapGetter(ds)
        sitecol = ds['sitecol']
        pmaps = getter.get_pmaps(sitecol.sids)
        weights = [rlz.weight for rlz in getter.rlzs]
        mean = stats.compute_pmap_stats(pmaps, [numpy.mean], weights)
        dists = []
        for rlz, pmap in zip(getter.rlzs, pmaps):
            dist = rmsep(mean.array, pmap.array, min_value)
            dists.append((dist, rlz))
        print('Realizations in order of distance from the mean curves')
        for dist, rlz in sorted(dists):
            print('%s: rmsep=%s' % (rlz, dist))
    elif view.keyfunc(what) in view:
        print(view(what, ds))
    elif what.split('/', 1)[0] in extract:
        print(extract(ds, what, *extra))
    elif what in ds:
        obj = ds[what]
        if hasattr(obj, 'value'):  # an array
            print(write_csv(io.BytesIO(), obj.value).decode('utf8'))
        else:
            print(obj)
    else:
        print('%s not found' % what)

    ds.close()
Example #45
def export_disagg_by_src_csv(ekey, dstore):
    paths = []
    srcdata = dstore['disagg_by_src/source_id'].value
    header = ['source_id', 'source_name', 'poe']
    by_poe = operator.itemgetter(2)
    for name in dstore['disagg_by_src']:
        if name == 'source_id':
            continue
        probs = dstore['disagg_by_src/' + name].value
        ok = probs > 0
        src = srcdata[ok]
        data = [header] + sorted(
            zip(src['source_id'], add_quotes(src['source_name']), probs[ok]),
            key=by_poe, reverse=True)
        path = dstore.export_path(name + '_Src.csv')
        writers.write_csv(path, data, fmt='%.7e')
        paths.append(path)
    return paths
Example #46
def export_hcurves_csv(ekey, dstore):
    """
    Exports the hazard curves into several .csv files

    :param ekey: export key, i.e. a pair (datastore key, fmt)
    :param dstore: datastore object
    """
    oq = dstore['oqparam']
    info = get_info(dstore)
    R = dstore['csm_info'].get_num_rlzs()
    sitecol = dstore['sitecol']
    sitemesh = get_mesh(sitecol)
    key, kind, fmt = get_kkf(ekey)
    fnames = []
    comment = dstore.metadata
    hmap_dt = oq.hmap_dt()
    for kind in oq.get_kinds(kind, R):
        fname = hazard_curve_name(dstore, (key, fmt), kind)
        comment.update(kind=kind, investigation_time=oq.investigation_time)
        if (key in ('hmaps', 'uhs') and oq.uniform_hazard_spectra
                or oq.hazard_maps):
            hmap = extract(dstore, 'hmaps?kind=' + kind)[kind]
        if key == 'uhs' and oq.poes and oq.uniform_hazard_spectra:
            uhs_curves = calc.make_uhs(hmap, info)
            writers.write_csv(fname,
                              util.compose_arrays(sitemesh, uhs_curves),
                              comment=comment)
            fnames.append(fname)
        elif key == 'hmaps' and oq.poes and oq.hazard_maps:
            fnames.extend(
                export_hmaps_csv(ekey, fname, sitemesh,
                                 hmap.flatten().view(hmap_dt), comment))
        elif key == 'hcurves':
            hcurves = extract(dstore, 'hcurves?kind=' + kind)[kind]
            if 'amplification' in oq.inputs:
                imtls = DictArray(
                    {imt: oq.soil_intensities
                     for imt in oq.imtls})
            else:
                imtls = oq.imtls
            fnames.extend(
                export_hcurves_by_imt_csv(ekey, kind, fname, sitecol, hcurves,
                                          imtls, comment))
    return sorted(fnames)
Example #47
def show(what, calc_id=-1):
    """
    Show the content of a datastore.

    :param what: key or view of the datastore
    :param calc_id: numeric calculation ID; if -1, show the last calculation
    """
    if what == 'all':  # show all
        if not os.path.exists(datastore.DATADIR):
            return
        rows = []
        for calc_id in datastore.get_calc_ids(datastore.DATADIR):
            try:
                ds = datastore.read(calc_id)
                oq = ds['oqparam']
                cmode, descr = oq.calculation_mode, oq.description
            except:
                # invalid datastore file, or missing calculation_mode
                # and description attributes, perhaps due to a manual kill
                f = os.path.join(datastore.DATADIR, 'calc_%s.hdf5' % calc_id)
                logging.warn('Unreadable datastore %s', f)
                continue
            else:
                rows.append((calc_id, cmode, descr.encode('utf-8')))
                ds.close()
        for row in sorted(rows, key=lambda row: row[0]):  # by calc_id
            print('#%d %s: %s' % row)
        return
    elif what == 'views':
        for name in sorted(datastore.view):
            print(name)
        return

    ds = datastore.read(calc_id)

    # this part is experimental
    if what == 'rlzs' and 'hcurves' in ds:
        min_value = 0.01  # used in rmsep
        curves_by_rlz, mean_curves = get_hcurves_and_means(ds)
        dists = []
        for rlz, curves in curves_by_rlz.items():
            dist = sum(
                rmsep(mean_curves[imt], curves[imt], min_value)
                for imt in mean_curves.dtype.fields)
            dists.append((dist, rlz))
        print('Realizations in order of distance from the mean curves')
        for dist, rlz in sorted(dists):
            print('%s: rmsep=%s' % (rlz, dist))
    elif what in datastore.view:
        print(datastore.view(what, ds))
    else:
        obj = ds[what]
        if hasattr(obj, 'value'):  # an array
            print(write_csv(io.StringIO(), obj.value))
        else:
            print(obj)
Example #48
    def test_ok(self):
        fname = os.path.join(DATADIR,  'gmfdata.xml')
        eids, gmfa = readinput.get_scenario_from_nrml(self.oqparam, fname)
        assert_allclose(eids, range(5))
        self.assertEqual(
            writers.write_csv(BytesIO(), gmfa), b'''\
PGA:float32,PGV:float32
6.824957E-01 3.656627E-01 8.700833E-01 3.279292E-01 6.968687E-01,6.824957E-01 3.656627E-01 8.700833E-01 3.279292E-01 6.968687E-01
1.270898E-01 2.561812E-01 2.106384E-01 2.357551E-01 2.581405E-01,1.270898E-01 2.561812E-01 2.106384E-01 2.357551E-01 2.581405E-01
1.603097E-01 1.106853E-01 2.232175E-01 1.781143E-01 1.351649E-01,1.603097E-01 1.106853E-01 2.232175E-01 1.781143E-01 1.351649E-01''')
Example #49
def export_ruptures_csv(ekey, dstore):
    """
    :param ekey: export key, i.e. a pair (datastore key, fmt)
    :param dstore: datastore object
    """
    oq = dstore['oqparam']
    if 'scenario' in oq.calculation_mode:
        return []
    dest = dstore.export_path('ruptures.csv')
    arr = extract(dstore, 'rupture_info')
    if export.sanity_check:
        bad = view('bad_ruptures', dstore)
        if bad.count('\n') > 3:  # nonempty rst_table
            print(bad, file=sys.stderr)
    comment = dstore.metadata
    comment.update(investigation_time=oq.investigation_time,
                   ses_per_logic_tree_path=oq.ses_per_logic_tree_path)
    writers.write_csv(dest, arr, comment=comment)
    return [dest]
Example #50
def export_rup_data(ekey, dstore):
    rupture_data = dstore[ekey[0]]
    paths = []
    for trt in sorted(rupture_data):
        fname = 'rup_data_%s.csv' % trt.lower().replace(' ', '_')
        data = rupture_data[trt].value
        data.sort(order='rupserial')
        if len(data):
            paths.append(write_csv(dstore.export_path(fname), data))
    return paths
Example #51
def export_stats_csv(key, export_dir, fname, sitecol, data_by_imt):
    """
    Export the scalar outputs.

    :param key: output_type and export_type
    :param export_dir: the directory where to export
    :param fname: file name
    :param sitecol: site collection
    :param data_by_imt: dictionary of floats keyed by IMT
    """
    dest = os.path.join(export_dir, fname)
    rows = []
    for imt in sorted(data_by_imt):
        row = [imt]
        for col in data_by_imt[imt]:
            row.append(scientificformat(col))
        rows.append(row)
    write_csv(dest, numpy.array(rows).T)
    return {fname: dest}
Example #52
def nrml_to_csv(fnames, outdir='.'):
    for fname in fnames:
        converter.fname = fname
        name = os.path.basename(fname)[:-4]  # strip .xml
        root = nrml.read(fname)
        srcs = collections.defaultdict(list)  # geom_index -> rows
        if 'nrml/0.4' in root['xmlns']:
            for srcnode in root.sourceModel:
                appendrow(converter.convert_node(srcnode), srcs)
        else:
            for srcgroup in root.sourceModel:
                trt = srcgroup['tectonicRegion']
                for srcnode in srcgroup:
                    srcnode['tectonicRegion'] = trt
                    appendrow(converter.convert_node(srcnode), srcs)
        for kind, rows in srcs.items():
            dest = os.path.join(outdir, '%s_%s.csv' % (name, kind))
            logging.info('Saving %s', dest)
            write_csv(dest, rows, header=rows[0]._fields)
Example #53
def export_gmf_data_csv(ekey, dstore):
    oq = dstore['oqparam']
    rlzs_assoc = dstore['csm_info'].get_rlzs_assoc()
    if 'scenario' in oq.calculation_mode:
        imtls = dstore['oqparam'].imtls
        gsims = [str(rlz.gsim_rlz) for rlz in rlzs_assoc.realizations]
        n_gmfs = oq.number_of_ground_motion_fields
        fields = ['%03d' % i for i in range(n_gmfs)]
        dt = numpy.dtype([(f, F32) for f in fields])
        etags, gmfs_ = calc.get_gmfs(dstore)
        sitemesh = get_mesh(dstore['sitecol'])
        writer = writers.CsvWriter(fmt='%.5f')
        for gsim, gmfa in zip(gsims, gmfs_):  # gmfa of shape (N, I, E)
            for imti, imt in enumerate(imtls):
                gmfs = numpy.zeros(len(gmfa), dt)
                for e, event in enumerate(dt.names):
                    gmfs[event] = gmfa[:, imti, e]
                dest = dstore.build_fname('gmf', '%s-%s' % (gsim, imt), 'csv')
                data = util.compose_arrays(sitemesh, gmfs)
                writer.save(data, dest)
        return writer.getsaved()
    else:  # event based
        eid = int(ekey[0].split('/')[1]) if '/' in ekey[0] else None
        gmfa = numpy.fromiter(
            GmfDataGetter.gen_gmfs(dstore['gmf_data'], rlzs_assoc, eid),
            gmf_data_dt)
        if eid is None:  # new format
            fname = dstore.build_fname('gmf', 'data', 'csv')
            gmfa.sort(order=['rlzi', 'sid', 'eid', 'imti'])
            writers.write_csv(fname, gmfa)
            return [fname]
        # old format for single eid
        fnames = []
        imts = list(oq.imtls)
        for rlzi, array in group_array(gmfa, 'rlzi').items():
            rlz = rlzs_assoc.realizations[rlzi]
            data, comment = _build_csv_data(array, rlz, dstore['sitecol'],
                                            imts, oq.investigation_time)
            fname = dstore.build_fname('gmf', '%d-rlz-%03d' % (eid, rlzi),
                                       'csv')
            writers.write_csv(fname, data, comment=comment)
            fnames.append(fname)
        return fnames
Example #54
def export_gmf_txt(key, dest, sitecol, ruptures, rlz, investigation_time):
    """
    :param key: output_type and export_type
    :param dest: name of the exported file
    :param sitecol: the full site collection
    :param ruptures: an ordered list of ruptures
    :param rlz: a realization object
    :param investigation_time: investigation time (None for scenario)
    """
    imts = ruptures[0].gmf.dtype.names
    # the csv file has the form
    # etag,indices,gmvs_imt_1,...,gmvs_imt_N
    rows = []
    for rupture in ruptures:
        indices = rupture.indices
        row = [rupture.etag, ' '.join(map(str, indices))] + \
              [rupture.gmf[imt] for imt in imts]
        rows.append(row)
    write_csv(dest, rows)
    return {key: [dest]}
Example #55
def export_event_loss_table(ekey, dstore):
    """
    :param ekey: export key, i.e. a pair (datastore key, fmt)
    :param dstore: datastore object
    """
    name, fmt = ekey
    fnames = []
    elt = dstore[name]
    tags = dstore['tags']
    for loss_type in elt:
        for rlz_uid in elt[loss_type]:
            data = [[tags[e['rup_id']], e['loss']]
                    for e in elt[loss_type][rlz_uid]]
            # the name is extracted from 'event_loss_table-rlzs' by removing
            # the last 5 characters: 'event_loss_table'
            fname = '%s-%s-%s.csv' % (name[:-5], rlz_uid, loss_type)
            dest = os.path.join(dstore.export_dir, fname)
            writers.write_csv(dest, sorted(data), fmt='%10.6E')
            fnames.append(dest)
    return fnames
Example #56
def export_assetcol(ekey, dstore):
    """
    Export the asset collection in CSV.
    """
    assetcol = dstore[ekey[0]].value
    sitemesh = dstore['sitemesh'].value
    taxonomies = dstore['taxonomies'].value
    header = list(assetcol.dtype.names)
    dest = os.path.join(dstore.export_dir, '%s.%s' % ekey)
    columns = [None] * len(header)
    for i, field in enumerate(header):
        if field == 'taxonomy':
            columns[i] = taxonomies[assetcol[field]]
        elif field == 'site_id':
            header[i] = 'lon_lat'
            columns[i] = sitemesh[assetcol[field]]
        else:
            columns[i] = assetcol[field]
    writers.write_csv(dest, [header] + list(zip(*columns)), fmt='%s')
    return [dest]
Example #57
    def test_ok(self):
        fname = os.path.join(DATADIR,  'gmfdata.xml')
        sitecol, tags, gmfa = readinput.get_scenario_from_nrml(
            self.oqparam, fname)
        coords = zip(sitecol.mesh.lons, sitecol.mesh.lats)
        self.assertEqual(writers.write_csv(StringIO(), coords), '''\
0.00000000E+00,0.00000000E+00
0.00000000E+00,1.00000000E-01
0.00000000E+00,2.00000000E-01''')
        self.assertEqual('\n'.join(tags), '''\
scenario-0000000000
scenario-0000000001
scenario-0000000002
scenario-0000000003
scenario-0000000004''')
        self.assertEqual(
            writers.write_csv(StringIO(), gmfa), '''\
PGV:float64:,PGA:float64:
6.82495715E-01 3.65662735E-01 8.70083359E-01 3.27929201E-01 6.96868642E-01,6.82495715E-01 3.65662735E-01 8.70083359E-01 3.27929201E-01 6.96868642E-01
1.27089832E-01 2.56181252E-01 2.10638411E-01 2.35755152E-01 2.58140526E-01,1.27089832E-01 2.56181252E-01 2.10638411E-01 2.35755152E-01 2.58140526E-01
1.60309678E-01 1.10685275E-01 2.23217460E-01 1.78114255E-01 1.35164914E-01,1.60309678E-01 1.10685275E-01 2.23217460E-01 1.78114255E-01 1.35164914E-01''')
Example #58
def export_agglosses(ekey, dstore):
    unit_by_lt = {ct['name']: ct['unit'] for ct in dstore['cost_types']}
    unit_by_lt['occupants'] = 'people'
    agglosses = dstore[ekey[0]]
    fnames = []
    for rlz in dstore['csm_info'].get_rlzs_assoc().realizations:
        loss = agglosses[rlz.ordinal]
        losses = numpy.zeros(
            1, numpy.dtype([(lt, agg_dt) for lt in loss.dtype.names]))
        header = []
        for lt in loss.dtype.names:
            losses[lt]['unit'] = unit_by_lt[lt]
            header.append('%s-unit' % lt)
            losses[lt]['mean'] = loss[lt]['mean']
            header.append('%s-mean' % lt)
            losses[lt]['stddev'] = loss[lt]['stddev']
            header.append('%s-stddev' % lt)
        dest = dstore.build_fname('agglosses', rlz, 'csv')
        writers.write_csv(dest, losses, header=header)
        fnames.append(dest)
    return sorted(fnames)
Example #59
def export_gmf_txt(key, dest, sitecol, imts, ruptures, rlz,
                   investigation_time):
    """
    :param key: output_type and export_type
    :param dest: name of the exported file
    :param sitecol: the full site collection
    :param imts: the list of intensity measure types
    :param ruptures: an ordered list of ruptures
    :param rlz: a realization object
    :param investigation_time: investigation time (None for scenario)
    """
    # the csv file has the form
    # etag,indices,gmvs_imt_1,...,gmvs_imt_N
    rows = []
    for rupture in ruptures:
        indices = rupture.indices
        gmvs = [a['gmv'] for a in group_array(rupture.gmfa, 'imti').values()]
        row = [rupture.etag, ' '.join(map(str, indices))] + gmvs
        rows.append(row)
    write_csv(dest, rows)
    return {key: [dest]}
Example #60
def export_agglosses(ekey, dstore):
    oq = dstore['oqparam']
    loss_dt = oq.loss_dt()
    cc = dstore['assetcol/cost_calculator']
    unit_by_lt = cc.units
    unit_by_lt['occupants'] = 'people'
    agglosses = dstore[ekey[0]]
    fnames = []
    for rlz in dstore['csm_info'].get_rlzs_assoc().realizations:
        loss = agglosses[rlz.ordinal]
        losses = []
        header = ['loss_type', 'unit', 'mean', 'stddev']
        for l, lt in enumerate(loss_dt.names):
            unit = unit_by_lt[lt.replace('_ins', '')]
            mean = loss[l]['mean']
            stddev = loss[l]['stddev']
            losses.append((lt, unit, mean, stddev))
        dest = dstore.build_fname('agglosses', rlz, 'csv')
        writers.write_csv(dest, losses, header=header)
        fnames.append(dest)
    return sorted(fnames)