Esempio n. 1
0
def export_agg_losses(ekey, dstore):
    """
    :param ekey: export key, i.e. a pair (datastore key, fmt)
    :param dstore: datastore object
    """
    key = ekey[0]
    oq = dstore['oqparam']
    # tag names to aggregate by apply only to the 'agg_' datasets
    agg_by = oq.aggregate_by if key.startswith('agg_') else []
    name, value, rlzs_or_stats = _get_data(dstore, key, oq.hazard_stats())
    # value has shape (K, R, L)
    writer = writers.CsvWriter(fmt=writers.FIVEDIGITS)
    tagcol = dstore['assetcol/tagcol']
    # one tag tuple per aggregation key, plus a final '*total*' row
    tags = list(tagcol.get_aggkey(agg_by).values())
    tags.append(('*total*',) * len(agg_by))
    expvalue = dstore['agg_values'][()]  # shape (K+1, L)
    header = ('loss_type',) + tuple(agg_by) + (
        'loss_value', 'exposed_value', 'loss_ratio')
    md = dstore.metadata
    md.update(dict(
        investigation_time=oq.investigation_time,
        risk_investigation_time=(oq.risk_investigation_time or
                                 oq.investigation_time)))
    for r, ros in enumerate(rlzs_or_stats):
        if not isinstance(ros, str):
            ros = 'rlz-%03d' % ros
        # skip zero losses: many tag combinations are missing
        rows = [
            (oq.loss_names[li],) + tags[k] +
            (loss, expvalue[k, li], loss / expvalue[k, li])
            for (k, li), loss in numpy.ndenumerate(value[:, r]) if loss]
        dest = dstore.build_fname(name, ros, 'csv')
        writer.save(rows, dest, header, comment=md)
    return writer.getsaved()
Esempio n. 2
0
def export_damages_csv(ekey, dstore):
    """
    Export the damage distributions per asset as CSV files, one per
    realization (or statistic).
    """
    oq = dstore['oqparam']
    dmg_dt = build_damage_dt(dstore)
    rlzs = dstore['full_lt'].get_realizations()
    data = dstore[ekey[0]]
    writer = writers.CsvWriter(fmt='%.6E')
    assets = get_assets(dstore)
    md = dstore.metadata
    if oq.investigation_time:
        rit = oq.risk_investigation_time or oq.investigation_time
        md.update(dict(investigation_time=oq.investigation_time,
                       risk_investigation_time=rit))
    if ekey[0].endswith('stats'):
        rlzs_or_stats = oq.hazard_stats()
    else:
        rlzs_or_stats = ['rlz-%03d' % r for r in range(len(rlzs))]
    name = ekey[0].split('-')[0]
    if oq.calculation_mode != 'classical_damage':
        name = 'avg_' + name
    # the builder choice is loop-invariant, hoist it
    build = (modal_damage_array if oq.modal_damage_state
             else build_damage_array)
    for i, ros in enumerate(rlzs_or_stats):
        damages = build(data[:, i], dmg_dt)
        fname = dstore.build_fname(name, ros, ekey[1])
        writer.save(compose_arrays(assets, damages), fname,
                    comment=md, renamedict=dict(id='asset_id'))
    return writer.getsaved()
Esempio n. 3
0
def export_event_loss_table(ekey, dstore):
    """
    Export the total event loss table as risk_by_event.csv.

    :param ekey: export key, i.e. a pair (datastore key, fmt)
    :param dstore: datastore object
    """
    oq = dstore['oqparam']
    writer = writers.CsvWriter(fmt=writers.FIVEDIGITS)
    dest = dstore.build_fname('risk_by_event', '', 'csv')
    md = dstore.metadata
    # scenario calculations have no investigation times
    if 'scenario' not in oq.calculation_mode:
        md.update(dict(investigation_time=oq.investigation_time,
                       risk_investigation_time=oq.risk_investigation_time
                       or oq.investigation_time))
    events = dstore['events'][()]
    # K selects the total aggregation rows (agg_id == K); presumably the
    # number of aggregation keys -- confirm against the datastore writer
    K = dstore.get_attr('risk_by_event', 'K', 0)
    try:
        lstates = dstore.get_attr('risk_by_event', 'limit_states').split()
    except KeyError:  # ebrisk, no limit states
        lstates = []
    lnames = numpy.array(oq.loss_types)
    df = dstore.read_df('risk_by_event', 'agg_id', dict(agg_id=K))
    # replace the numeric loss_id column with the loss type names
    df['loss_type'] = lnames[df.loss_id.to_numpy()]
    del df['loss_id']
    if 'variance' in df.columns:
        del df['variance']
    # rename dmg_1, dmg_2, ... into the corresponding limit state names
    ren = {'dmg_%d' % i: lstate for i, lstate in enumerate(lstates, 1)}
    df.rename(columns=ren, inplace=True)
    evs = events[df.event_id.to_numpy()]
    # event based calculations also export the rupture ID and, when
    # available, the year of each event
    if 'scenario' not in oq.calculation_mode:
        df['rup_id'] = evs['rup_id']
    if 'scenario' not in oq.calculation_mode and 'year' in evs.dtype.names:
        df['year'] = evs['year']
    df.sort_values(['event_id', 'loss_type'], inplace=True)
    writer.save(df, dest, comment=md)
    return writer.getsaved()
Esempio n. 4
0
def export_gmf_data_csv(ekey, dstore):
    """
    Export the ground motion fields as gmf-data, sitemesh and, when
    present, sigma_epsilon CSV files.
    """
    oq = dstore['oqparam']
    imts = list(oq.imtls)
    # map the internal column names into the exported ones
    ren = {'sid': 'site_id', 'eid': 'event_id'}
    for m, imt in enumerate(imts):
        ren['gmv_%d' % m] = 'gmv_' + imt
    for sec_imt in oq.get_sec_imts():
        ren[sec_imt] = 'sep_' + sec_imt
    df = dstore.read_df('gmf_data').sort_values(['eid', 'sid'])
    df.rename(columns=ren, inplace=True)
    event_id = dstore['events']['id']
    # export the site mesh
    sitemesh_fname = dstore.build_fname('sitemesh', '', 'csv')
    arr = dstore['sitecol'][['lon', 'lat']]
    sids = numpy.arange(len(arr), dtype=U32)
    writers.write_csv(sitemesh_fname,
                      util.compose_arrays(sids, arr, 'site_id'),
                      comment=dstore.metadata)
    # export the GMFs
    gmf_fname = dstore.build_fname('gmf', 'data', 'csv')
    writers.CsvWriter(fmt=writers.FIVEDIGITS).save(
        df, gmf_fname, comment=dstore.metadata)
    if 'sigma_epsilon' not in dstore['gmf_data']:
        return [gmf_fname, sitemesh_fname]
    # export sigma_epsilon, replacing the event indices with event IDs
    sig_eps_csv = dstore.build_fname('sigma_epsilon', '', 'csv')
    sig_eps = dstore['gmf_data/sigma_epsilon'][()]
    sig_eps['eid'] = event_id[sig_eps['eid']]
    sig_eps.sort(order='eid')
    header = ['event_id'] + list(sig_eps.dtype.names)[1:]
    writers.write_csv(sig_eps_csv, sig_eps, header=header,
                      comment=dstore.metadata)
    return [gmf_fname, sig_eps_csv, sitemesh_fname]
Esempio n. 5
0
def export_asset_risk_csv(ekey, dstore):
    """
    :param ekey: export key, i.e. a pair (datastore key, fmt)
    :param dstore: datastore object
    """
    writer = writers.CsvWriter(fmt=writers.FIVEDIGITS)
    fname = dstore.export_path('%s.%s' % (sanitize(ekey[0]), ekey[1]))
    md = json.loads(extract(dstore, 'exposure_metadata').json)
    # tag name -> list of string representations for the tag indices
    tostr = {'taxonomy': md['taxonomy']}
    tostr.update((tagname, md[tagname]) for tagname in md['tagnames'])
    tagnames = sorted(set(md['tagnames']) - {'id'})
    arr = extract(dstore, 'asset_risk').array
    lossnames = sorted(n for n in arr.dtype.names if 'loss' in n)
    expnames = [n for n in arr.dtype.names if n not in md['tagnames']
                and 'loss' not in n and n not in 'lon lat']
    colnames = tagnames + ['lon', 'lat'] + expnames + lossnames
    # sanity check: every field of the array must be exported exactly once
    assert len(colnames) == len(arr.dtype.names)
    rows = []
    for rec in arr:
        row = []
        for name in colnames:
            value = rec[name]
            try:
                # tag indices are converted back into their string form
                row.append(tostr[name][value])
            except KeyError:
                row.append(value)
        rows.append(row)
    writer.save(rows, fname, colnames)
    return [fname]
Esempio n. 6
0
def export_agg_curve_rlzs(ekey, dstore):
    """
    Export aggregate loss curves, one CSV file per realization or
    statistic.

    :param ekey: export key, i.e. a pair (datastore key, fmt)
    :param dstore: datastore object
    """
    oq = dstore['oqparam']
    lnames = numpy.array(oq.loss_names)
    agg_tags = get_agg_tags(dstore, oq.aggregate_by)
    aggvalue = dstore['agg_values'][()]  # shape (K+1, L)
    md = dstore.metadata
    md['risk_investigation_time'] = (
        oq.risk_investigation_time or oq.investigation_time)
    writer = writers.CsvWriter(fmt=writers.FIVEDIGITS)
    descr = hdf5.get_shape_descr(dstore[ekey[0]].attrs['json'])
    # ekey[0] has the form '<name>-rlzs' or '<name>-stats'; suffix[:-1]
    # ('rlz' or 'stat') is the axis name in the shape descriptor
    name, suffix = ekey[0].split('-')
    rlzs_or_stats = descr[suffix[:-1]]
    aw = hdf5.ArrayWrapper(dstore[ekey[0]], descr, ('loss_value',))
    dataf = aw.to_dframe().set_index(suffix[:-1])
    for r, ros in enumerate(rlzs_or_stats):
        md['kind'] = f'{name}-' + (
            ros if isinstance(ros, str) else 'rlz-%03d' % ros)
        try:
            df = dataf[dataf.index == ros]
        except KeyError:
            logging.warning('No data for %s', md['kind'])
            continue
        dic = {col: df[col].to_numpy() for col in dataf.columns}
        # convert the numeric loss type index (lti) into loss type names
        dic['loss_type'] = lnames[dic['lti']]
        for tagname in oq.aggregate_by:
            dic[tagname] = agg_tags[tagname][dic['agg_id']]
        # loss ratio with respect to the aggregate exposed value
        dic['loss_ratio'] = dic['loss_value'] / aggvalue[
            dic['agg_id'], dic.pop('lti')]
        dic['annual_frequency_of_exceedence'] = 1 / dic['return_period']
        del dic['agg_id']
        dest = dstore.build_fname(md['kind'], '', 'csv')
        writer.save(pandas.DataFrame(dic), dest, comment=md)
    return writer.getsaved()
Esempio n. 7
0
def export_avg_losses(ekey, dstore):
    """
    :param ekey: export key, i.e. a pair (datastore key, fmt)
    :param dstore: datastore object
    """
    dskey = ekey[0]
    oq = dstore['oqparam']
    # one float32 field per loss type
    loss_dt = [(ln, F32) for ln in oq.loss_names]
    name, value, rlzs_or_stats = _get_data(dstore, dskey, oq.hazard_stats())
    writer = writers.CsvWriter(fmt=writers.FIVEDIGITS)
    assets = get_assets(dstore)
    md = dstore.metadata
    md.update(dict(
        investigation_time=oq.investigation_time,
        risk_investigation_time=(oq.risk_investigation_time or
                                 oq.investigation_time)))
    # value has shape (A, R, L): iterate over realizations/statistics
    for ros, values in zip(rlzs_or_stats, value.transpose(1, 0, 2)):
        arr = numpy.zeros(len(values), loss_dt)
        for li, ln in enumerate(oq.loss_names):
            arr[ln] = values[:, li]
        dest = dstore.build_fname(name, ros, 'csv')
        writer.save(compose_arrays(assets, arr), dest, comment=md,
                    renamedict=dict(id='asset_id'))
    return writer.getsaved()
Esempio n. 8
0
def export_agg_risk_csv(ekey, dstore):
    """
    :param ekey: export key, i.e. a pair (datastore key, fmt)
    :param dstore: datastore object
    """
    dset = dstore['agg_risk']
    dest = dstore.export_path('%s.%s' % (sanitize(ekey[0]), ekey[1]))
    writer = writers.CsvWriter(fmt=writers.FIVEDIGITS)
    # export the full dataset, using the field names as the header
    writer.save(dset[()], dest, dset.dtype.names)
    return [dest]
Esempio n. 9
0
def export_avg_gmf_csv(ekey, dstore):
    """
    Export the average ground motion field per site as avg_gmf.csv.
    """
    oq = dstore['oqparam']
    sitecol = dstore['sitecol'].complete
    data = dstore['avg_gmf'][:]  # shape (2, N, M)
    dic = {'site_id': sitecol.sids,
           'lon': sitecol.lons,
           'lat': sitecol.lats}
    for m, imt in enumerate(oq.imtls):
        # row 0: mean GMVs; row 1: 'gsd' values (presumably the
        # geometric standard deviation -- confirm with the calculator)
        dic['gmv_' + imt] = data[0, :, m]
        dic['gsd_' + imt] = data[1, :, m]
    fname = dstore.build_fname('avg_gmf', '', 'csv')
    writer = writers.CsvWriter(fmt=writers.FIVEDIGITS)
    writer.save(pandas.DataFrame(dic), fname, comment=dstore.metadata)
    return [fname]
Esempio n. 10
0
def export_aggregate_by_csv(ekey, dstore):
    """
    Export an 'aggregate/<what>' extraction as a single CSV file.

    :param ekey: export key, i.e. a pair (datastore key, fmt)
    :param dstore: datastore object
    :returns: the list of exported file names (one element)
    """
    # ekey[0] has the form 'aggregate/<what>'; the prefix is not needed
    _token, what = ekey[0].split('/', 1)
    aw = extract(dstore, 'aggregate/' + what)
    writer = writers.CsvWriter(fmt=writers.FIVEDIGITS)
    fname = dstore.export_path('%s.%s' % (sanitize(ekey[0]), ekey[1]))
    writer.save(aw.to_dframe(), fname)
    return [fname]
Esempio n. 11
0
def export_src_loss_table(ekey, dstore):
    """
    :param ekey: export key, i.e. a pair (datastore key, fmt)
    :param dstore: datastore object
    """
    oq = dstore['oqparam']
    md = dstore.metadata
    rit = oq.risk_investigation_time or oq.investigation_time
    md.update(dict(investigation_time=oq.investigation_time,
                   risk_investigation_time=rit))
    aw = hdf5.ArrayWrapper.from_(dstore['src_loss_table'], 'loss_value')
    dest = dstore.build_fname('src_loss_table', '', 'csv')
    writer = writers.CsvWriter(fmt=writers.FIVEDIGITS)
    writer.save(aw.to_dframe(), dest, comment=md)
    return writer.getsaved()
Esempio n. 12
0
def export_aggrisk(ekey, dstore):
    """
    Export the aggregated risk (per tag combination) as aggrisk.csv.

    :param ekey: export key, i.e. a pair (datastore key, fmt)
    :param dstore: datastore object
    """
    oq = dstore['oqparam']
    tagnames = oq.aggregate_by
    writer = writers.CsvWriter(fmt=writers.FIVEDIGITS)
    tagcol = dstore['assetcol/tagcol']
    # one tag tuple per aggregation key, plus a final '*total*' row
    aggtags = list(tagcol.get_aggkey(tagnames).values())
    aggtags.append(('*total*',) * len(tagnames))
    agg_values = dstore['agg_values'][()]  # shape K+1
    md = dstore.metadata
    md.update(dict(investigation_time=oq.investigation_time,
                   risk_investigation_time=oq.risk_investigation_time or
                   oq.investigation_time))

    aggrisk = dstore.read_df('aggrisk')
    cols = [col for col in aggrisk.columns
            if col not in {'agg_id', 'rlz_id', 'loss_id'}]
    # consequence columns, i.e. everything that is not a damage state
    csqs = [col for col in cols if not col.startswith('dmg_')]
    header = ['loss_type'] + tagnames + ['exposed_value'] + [
        '%s_ratio' % csq for csq in csqs]
    dest = dstore.build_fname('aggrisk', '', 'csv')
    out = general.AccumDict(accum=[])
    # export the rlz_id column only when there are multiple realizations
    manyrlzs = hasattr(aggrisk, 'rlz_id') and len(aggrisk.rlz_id.unique()) > 1
    for (agg_id, loss_id), df in aggrisk.groupby(['agg_id', 'loss_id']):
        n = len(df)
        loss_type = oq.loss_types[loss_id]
        out['loss_type'].extend([loss_type] * n)
        for tagname, tag in zip(tagnames, aggtags[agg_id]):
            out[tagname].extend([tag] * n)
        if manyrlzs:
            out['rlz_id'].extend(df.rlz_id)
        for col in cols:
            if col in csqs:
                # consequences are exported both as absolute values and
                # as ratios of the aggregate exposed value
                aval = scientific.get_agg_value(
                    col, agg_values, agg_id, loss_type)
                out[col + '_value'].extend(df[col])
                out[col + '_ratio'].extend(df[col] / aval)
            else:
                out[col].extend(df[col])
    # rename dmg_0, dmg_1, ... into no_damage and the limit state names
    dsdic = {'dmg_0': 'no_damage'}
    for s, ls in enumerate(oq.limit_states, 1):
        dsdic['dmg_%d' % s] = ls
    df = pandas.DataFrame(out).rename(columns=dsdic)
    writer.save(df, dest, header, comment=md)
    return [dest]
Esempio n. 13
0
def export_damages_csv(ekey, dstore):
    """
    Export the damage distributions per asset, one CSV per realization
    or statistic; for event_based_damage only the consequences (if any)
    are exported.
    """
    oq = dstore['oqparam']
    ebd = oq.calculation_mode == 'event_based_damage'
    dmg_dt = build_damage_dt(dstore)
    rlzs = dstore['full_lt'].get_realizations()
    orig = dstore[ekey[0]][:]  # shape (A, R, L, D)
    writer = writers.CsvWriter(fmt='%.6E')
    assets = get_assets(dstore)
    md = dstore.metadata
    if oq.investigation_time:
        rit = oq.risk_investigation_time or oq.investigation_time
        md.update(
            dict(investigation_time=oq.investigation_time,
                 risk_investigation_time=rit))
    # number of damage states, including 'no damage'
    D = len(oq.limit_states) + 1
    R = 1 if oq.collect_rlzs else len(rlzs)
    if ekey[0].endswith('stats'):
        rlzs_or_stats = oq.hazard_stats()
    else:
        rlzs_or_stats = ['rlz-%03d' % r for r in range(R)]
    name = ekey[0].split('-')[0]
    if oq.calculation_mode != 'classical_damage':
        name = 'avg_' + name
    for i, ros in enumerate(rlzs_or_stats):
        if ebd:  # export only the consequences from damages-rlzs, i == 0
            # scaling factor E * time_ratio / R; presumably converts
            # cumulative damages into annual averages -- confirm
            rate = len(dstore['events']) * oq.time_ratio / len(rlzs)
            data = orig[:, i] * rate
            A, L, Dc = data.shape
            if Dc == D:  # no consequences, export nothing
                return
            csq_dt = build_csq_dt(dstore)
            damages = numpy.zeros(A, csq_dt)
            for a in range(A):
                for li, lt in enumerate(csq_dt.names):
                    # columns beyond the D damage states are consequences
                    damages[lt][a] = tuple(data[a, li, D:Dc])
            fname = dstore.build_fname('avg_risk', ros, ekey[1])
        else:  # scenario_damage, classical_damage
            if oq.modal_damage_state:
                damages = modal_damage_array(orig[:, i], dmg_dt)
            else:
                damages = build_damage_array(orig[:, i], dmg_dt)
            fname = dstore.build_fname(name, ros, ekey[1])
        writer.save(compose_arrays(assets, damages),
                    fname,
                    comment=md,
                    renamedict=dict(id='asset_id'))
    return writer.getsaved()
Esempio n. 14
0
def export_aggcurves_csv(ekey, dstore):
    """
    Export the aggregate curves with their consequence values/ratios.

    :param ekey: export key, i.e. a pair (datastore key, fmt)
    :param dstore: datastore object
    """
    oq = dstore['oqparam']
    E = len(dstore['events'])
    R = len(dstore['weights'])
    lossnames = numpy.array(oq.loss_names)
    aggtags = get_agg_tags(dstore, oq.aggregate_by)
    df = dstore.read_df('aggcurves')
    consequences = [
        col for col in df.columns if col in scientific.KNOWN_CONSEQUENCES
    ]
    # add one column per aggregation tag
    for tagname, tags in aggtags.items():
        df[tagname] = tags[df.agg_id]
    # replace the numeric loss_id column with the loss type names
    df['loss_type'] = lossnames[df.loss_id.to_numpy()]
    del df['loss_id']
    dest = dstore.export_path('%s.%s' % ekey)
    writer = writers.CsvWriter(fmt=writers.FIVEDIGITS)
    md = dstore.metadata
    md['risk_investigation_time'] = (oq.risk_investigation_time
                                     or oq.investigation_time)
    md['num_events'] = E
    md['effective_time'] = (oq.investigation_time *
                            oq.ses_per_logic_tree_path * R)
    md['limit_states'] = dstore.get_attr('aggcurves', 'limit_states')

    # aggcurves
    agg_id = df.pop('agg_id')

    agg_values = dstore['agg_values'][:]
    edic = general.AccumDict(accum=[])
    # NOTE(review): this unpacking assumes a single loss type in the
    # dataframe and raises ValueError otherwise -- confirm the callers
    [loss_type] = df.loss_type.unique()
    for cons in consequences:
        for col in df.columns:
            if col not in scientific.KNOWN_CONSEQUENCES:
                edic[col].extend(df[col])
            elif col == cons:
                # export value, type and ratio for each consequence
                edic['conseq_value'].extend(df[col])
                edic['conseq_type'].extend([col] * len(df))
                aval = scientific.get_agg_value(cons, agg_values, agg_id,
                                                loss_type)
                edic['conseq_ratio'].extend(df[col] / aval)
    writer.save(pandas.DataFrame(edic), dest, comment=md)
    return [dest]
Esempio n. 15
0
def export_cond_spectra(ekey, dstore):
    """
    Export one conditional-spectrum-<p>.csv file per intensity level.
    """
    dset = dstore[ekey[0]]  # shape (P, 2, M)
    sa_periods = dset.attrs['periods']
    writer = writers.CsvWriter(fmt=writers.FIVEDIGITS)
    paths = []
    for p, iml in enumerate(dset.attrs['imls']):
        # index 0 holds the spectrum values, index 1 the stddevs
        frame = pandas.DataFrame({'sa_period': sa_periods,
                                  'spectrum_val': dset[p, 0],
                                  'spectrum_std': dset[p, 1]})
        meta = dstore.metadata.copy()
        meta['iml'] = iml
        path = dstore.export_path('conditional-spectrum-%d.csv' % p)
        writer.save(frame, path, comment=meta)
        paths.append(path)
    return paths
Esempio n. 16
0
def export_bcr_map(ekey, dstore):
    """
    Export the benefit-cost-ratio map, one CSV per realization or
    statistic.

    :param ekey: export key, i.e. a pair (datastore key, fmt)
    :param dstore: datastore object
    :returns: the list of exported file names
    """
    oq = dstore['oqparam']
    assets = get_assets(dstore)
    bcr_data = dstore[ekey[0]]
    R = bcr_data.shape[1]  # number of realizations
    if ekey[0].endswith('stats'):
        rlzs_or_stats = oq.hazard_stats()
    else:
        rlzs_or_stats = ['rlz-%03d' % r for r in range(R)]
    writer = writers.CsvWriter(fmt=writers.FIVEDIGITS)
    for t, ros in enumerate(rlzs_or_stats):
        path = dstore.build_fname('bcr', ros, 'csv')
        writer.save(compose_arrays(assets, bcr_data[:, t]), path,
                    renamedict=dict(id='asset_id'))
    return writer.getsaved()
Esempio n. 17
0
def export_avg_gmf_csv(ekey, dstore):
    """
    Export the average ground motion field per site as avg_gmf.csv.
    """
    oq = dstore['oqparam']
    sitecol = dstore['sitecol'].complete
    # prefer the custom site ID when the site collection defines one
    if 'custom_site_id' in sitecol.array.dtype.names:
        dic = {'custom_site_id': decode(sitecol.custom_site_id)}
    else:
        dic = {'site_id': sitecol.sids}
    dic['lon'] = sitecol.lons
    dic['lat'] = sitecol.lats
    data = dstore['avg_gmf'][:]  # shape (2, N, M)
    for m, imt in enumerate(oq.imtls):
        # row 0: mean GMVs; row 1: 'gsd' values (presumably the
        # geometric standard deviation -- confirm with the calculator)
        dic['gmv_' + imt] = data[0, :, m]
        dic['gsd_' + imt] = data[1, :, m]
    fname = dstore.build_fname('avg_gmf', '', 'csv')
    writer = writers.CsvWriter(fmt=writers.FIVEDIGITS)
    writer.save(pandas.DataFrame(dic), fname, comment=dstore.metadata)
    return [fname]
Esempio n. 18
0
def export_aggcurves_csv(ekey, dstore):
    """
    Export the aggregate curves grouped by (agg_id, rlz_id, loss_id),
    with consequence values and ratios.

    :param ekey: export key, i.e. a pair (datastore key, fmt)
    :param dstore: datastore object
    """
    oq = dstore['oqparam']
    E = len(dstore['events'])
    R = len(dstore['weights'])
    lossnames = numpy.array(oq.loss_types)
    aggtags = get_agg_tags(dstore, oq.aggregate_by)
    df = dstore.read_df('aggcurves')
    consequences = [col for col in df.columns
                    if col in scientific.KNOWN_CONSEQUENCES]
    # add one column per aggregation tag
    for tagname, tags in aggtags.items():
        df[tagname] = tags[df.agg_id]
    dest = dstore.export_path('%s.%s' % ekey)
    writer = writers.CsvWriter(fmt=writers.FIVEDIGITS)
    md = dstore.metadata
    md['risk_investigation_time'] = (oq.risk_investigation_time or
                                     oq.investigation_time)
    md['num_events'] = E
    md['effective_time'] = (
        oq.investigation_time * oq.ses_per_logic_tree_path * R)
    md['limit_states'] = dstore.get_attr('aggcurves', 'limit_states')

    # aggcurves
    agg_values = dstore['agg_values'][:]
    # plain data columns, i.e. neither consequences nor group keys
    cols = [col for col in df.columns if col not in consequences
            and col not in ('agg_id', 'rlz_id', 'loss_id')]
    edic = general.AccumDict(accum=[])
    # export the rlz_id column only when there are multiple realizations
    manyrlzs = not oq.collect_rlzs and R > 1
    for (agg_id, rlz_id, loss_id), d in df.groupby(
            ['agg_id', 'rlz_id', 'loss_id']):
        for col in cols:
            edic[col].extend(d[col])
        edic['loss_type'].extend([lossnames[loss_id]] * len(d))
        if manyrlzs:
            edic['rlz_id'].extend([rlz_id] * len(d))
        for cons in consequences:
            # consequences are exported both as absolute values and as
            # ratios of the aggregate exposed value
            edic[cons + '_value'].extend(d[cons])
            aval = scientific.get_agg_value(
                cons, agg_values, agg_id, lossnames[loss_id])
            edic[cons + '_ratio'].extend(d[cons] / aval)
    writer.save(pandas.DataFrame(edic), dest, comment=md)
    return [dest]
Esempio n. 19
0
 def export_csv(self, spec, asset_refs, curves_dict):
     """
     Export the loss curves as CSV files, one per key in curves_dict.

     :param spec: a string specifier (possibly empty) used in the name
     :param asset_refs: names of the asset
     :param curves_dict: a dictionary tag -> loss curves
     """
     aval = self.assetcol.arr_value(self.loss_types)
     writer = writers.CsvWriter(fmt=writers.FIVEDIGITS)
     # a 'builder' attribute is present only for event based risk
     ebr = hasattr(self, 'builder')
     for key in sorted(curves_dict):
         recs = curves_dict[key]
         # the first row of `data` is the CSV header
         if ebr:
             data = [[
                 'asset_id', 'loss_type', 'loss_value', 'loss_ratio',
                 'return_period', 'annual_frequency_of_exceedence'
             ]]
         else:
             data = [[
                 'asset_id', 'loss_type', 'loss_value', 'loss_ratio', 'poe'
             ]]
         for li, lt in enumerate(self.loss_types):
             if ebr:  # event_based_risk
                 array = recs[:, :, li]  # shape (A, P, LI)
                 periods = self.builder.return_periods
                 # rows sorted by asset reference
                 for aref, losses, val in sorted(
                         zip(asset_refs, array, aval[:, li])):
                     for period, loss in zip(periods, losses):
                         data.append((aref, lt, loss, loss / val, period,
                                      1. / period))
             else:  # classical_risk
                 array = recs[lt]  # shape (A,) loss_curve_dt
                 for aref, losses, poes, val in sorted(
                         zip(asset_refs, array['losses'], array['poes'],
                             aval[:, li])):
                     for loss, poe in zip(losses, poes):
                         data.append((aref, lt, loss, loss / val, poe))
         dest = self.dstore.build_fname(
             'loss_curves', '%s-%s' % (spec, key) if spec else key, 'csv')
         com = dict(kind=key,
                    risk_investigation_time=self.oq.risk_investigation_time
                    or self.oq.investigation_time)
         writer.save(data,
                     dest,
                     comment=com,
                     renamedict=dict(id='asset_id'))
     return writer.getsaved()
Esempio n. 20
0
def export_aggcurves_csv(ekey, dstore):
    """
    Export the aggregate damage curves plus a separate dmgcsq file with
    the aggregate damages/consequences (the return_period == 0 rows).

    :param ekey: export key, i.e. a pair (datastore key, fmt)
    :param dstore: datastore object
    """
    oq = dstore['oqparam']
    E = len(dstore['events'])
    R = len(dstore['weights'])
    lossnames = numpy.array(oq.loss_names)
    aggtags = get_agg_tags(dstore, oq.aggregate_by)
    df = dstore.read_df('aggcurves')
    # add one column per aggregation tag
    for tagname, tags in aggtags.items():
        df[tagname] = tags[df.agg_id]
    # replace the numeric loss_id column with the loss type names
    df['loss_type'] = lossnames[df.loss_id.to_numpy()]
    del df['loss_id']
    dest1 = dstore.export_path('%s.%s' % ekey)
    dest2 = dstore.export_path('dmgcsq.%s' % ekey[1])
    writer = writers.CsvWriter(fmt=writers.FIVEDIGITS)
    md = dstore.metadata
    md['risk_investigation_time'] = (oq.risk_investigation_time or
                                     oq.investigation_time)
    md['num_events'] = E
    md['effective_time'] = (
        oq.investigation_time * oq.ses_per_logic_tree_path * R)
    md['limit_states'] = dstore.get_attr('aggcurves', 'limit_states')
    dmg_states = ['nodamage'] + md['limit_states'].split()

    # aggregate damages/consequences
    dmgcsq = df[df.return_period == 0].set_index('agg_id')  # length K+1
    agg_number = dstore['agg_number'][dmgcsq.index.to_numpy()]
    dmgs = [col for col in df.columns if col.startswith('dmg_')]
    # dmg_0 (no damage) is reconstructed as the scaled total number of
    # buildings minus the damaged ones; relies on oq.time_ratio -- the
    # exact scaling semantics are assumed, confirm with the calculator
    dmg0 = agg_number * E * oq.time_ratio - dmgcsq[dmgs].to_numpy().sum(axis=1)
    dmgcsq.insert(0, 'dmg_0', dmg0)
    dmgcsq.insert(0, 'number', agg_number)
    del dmgcsq['return_period']
    writer.save(rename(dmgcsq, dmg_states), dest2, comment=md)

    # aggcurves
    del df['agg_id']
    writer.save(rename(df[df.return_period > 0], dmg_states),
                dest1, comment=md)
    return [dest1, dest2]
Esempio n. 21
0
def export_loss_maps_csv(ekey, dstore):
    """
    Export the loss maps as CSV files, one per realization or statistic.
    """
    kind = ekey[0].split('-')[1]  # rlzs or stats
    assets = get_assets(dstore)
    value = get_loss_maps(dstore, kind)
    oq = dstore['oqparam']
    rlzs_or_stats = (dstore['full_lt'].get_realizations()
                     if kind == 'rlzs' else oq.hazard_stats())
    writer = writers.CsvWriter(fmt=writers.FIVEDIGITS)
    md = dstore.metadata
    rit = oq.risk_investigation_time or oq.investigation_time
    for i, ros in enumerate(rlzs_or_stats):
        if hasattr(ros, 'ordinal'):  # realization objects have an ordinal
            ros = 'rlz-%d' % ros.ordinal
        md.update(dict(kind=ros, risk_investigation_time=rit))
        fname = dstore.build_fname('loss_maps', ros, ekey[1])
        writer.save(compose_arrays(assets, value[:, i]), fname,
                    comment=md, renamedict=dict(id='asset_id'))
    return writer.getsaved()
Esempio n. 22
0
def export_cond_spectra(ekey, dstore):
    """
    Export one conditional-spectrum-<n>.csv file per site.
    """
    sitecol = dstore['sitecol']
    dset = dstore[ekey[0]]  # shape (1, M, N, 2, P)
    periods = dset.attrs['periods']
    imls = dset.attrs['imls']
    writer = writers.CsvWriter(fmt=writers.FIVEDIGITS)
    paths = []
    for n in sitecol.sids:
        values = dset[0, :, n, 0]  # shape (M, P)
        stddevs = dset[0, :, n, 1]  # shape (M, P)
        dic = {'sa_period': periods}
        # one val/std column pair per intensity level
        for p in range(len(imls)):
            dic['val%d' % p] = values[:, p]
            dic['std%d' % p] = stddevs[:, p]
        comment = dstore.metadata.copy()
        comment['imls'] = list(imls)
        comment['site_id'] = n
        comment['lon'] = sitecol.lons[n]
        comment['lat'] = sitecol.lats[n]
        path = dstore.export_path('conditional-spectrum-%d.csv' % n)
        writer.save(pandas.DataFrame(dic), path, comment=comment)
        paths.append(path)
    return paths