Example #1
def export_aggcurves_csv(ekey, dstore):
    """
    :param ekey: export key, i.e. a pair (datastore key, fmt)
    :param dstore: datastore object
    """
    oq = dstore['oqparam']
    E = len(dstore['events'])
    R = len(dstore['weights'])
    lossnames = numpy.array(oq.loss_names)
    aggtags = get_agg_tags(dstore, oq.aggregate_by)
    df = dstore.read_df('aggcurves')
    for tagname, tags in aggtags.items():
        df[tagname] = tags[df.agg_id]
    df['loss_type'] = lossnames[df.loss_id.to_numpy()]
    del df['loss_id']
    dest1 = dstore.export_path('%s.%s' % ekey)
    dest2 = dstore.export_path('dmgcsq.%s' % ekey[1])
    writer = writers.CsvWriter(fmt=writers.FIVEDIGITS)
    md = dstore.metadata
    md['risk_investigation_time'] = (oq.risk_investigation_time or
                                     oq.investigation_time)
    md['num_events'] = E
    md['effective_time'] = (
        oq.investigation_time * oq.ses_per_logic_tree_path * R)
    md['limit_states'] = dstore.get_attr('aggcurves', 'limit_states')
    dmg_states = ['nodamage'] + md['limit_states'].split()

    # aggregate damages/consequences
    dmgcsq = df[df.return_period == 0].set_index('agg_id')  # length K+1
    agg_number = dstore['agg_number'][dmgcsq.index.to_numpy()]
    dmgs = [col for col in df.columns if col.startswith('dmg_')]
    dmg0 = agg_number * E * oq.time_ratio - dmgcsq[dmgs].to_numpy().sum(axis=1)
    dmgcsq.insert(0, 'dmg_0', dmg0)
    dmgcsq.insert(0, 'number', agg_number)
    del dmgcsq['return_period']
    writer.save(rename(dmgcsq, dmg_states), dest2, comment=md)

    # aggcurves
    del df['agg_id']
    writer.save(rename(df[df.return_period > 0], dmg_states),
                dest1, comment=md)
    return [dest1, dest2]
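
All of these exporters share the same calling convention: they receive ekey, a (datastore key, fmt) pair, plus an open datastore, and return the list of file names written. A minimal invocation sketch, assuming an OpenQuake installation, that openquake.commonlib.datastore.read is available, and a completed calculation (the calc_id below is a placeholder):

# sketch: open a finished calculation and call the exporter directly
from openquake.commonlib import datastore

calc_id = 42  # placeholder id of a completed calculation
dstore = datastore.read(calc_id)
try:
    fnames = export_aggcurves_csv(('aggcurves', 'csv'), dstore)
    print(fnames)  # paths of the CSV files just written
finally:
    dstore.close()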
Example #2
def export_agg_maps_csv(ekey, dstore):
    """
    Export the aggregate loss maps (agg_maps-rlzs or agg_maps-stats)
    as a single CSV table.
    """
    name, kind = ekey[0].split('-')
    oq = dstore['oqparam']
    tagcol = dstore['assetcol/tagcol']
    agg_maps = dstore[ekey[0]][()]  # shape (C, R, L, T...)
    R = agg_maps.shape[1]
    kinds = (['rlz-%03d' % r for r in range(R)]
             if ekey[0].endswith('-rlzs') else list(oq.hazard_stats()))
    clp = [str(p) for p in oq.conditional_loss_poes]
    dic = dict(tagnames=['clp', 'kind', 'loss_type'] + oq.aggregate_by,
               clp=['?'] + clp,
               kind=['?'] + kinds,
               loss_type=('?', ) + oq.loss_dt().names)
    for tagname in oq.aggregate_by:
        dic[tagname] = getattr(tagcol, tagname)
    aw = hdf5.ArrayWrapper(agg_maps, dic)
    writer = writers.CsvWriter(fmt=writers.FIVEDIGITS)
    fname = dstore.export_path('%s.%s' % ekey)
    writer.save(aw.to_table(), fname)
    return [fname]
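
The hdf5.ArrayWrapper(...).to_table() call flattens the multi-dimensional agg_maps array together with its per-axis tags into one row per combination of tag values. A simplified pure-numpy illustration of that flattening (a conceptual sketch, not the real to_table implementation):

# illustration only: flatten a tagged 2D array into header + rows,
# the way ArrayWrapper.to_table does conceptually
import itertools
import numpy

array = numpy.arange(6, dtype=numpy.float32).reshape(2, 3)
tags = {'kind': ['rlz-000', 'rlz-001'],
        'loss_type': ['structural', 'nonstructural', 'contents']}
rows = [list(tags) + ['value']]  # header row
for idxs in itertools.product(*(range(len(v)) for v in tags.values())):
    labels = [v[i] for v, i in zip(tags.values(), idxs)]
    rows.append(labels + [array[idxs]])
# rows now has 1 header + 6 data rows, one per (kind, loss_type) pair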
Example #3
def export_loss_maps_csv(ekey, dstore):
    """
    Export the loss maps (loss_maps-rlzs or loss_maps-stats) as CSV,
    one file per realization or statistic.
    """
    kind = ekey[0].split('-')[1]  # rlzs or stats
    assets = get_assets(dstore)
    value = get_loss_maps(dstore, kind)
    oq = dstore['oqparam']
    if kind == 'rlzs':
        rlzs_or_stats = dstore['full_lt'].get_realizations()
    else:
        rlzs_or_stats = oq.hazard_stats()
    writer = writers.CsvWriter(fmt=writers.FIVEDIGITS)
    md = dstore.metadata
    for i, ros in enumerate(rlzs_or_stats):
        if hasattr(ros, 'ordinal'):  # is a realization
            ros = 'rlz-%d' % ros.ordinal
        fname = dstore.build_fname('loss_maps', ros, ekey[1])
        md.update(
            dict(kind=ros, risk_investigation_time=oq.risk_investigation_time))
        writer.save(compose_arrays(assets, value[:, i]), fname, comment=md,
                    renamedict=dict(id='asset_id'))
    return writer.getsaved()
Example #4
def export_losses_by_asset(ekey, dstore):
    """
    :param ekey: export key, i.e. a pair (datastore key, fmt)
    :param dstore: datastore object
    """
    oq = dstore['oqparam']
    loss_dt = oq.loss_dt(stat_dt)
    losses_by_asset = dstore[ekey[0]][()]
    rlzs = dstore['csm_info'].get_rlzs_assoc().realizations
    assets = get_assets(dstore)
    writer = writers.CsvWriter(fmt=writers.FIVEDIGITS)
    md = dstore.metadata
    md.update(dict(investigation_time=oq.investigation_time,
                   risk_investigation_time=oq.risk_investigation_time))
    for rlz in rlzs:
        losses = losses_by_asset[:, rlz.ordinal]
        dest = dstore.build_fname('losses_by_asset', rlz, 'csv')
        data = compose_arrays(assets, losses.copy().view(loss_dt)[:, 0])
        writer.save(data, dest, comment=md, renamedict=dict(id='asset_id'))
    return writer.getsaved()
Example #5
def export_gmf_data_csv(ekey, dstore):
    """
    Export the GMFs as CSV, both for scenario and event based
    calculations.
    """
    oq = dstore['oqparam']
    rlzs_assoc = dstore['csm_info'].get_rlzs_assoc()
    imts = list(oq.imtls)
    if 'scenario' in oq.calculation_mode:
        imtls = oq.imtls
        gsims = [str(rlz.gsim_rlz) for rlz in rlzs_assoc.realizations]
        n_gmfs = oq.number_of_ground_motion_fields
        fields = ['%03d' % i for i in range(n_gmfs)]
        dt = numpy.dtype([(f, F32) for f in fields])
        eids, gmfs_ = calc.get_gmfs(dstore)
        sitemesh = get_mesh(dstore['sitecol'])
        writer = writers.CsvWriter(fmt='%.5f')
        for gsim, gmfa in zip(gsims, gmfs_):  # gmfa of shape (N, E, I)
            for imti, imt in enumerate(imtls):
                gmfs = numpy.zeros(len(gmfa), dt)
                for e, event in enumerate(dt.names):
                    gmfs[event] = gmfa[:, e, imti]
                dest = dstore.build_fname('gmf', '%s-%s' % (gsim, imt), 'csv')
                data = util.compose_arrays(sitemesh, gmfs)
                writer.save(data, dest)
        return writer.getsaved()
    else:  # event based
        eid = int(ekey[0].split('/')[1]) if '/' in ekey[0] else None
        gmfa = GmfDataGetter.gen_gmfs(dstore['gmf_data'], rlzs_assoc, eid)
        if eid is None:  # new format
            fname = dstore.build_fname('gmf', 'data', 'csv')
            gmfa.sort(order=['rlzi', 'sid', 'eid'])
            writers.write_csv(fname, _expand_gmv(gmfa, imts))
            return [fname]
        # old format for single eid
        fnames = []
        for rlzi, array in group_array(gmfa, 'rlzi').items():
            rlz = rlzs_assoc.realizations[rlzi]
            data, comment = _build_csv_data(
                array, rlz, dstore['sitecol'], imts, oq.investigation_time)
            fname = dstore.build_fname(
                'gmf', '%d-rlz-%03d' % (eid, rlzi), 'csv')
            writers.write_csv(fname, data, comment=comment)
            fnames.append(fname)
        return fnames
Example #6
def export_gmf_scenario(ekey, dstore):
    """
    Export the scenario GMFs as CSV, one file per GSIM and IMT.
    """
    oq = dstore['oqparam']
    if 'scenario' in oq.calculation_mode:
        fields = ['%03d' % i for i in range(len(dstore['etags']))]
        dt = numpy.dtype([(f, F32) for f in fields])
        etags, gmfs_by_trt_gsim = calc.get_gmfs(dstore)
        sitemesh = dstore['sitemesh']
        writer = writers.CsvWriter(fmt='%.5f')
        for (trt, gsim), gmfs_ in gmfs_by_trt_gsim.items():
            for imt in gmfs_.dtype.names:
                gmfs = numpy.zeros(len(gmfs_), dt)
                for i in range(len(gmfs)):
                    gmfs[i] = tuple(gmfs_[imt][i])
                dest = dstore.export_path('gmf-%s-%s.csv' % (gsim, imt))
                data = util.compose_arrays(sitemesh, gmfs)
                writer.save(data, dest)
    else:  # event based
        logging.warning(
            'Not exporting the full GMFs for event_based, but you can'
            ' specify the rupture ordinals with gmfs:R1,...,Rn')
        return []
    return writer.getsaved()
Example #7
def export_loss_maps_csv(ekey, dstore):
    """
    Export the loss maps (loss_maps-rlzs or loss_maps-stats) as CSV,
    one file per realization or statistic.
    """
    kind = ekey[0].split('-')[1]  # rlzs or stats
    assets = get_assets(dstore)
    value = get_loss_maps(dstore, kind)
    oq = dstore['oqparam']
    if kind == 'rlzs':
        tags = dstore['csm_info'].get_rlzs_assoc().realizations
    else:
        tags = oq.hazard_stats()
    writer = writers.CsvWriter(fmt=writers.FIVEDIGITS)
    md = dstore.metadata
    for i, tag in enumerate(tags):
        uid = getattr(tag, 'uid', tag)
        fname = dstore.build_fname('loss_maps', tag, ekey[1])
        md.update(
            dict(kind=uid, risk_investigation_time=oq.risk_investigation_time))
        writer.save(compose_arrays(assets, value[:, i]),
                    fname,
                    comment=md,
                    renamedict=dict(id='asset_id'))
    return writer.getsaved()
Example #8
def export_avg_losses(ekey, dstore):
    """
    :param ekey: export key, i.e. a pair (datastore key, fmt)
    :param dstore: datastore object
    """
    dskey = ekey[0]
    oq = dstore['oqparam']
    dt = [(ln, F32) for ln in oq.loss_names]
    name, value, tags = _get_data(dstore, dskey, oq.hazard_stats())
    writer = writers.CsvWriter(fmt=writers.FIVEDIGITS)
    assets = get_assets(dstore)
    md = dstore.metadata
    md.update(
        dict(investigation_time=oq.investigation_time,
             risk_investigation_time=oq.risk_investigation_time))
    for tag, values in zip(tags, value.transpose(1, 0, 2)):
        dest = dstore.build_fname(name, tag, 'csv')
        array = numpy.zeros(len(values), dt)
        for li, ln in enumerate(oq.loss_names):
            array[ln] = values[:, li]
        writer.save(compose_arrays(assets, array), dest, comment=md)
    return writer.getsaved()
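
The inner loop above uses a pattern that recurs throughout these exporters: a plain (A, L) float matrix is converted into a structured array with one named field per loss type, so the CSV writer can emit named columns. A self-contained numpy sketch of the same pattern:

# sketch of the column-fill pattern: plain matrix -> structured array
import numpy

loss_names = ['structural', 'nonstructural']  # stands in for oq.loss_names
values = numpy.ones((4, 2), numpy.float32)  # shape (A, L)
dt = [(ln, numpy.float32) for ln in loss_names]
array = numpy.zeros(len(values), dt)
for li, ln in enumerate(loss_names):
    array[ln] = values[:, li]
# array now has named columns and can be joined with the asset array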
Example #9
def export_losses_by_event(ekey, dstore):
    """
    :param ekey: export key, i.e. a pair (datastore key, fmt)
    :param dstore: datastore object
    """
    oq = dstore['oqparam']
    writer = writers.CsvWriter(fmt=writers.FIVEDIGITS)
    dest = dstore.build_fname('losses_by_event', '', 'csv')
    if (oq.calculation_mode.startswith('scenario')
            or oq.calculation_mode == 'ebrisk'):
        tagcol = dstore['assetcol/tagcol']
        lbe = dstore['losses_by_event'][()]
        lbe.sort(order='event_id')
        dic = dict(tagnames=['event_id'] + oq.aggregate_by)
        for tagname in oq.aggregate_by:
            dic[tagname] = getattr(tagcol, tagname)
        dic['event_id'] = ['?'] + list(lbe['event_id'])
        # example (0, 1, 2, 3) -> (0, 2, 3, 1)
        axis = [0] + list(range(2, len(lbe['loss'].shape))) + [1]
        data = lbe['loss'].transpose(axis)  # shape (E, T..., L)
        aw = hdf5.ArrayWrapper(data, dic, oq.loss_names)
        writer.save(aw.to_table(), dest)
    else:
        dtlist = [('event_id', U64), ('rlz_id', U16), ('rup_id', U32),
                  ('year', U32)] + oq.loss_dt_list()
        eids = dstore['losses_by_event']['event_id']
        events = dstore['events']
        year_of = year_dict(events['id'], oq.investigation_time, oq.ses_seed)
        arr = numpy.zeros(len(dstore['losses_by_event']), dtlist)
        arr['event_id'] = eids
        arr['rup_id'] = arr['event_id'] // TWO32  # rup_id in the high 32 bits
        arr['rlz_id'] = get_rlz_ids(events, eids)
        arr['year'] = [year_of[eid] for eid in eids]
        loss = dstore['losses_by_event']['loss'].T  # shape (L, E)
        for losses, loss_type in zip(loss, oq.loss_names):
            arr[loss_type] = losses
        writer.save(arr, dest)
    return writer.getsaved()
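
Two conventions in the code above are worth spelling out: the transpose moves the loss axis from position 1 to the last position, and event ids encode the rupture id in their high 32 bits, which is why integer division by TWO32 recovers it. A small sketch of both:

# sketch of the two conventions used in export_losses_by_event
import numpy

loss = numpy.zeros((5, 2, 3, 4))  # shape (E, L, T1, T2)
axis = [0] + list(range(2, loss.ndim)) + [1]  # (0, 1, 2, 3) -> (0, 2, 3, 1)
assert loss.transpose(axis).shape == (5, 3, 4, 2)  # shape (E, T1, T2, L)

TWO32 = 2 ** 32
event_id = 7 * TWO32 + 123  # rupture 7, 124th event
assert event_id // TWO32 == 7  # integer division recovers the rupture id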
Example #10
def export_losses_by_taxon_csv(ekey, dstore):
    """
    Export the losses by taxonomy as CSV, one file per realization
    or statistic.
    """
    oq = dstore['oqparam']
    taxonomies = add_quotes(dstore['assetcol/taxonomies'][()])
    rlzs = dstore['csm_info'].get_rlzs_assoc().realizations
    loss_types = oq.loss_dt().names
    key, kind = ekey[0].split('-')
    value = dstore[key + '-rlzs'][()]
    if kind == 'stats':
        weights = dstore['realizations']['weight']
        tags, stats = zip(*oq.risk_stats())
        value = compute_stats2(value, stats, weights)
    else:  # rlzs
        tags = rlzs
    writer = writers.CsvWriter(fmt=writers.FIVEDIGITS)
    dt = numpy.dtype([('taxonomy', taxonomies.dtype)] + oq.loss_dt_list())
    for tag, values in zip(tags, value.transpose(1, 0, 2)):
        fname = dstore.build_fname(key, tag, ekey[1])
        array = numpy.zeros(len(values), dt)
        array['taxonomy'] = taxonomies
        for l, lt in enumerate(loss_types):
            array[lt] = values[:, l]
        writer.save(array, fname)
    return writer.getsaved()
Example #11
def export_avg_losses(ekey, dstore):
    """
    :param ekey: export key, i.e. a pair (datastore key, fmt)
    :param dstore: datastore object
    """
    oq = dstore['oqparam']
    dt = oq.loss_dt()
    assets = get_assets(dstore)
    writer = writers.CsvWriter(fmt=writers.FIVEDIGITS)
    name, kind = ekey[0].split('-')
    value = dstore[name + '-rlzs'][()]  # shape (A, R, L')
    if kind == 'stats':
        weights = dstore['realizations']['weight']
        tags, stats = zip(*oq.risk_stats())
        value = compute_stats2(value, stats, weights)
    else:  # rlzs
        tags = ['rlz-%03d' % r for r in range(len(dstore['realizations']))]
    for tag, values in zip(tags, value.transpose(1, 0, 2)):
        dest = dstore.build_fname(name, tag, 'csv')
        array = numpy.zeros(len(values), dt)
        for l, lt in enumerate(dt.names):
            array[lt] = values[:, l]
        writer.save(compose_arrays(assets, array), dest)
    return writer.getsaved()
Example #12
def export_agg_curve_rlzs(ekey, dstore):
    """
    Export the aggregate loss curves as CSV, adding loss ratio and
    annual frequency of exceedence columns.
    """
    oq = dstore['oqparam']
    assetcol = dstore['assetcol']
    if ekey[0].startswith('agg_'):
        aggregate_by = oq.aggregate_by
    else:  # tot_curves
        aggregate_by = []

    name = '_'.join(['agg'] + oq.aggregate_by)
    aggvalue = dstore['exposed_values/' + name][()]

    lti = tag2idx(oq.loss_names)
    tagi = {
        tagname: tag2idx(getattr(assetcol.tagcol, tagname))
        for tagname in aggregate_by
    }

    def get_loss_ratio(rec):
        idxs = tuple(tagi[tagname][getattr(rec, tagname)] - 1
                     for tagname in aggregate_by) + (lti[rec.loss_types], )
        return rec.loss_value / aggvalue[idxs]

    # shape (T1, T2, ..., L)
    md = dstore.metadata
    md.update(
        dict(kind=ekey[0], risk_investigation_time=oq.risk_investigation_time))
    fname = dstore.export_path('%s.%s' % ekey)
    writer = writers.CsvWriter(fmt=writers.FIVEDIGITS)
    aw = hdf5.ArrayWrapper.from_(dstore[ekey[0]], 'loss_value')
    table = add_columns(
        aw.to_table(),
        loss_ratio=get_loss_ratio,
        annual_frequency_of_exceedence=lambda rec: 1 / rec.return_periods)
    table[0] = [c[:-1] if c.endswith('s') else c for c in table[0]]
    writer.save(table, fname, comment=md)
    return writer.getsaved()
Example #13
def export_agg_curve_rlzs(ekey, dstore):
    """
    Export the aggregate loss curves as CSV, one file per realization
    or statistic.
    """
    oq = dstore['oqparam']
    lnames = numpy.array(oq.loss_names)
    if oq.aggregate_by:
        agg_keys = dstore['agg_keys'][:]
    agg_tags = {}
    for tagname in oq.aggregate_by:
        agg_tags[tagname] = numpy.concatenate([agg_keys[tagname], ['*total*']])
    aggvalue = dstore['agg_values'][()]  # shape (K+1, L)
    md = dstore.metadata
    md['risk_investigation_time'] = oq.risk_investigation_time
    writer = writers.CsvWriter(fmt=writers.FIVEDIGITS)
    descr = hdf5.get_shape_descr(dstore[ekey[0]].attrs['json'])
    name, suffix = ekey[0].split('-')
    rlzs_or_stats = descr[suffix[:-1]]
    aw = hdf5.ArrayWrapper(dstore[ekey[0]], descr, ('loss_value', ))
    dataf = aw.to_dframe().set_index(suffix[:-1])
    for r, ros in enumerate(rlzs_or_stats):
        md['kind'] = f'{name}-' + (ros if isinstance(ros, str) else
                                   'rlz-%03d' % ros)
        try:
            df = dataf[dataf.index == ros]
        except KeyError:
            logging.warning('No data for %s', md['kind'])
            continue
        dic = {col: df[col].to_numpy() for col in dataf.columns}
        dic['loss_type'] = lnames[dic['lti']]
        for tagname in oq.aggregate_by:
            dic[tagname] = agg_tags[tagname][dic['agg_id']]
        dic['loss_ratio'] = dic['loss_value'] / aggvalue[dic['agg_id'],
                                                         dic.pop('lti')]
        dic['annual_frequency_of_exceedence'] = 1 / dic['return_period']
        del dic['agg_id']
        dest = dstore.build_fname(md['kind'], '', 'csv')
        writer.save(pandas.DataFrame(dic), dest, comment=md)
    return writer.getsaved()
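
Both this exporter and Example #1 map integer ids back to human-readable labels with numpy fancy indexing: indexing an array of names with an array of integer ids yields an array of names of the same length. A tiny sketch:

# sketch of id-to-label mapping via fancy indexing
import numpy

lnames = numpy.array(['structural', 'nonstructural'])
lti = numpy.array([0, 1, 1, 0])  # one integer loss type id per row
print(lnames[lti])  # ['structural' 'nonstructural' 'nonstructural' 'structural']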
Example #14
def export_agg_losses_ebr(ekey, dstore):
    """
    :param ekey: export key, i.e. a pair (datastore key, fmt)
    :param dstore: datastore object
    """
    agg_losses = dstore[ekey[0]]
    rlzs = dstore['rlzs_assoc'].realizations
    loss_types = dstore['riskmodel'].loss_types
    tags = dstore['tags'][()]
    ext_loss_types = loss_types + [lt + '_ins' for lt in loss_types]
    ext_dt = numpy.dtype([('tag', (bytes, 100))] + [(elt, numpy.float32)
                                                    for elt in ext_loss_types])
    writer = writers.CsvWriter(fmt='%10.6E')
    for rlz in rlzs:
        rows = agg_losses[rlz.uid]
        data = []
        for row in rows:
            loss = row['loss']  # matrix L x 2
            data.append((tags[row['rup_id']], ) + tuple(loss[:, 0]) +
                        tuple(loss[:, 1]))
        data.sort()
        dest = dstore.export_path('agg_losses-rlz%03d.csv' % rlz.ordinal)
        writer.save(numpy.array(data, ext_dt), dest)
    return writer.getsaved()
Example #15
def export_bcr_map(ekey, dstore):
    """
    Export the benefit-cost ratio map as CSV, one file per realization
    and loss type.
    """
    assetcol = dstore['assetcol/array'][()]
    aref = dstore['asset_refs'][()]
    bcr_data = dstore['bcr-rlzs']
    N, R = bcr_data.shape
    realizations = dstore['csm_info'].get_rlzs_assoc().realizations
    loss_types = dstore.get_attr('composite_risk_model', 'loss_types')
    fnames = []
    writer = writers.CsvWriter(fmt=writers.FIVEDIGITS)
    for rlz in realizations:
        for l, loss_type in enumerate(loss_types):
            rlz_data = bcr_data[loss_type][:, rlz.ordinal]
            path = dstore.build_fname('bcr-%s' % loss_type, rlz, 'csv')
            data = [[
                'lon', 'lat', 'asset_ref', 'average_annual_loss_original',
                'average_annual_loss_retrofitted', 'bcr'
            ]]
            for ass, value in zip(assetcol, rlz_data):
                data.append((ass['lon'], ass['lat'], decode(aref[ass['idx']]),
                             value['annual_loss_orig'],
                             value['annual_loss_retro'], value['bcr']))
            writer.save(data, path)
            fnames.append(path)
    return writer.getsaved()
Example #16
def export_src_loss_table(ekey, dstore):
    """
    :param ekey: export key, i.e. a pair (datastore key, fmt)
    :param dstore: datastore object
    """
    oq = dstore['oqparam']
    trts = dstore['full_lt'].trts
    trt_by_source_id = {}
    for rec in dstore['source_info']:
        trt_by_source_id[rec['source_id'][:16]] = trts[rec['trti']]

    def get_trt(row):
        return trt_by_source_id[row.source]

    md = dstore.metadata
    md.update(
        dict(investigation_time=oq.investigation_time,
             risk_investigation_time=oq.risk_investigation_time))
    aw = hdf5.ArrayWrapper.from_(dstore['src_loss_table'], 'loss_value')
    dest = dstore.build_fname('src_loss_table', '', 'csv')
    writer = writers.CsvWriter(fmt=writers.FIVEDIGITS)
    rows = add_columns(aw.to_table(), trt=get_trt)
    writer.save(rows, dest, comment=md)
    return writer.getsaved()
Example #17
def export_dmg_by_event(ekey, dstore):
    """
    :param ekey: export key, i.e. a pair (datastore key, fmt)
    :param dstore: datastore object
    """
    damage_dt = build_damage_dt(dstore)
    dt_list = [('event_id', U32), ('rlz_id', U16)] + [
        (f, damage_dt.fields[f][0]) for f in damage_dt.names]
    dmg_by_event = dstore[ekey[0]][()]  # shape E, L, D
    events = dstore['events'][()]
    writer = writers.CsvWriter(fmt='%g')
    fname = dstore.build_fname('dmg_by_event', '', 'csv')
    writer.save(numpy.zeros(0, dt_list), fname)
    with open(fname, 'a') as dest:
        for rlz_id in numpy.unique(events['rlz_id']):
            ok, = numpy.where(events['rlz_id'] == rlz_id)
            arr = numpy.zeros(len(ok), dt_list)
            arr['event_id'] = events['id'][ok]
            arr['rlz_id'] = rlz_id
            for li, loss_type in enumerate(damage_dt.names):
                for d, dmg_state in enumerate(damage_dt[loss_type].names):
                    arr[loss_type][dmg_state] = dmg_by_event[ok, li, d]
            writer.save_block(arr, dest)
    return [fname]
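
The save-then-append idiom above writes the CSV header once (writer.save with a zero-length structured array) and then streams one block of rows per realization with writer.save_block, so the full table never has to sit in memory. A stdlib-only sketch of the same idiom, with a hypothetical file name and the csv module standing in for the engine's CsvWriter:

# sketch: write the header once, then append blocks of rows
import csv

header = ['event_id', 'rlz_id', 'loss']
with open('dmg_by_event.csv', 'w', newline='') as f:  # hypothetical path
    csv.writer(f).writerow(header)
with open('dmg_by_event.csv', 'a', newline='') as f:
    w = csv.writer(f)
    blocks = [[(1, 0.5)], [(2, 0.7), (3, 0.1)]]  # toy per-rlz blocks
    for rlz_id, rows in enumerate(blocks):
        for event_id, loss in rows:
            w.writerow([event_id, rlz_id, loss])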
Example #18
def export_dmg_by_event(ekey, dstore):
    """
    :param ekey: export key, i.e. a pair (datastore key, fmt)
    :param dstore: datastore object
    """
    damage_dt = build_damage_dt(dstore, mean_std=False)
    dt_list = [('event_id', numpy.uint64), ('rlzi', numpy.uint16)] + [
        (f, damage_dt.fields[f][0]) for f in damage_dt.names]
    all_losses = dstore[ekey[0]][()]  # shape (E, R, LI)
    eids = dstore['events']['eid']
    rlzs = dstore['csm_info'].get_rlzs_assoc().realizations
    writer = writers.CsvWriter(fmt=writers.FIVEDIGITS)
    fname = dstore.build_fname('dmg_by_event', '', 'csv')
    writer.save(numpy.zeros(0, dt_list), fname)
    with open(fname, 'ab') as dest:
        for rlz in rlzs:
            data = all_losses[:, rlz.ordinal].copy().view(damage_dt)  # shape E
            arr = numpy.zeros(len(data), dt_list)
            arr['event_id'] = eids
            arr['rlzi'] = rlz.ordinal
            for field in damage_dt.names:
                arr[field] = data[field].squeeze()
            writer.save_block(arr, dest)
    return [fname]