Example #1
def export_damage(ekey, dstore):
    """
    :param ekey: export key, i.e. a pair (datastore key, fmt)
    :param dstore: datastore object
    """
    oqparam = dstore['oqparam']
    riskmodel = dstore['riskmodel']
    rlzs = dstore['rlzs_assoc'].realizations
    damages_by_key = dstore['damages_by_key']
    assetcol = dstore['assetcol']
    sitemesh = dstore['sitemesh']
    assetno = dict((ref, i) for i, ref in enumerate(assetcol['asset_ref']))
    dmg_states = [
        DmgState(s, i) for i, s in enumerate(riskmodel.damage_states)
    ]
    fnames = []
    for i in sorted(damages_by_key):
        rlz = rlzs[i]
        result = damages_by_key[i]
        dd_taxo = []
        dd_asset = []
        shape = oqparam.number_of_ground_motion_fields, len(dmg_states)
        totals = numpy.zeros(shape)  # R x D matrix
        for (key_type, key), values in result.items():
            if key_type == 'taxonomy':
                # values are fractions, R x D matrix
                totals += values
                means, stds = scientific.mean_std(values)
                for dmg_state, mean, std in zip(dmg_states, means, stds):
                    dd_taxo.append(
                        DmgDistPerTaxonomy(key, dmg_state, mean, std))
            elif key_type == 'asset':
                means, stddevs = values
                point = sitemesh[assetcol[assetno[key]]['site_id']]
                site = Site(point['lon'], point['lat'])
                for dmg_state, mean, std in zip(dmg_states, means, stddevs):
                    dd_asset.append(
                        DmgDistPerAsset(ExposureData(key, site), dmg_state,
                                        mean, std))
        dd_total = []
        for dmg_state, total in zip(dmg_states, totals.T):
            mean, std = scientific.mean_std(total)
            dd_total.append(DmgDistTotal(dmg_state, mean, std))

        suffix = '' if rlz.uid == '*' else '-gsimltp_%s' % rlz.uid
        f1 = export_dmg_xml(('dmg_dist_per_asset', 'xml'), oqparam.export_dir,
                            dmg_states, dd_asset, suffix)
        f2 = export_dmg_xml(('dmg_dist_per_taxonomy', 'xml'),
                            oqparam.export_dir, dmg_states, dd_taxo, suffix)
        f3 = export_dmg_xml(('dmg_dist_total', 'xml'), oqparam.export_dir,
                            dmg_states, dd_total, suffix)
        max_damage = dmg_states[-1]
        # the collapse map is extracted from the damage distribution per asset
        # (dda) by taking the value corresponding to the maximum damage
        collapse_map = [dda for dda in dd_asset if dda.dmg_state == max_damage]
        f4 = export_dmg_xml(('collapse_map', 'xml'), oqparam.export_dir,
                            dmg_states, collapse_map, suffix)
        fnames.extend(sum((f1 + f2 + f3 + f4).values(), []))
    return sorted(fnames)
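Every example on this page revolves around `scientific.mean_std`. A minimal sketch of the behaviour assumed here, using plain numpy (the helper name `mean_std_sketch` is made up, it is not the OpenQuake API): the array is reduced over its first axis, giving one mean and one standard deviation per column, e.g. per damage state.

import numpy

def mean_std_sketch(values):
    """Mean and standard deviation over the first axis (one value per column)."""
    return values.mean(axis=0), values.std(axis=0)

# 4 ground motion fields x 3 damage states (made-up fractions)
values = numpy.array([[0.7, 0.2, 0.1],
                      [0.6, 0.3, 0.1],
                      [0.8, 0.1, 0.1],
                      [0.5, 0.4, 0.1]])
means, stds = mean_std_sketch(values)  # each of shape (3,)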
Example #2
def scenario_damage(riskinputs, riskmodel, rlzs_assoc, monitor):
    """
    Core function for a damage computation.

    :param riskinputs:
        a list of :class:`openquake.risklib.riskinput.RiskInput` objects
    :param riskmodel:
        a :class:`openquake.risklib.riskinput.RiskModel` instance
    :param rlzs_assoc:
        a :class:`openquake.commonlib.source.RlzsAssoc` instance
    :param monitor:
        :class:`openquake.baselib.performance.PerformanceMonitor` instance
    :returns:
        a dictionary {'d_asset': [(l, r, a, mean-stddev), ...],
                      'd_taxon': damage array of shape T, L, R, E, D,
                      'c_asset': [(l, r, a, mean-stddev), ...],
                      'c_taxon': damage array of shape T, L, R, E}

    `d_asset` and `d_taxon` are related to the damage distributions
    whereas `c_asset` and `c_taxon` are the consequence distributions.
    If there is no consequence model `c_asset` is an empty list and
    `c_taxon` is a zero-valued array.
    """
    logging.info('Process %d, considering %d risk input(s) of weight %d',
                 os.getpid(), len(riskinputs),
                 sum(ri.weight for ri in riskinputs))
    c_models = monitor.consequence_models
    L = len(riskmodel.loss_types)
    R = len(rlzs_assoc.realizations)
    D = len(riskmodel.damage_states)
    E = monitor.oqparam.number_of_ground_motion_fields
    T = len(monitor.taxonomies)
    taxo2idx = {taxo: i for i, taxo in enumerate(monitor.taxonomies)}
    lt2idx = {lt: i for i, lt in enumerate(riskmodel.loss_types)}
    result = dict(d_asset=[],
                  d_taxon=numpy.zeros((T, L, R, E, D), F64),
                  c_asset=[],
                  c_taxon=numpy.zeros((T, L, R, E), F64))
    for out_by_rlz in riskmodel.gen_outputs(riskinputs, rlzs_assoc, monitor):
        for out in out_by_rlz:
            l = lt2idx[out.loss_type]
            r = out.hid
            c_model = c_models.get(out.loss_type)
            for asset, fraction in zip(out.assets, out.damages):
                t = taxo2idx[asset.taxonomy]
                damages = fraction * asset.number
                if c_model:  # compute consequences
                    means = [par[0] for par in c_model[asset.taxonomy].params]
                    # NB: we add a 0 in front for nodamage state
                    c_ratio = numpy.dot(fraction, [0] + means)
                    consequences = c_ratio * asset.value(out.loss_type)
                    result['c_asset'].append(
                        (l, r, asset.idx, scientific.mean_std(consequences)))
                    result['c_taxon'][t, l, r, :] += consequences
                    # TODO: consequences for the occupants
                result['d_asset'].append(
                    (l, r, asset.idx, scientific.mean_std(damages)))
                result['d_taxon'][t, l, r, :] += damages
    return result
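The consequence step above can be illustrated in isolation. A hedged sketch with made-up numbers: the per-event damage fractions (shape E x D) are dotted with the mean consequence ratios per damage state, prepending a 0 for the no-damage state, which yields one consequence ratio per event.

import numpy

fraction = numpy.array([[0.6, 0.3, 0.1],   # event 1: nodamage, moderate, collapse
                        [0.4, 0.4, 0.2]])  # event 2
means = [0.25, 1.0]        # assumed mean consequence ratio per damaged state
c_ratio = numpy.dot(fraction, [0] + means)  # shape (E,), here (2,)
asset_value = 1000.0
consequences = c_ratio * asset_value        # monetary consequence per event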
Example #3
def scenario_damage(riskinputs, riskmodel, rlzs_assoc, monitor):
    """
    Core function for a damage computation.

    :param riskinputs:
        a list of :class:`openquake.risklib.riskinput.RiskInput` objects
    :param riskmodel:
        a :class:`openquake.risklib.riskinput.RiskModel` instance
    :param rlzs_assoc:
        a :class:`openquake.commonlib.source.RlzsAssoc` instance
    :param monitor:
        :class:`openquake.baselib.performance.PerformanceMonitor` instance
    :returns:
        a dictionary {'d_asset': [(l, r, a, mean-stddev), ...],
                      'd_taxon': damage array of shape T, L, R, E, D,
                      'c_asset': [(l, r, a, mean-stddev), ...],
                      'c_taxon': damage array of shape T, L, R, E}

    `d_asset` and `d_taxon` are related to the damage distributions
    whereas `c_asset` and `c_taxon` are the consequence distributions.
    If there is no consequence model `c_asset` is an empty list and
    `c_taxon` is a zero-valued array.
    """
    logging.info('Process %d, considering %d risk input(s) of weight %d',
                 os.getpid(), len(riskinputs),
                 sum(ri.weight for ri in riskinputs))
    c_models = monitor.consequence_models
    L = len(riskmodel.loss_types)
    R = len(rlzs_assoc.realizations)
    D = len(riskmodel.damage_states)
    E = monitor.oqparam.number_of_ground_motion_fields
    T = len(monitor.taxonomies)
    taxo2idx = {taxo: i for i, taxo in enumerate(monitor.taxonomies)}
    lt2idx = {lt: i for i, lt in enumerate(riskmodel.loss_types)}
    result = dict(d_asset=[], d_taxon=numpy.zeros((T, L, R, E, D), F64),
                  c_asset=[], c_taxon=numpy.zeros((T, L, R, E), F64))
    for out_by_rlz in riskmodel.gen_outputs(
            riskinputs, rlzs_assoc, monitor):
        for out in out_by_rlz:
            l = lt2idx[out.loss_type]
            r = out.hid
            c_model = c_models.get(out.loss_type)
            for asset, fraction in zip(out.assets, out.damages):
                t = taxo2idx[asset.taxonomy]
                damages = fraction * asset.number
                if c_model:  # compute consequences
                    means = [par[0] for par in c_model[asset.taxonomy].params]
                    # NB: we add a 0 in front for nodamage state
                    c_ratio = numpy.dot(fraction, [0] + means)
                    consequences = c_ratio * asset.value(out.loss_type)
                    result['c_asset'].append(
                        (l, r, asset.idx, scientific.mean_std(consequences)))
                    result['c_taxon'][t, l, r, :] += consequences
                    # TODO: consequences for the occupants
                result['d_asset'].append(
                    (l, r, asset.idx, scientific.mean_std(damages)))
                result['d_taxon'][t, l, r, :] += damages
    return result
Example #4
def scenario_damage(riskinput, riskmodel, param, monitor):
    """
    Core function for a damage computation.

    :param riskinput:
        a :class:`openquake.risklib.riskinput.RiskInput` object
    :param riskmodel:
        a :class:`openquake.risklib.riskinput.CompositeRiskModel` instance
    :param monitor:
        :class:`openquake.baselib.performance.Monitor` instance
    :param param:
        dictionary of extra parameters
    :returns:
        a dictionary {'d_asset': [(l, r, a, mean-stddev), ...],
                      'd_tag': damage array of shape T, R, L, E, D,
                      'c_asset': [(l, r, a, mean-stddev), ...],
                      'c_tag': damage array of shape T, R, L, E}

    `d_asset` and `d_tag` are related to the damage distributions
    whereas `c_asset` and `c_tag` are the consequence distributions.
    If there is no consequence model `c_asset` is an empty list and
    `c_tag` is a zero-valued array.
    """
    c_models = param['consequence_models']
    L = len(riskmodel.loss_types)
    R = riskinput.hazard_getter.num_rlzs
    D = len(riskmodel.damage_states)
    E = param['number_of_ground_motion_fields']
    T = len(param['tags'])
    result = dict(d_asset=[],
                  d_tag=numpy.zeros((T, R, L, E, D), F64),
                  c_asset=[],
                  c_tag=numpy.zeros((T, R, L, E), F64))
    for outputs in riskmodel.gen_outputs(riskinput, monitor):
        r = outputs.rlzi
        for l, damages in enumerate(outputs):
            loss_type = riskmodel.loss_types[l]
            c_model = c_models.get(loss_type)
            for a, fraction in enumerate(damages):
                asset = outputs.assets[a]
                taxo = riskmodel.taxonomy[asset.taxonomy]
                damages = fraction * asset.number
                t = asset.tagmask(param['tags'])
                result['d_tag'][t, r, l] += damages  # shape (E, D)
                if c_model:  # compute consequences
                    means = [par[0] for par in c_model[taxo].params]
                    # NB: we add a 0 in front for nodamage state
                    c_ratio = numpy.dot(fraction, [0] + means)
                    consequences = c_ratio * asset.value(loss_type)
                    result['c_asset'].append(
                        (l, r, asset.ordinal,
                         scientific.mean_std(consequences)))
                    result['c_tag'][t, r, l] += consequences
                    # TODO: consequences for the occupants
                result['d_asset'].append(
                    (l, r, asset.ordinal, scientific.mean_std(damages)))
    result['gmdata'] = riskinput.gmdata
    return result
Example #5
def export_damage(ekey, dstore):
    """
    :param ekey: export key, i.e. a pair (datastore key, fmt)
    :param dstore: datastore object
    """
    oqparam = dstore['oqparam']
    riskmodel = dstore['riskmodel']
    rlzs = dstore['rlzs_assoc'].realizations
    damages_by_key = dstore['damages_by_key']
    assetcol = dstore['/assetcol']
    sitemesh = dstore['/sitemesh']
    assetno = dict((ref, i) for i, ref in enumerate(assetcol['asset_ref']))
    dmg_states = [DmgState(s, i)
                  for i, s in enumerate(riskmodel.damage_states)]
    fnames = []
    for i in sorted(damages_by_key):
        rlz = rlzs[i]
        result = damages_by_key[i]
        dd_taxo = []
        dd_asset = []
        shape = oqparam.number_of_ground_motion_fields, len(dmg_states)
        totals = numpy.zeros(shape)  # R x D matrix
        for (key_type, key), values in result.iteritems():
            if key_type == 'taxonomy':
                # values are fractions, R x D matrix
                totals += values
                means, stds = scientific.mean_std(values)
                for dmg_state, mean, std in zip(dmg_states, means, stds):
                    dd_taxo.append(
                        DmgDistPerTaxonomy(key, dmg_state, mean, std))
            elif key_type == 'asset':
                means, stddevs = values
                point = sitemesh[assetcol[assetno[key]]['site_id']]
                site = Site(point['lon'], point['lat'])
                for dmg_state, mean, std in zip(dmg_states, means, stddevs):
                    dd_asset.append(
                        DmgDistPerAsset(
                            ExposureData(key, site), dmg_state, mean, std))
        dd_total = []
        for dmg_state, total in zip(dmg_states, totals.T):
            mean, std = scientific.mean_std(total)
            dd_total.append(DmgDistTotal(dmg_state, mean, std))

        suffix = '' if rlz.uid == '*' else '-gsimltp_%s' % rlz.uid
        f1 = export_dmg_xml(('dmg_dist_per_asset', 'xml'), oqparam.export_dir,
                            dmg_states, dd_asset, suffix)
        f2 = export_dmg_xml(('dmg_dist_per_taxonomy', 'xml'),
                            oqparam.export_dir, dmg_states, dd_taxo, suffix)
        f3 = export_dmg_xml(('dmg_dist_total', 'xml'), oqparam.export_dir,
                            dmg_states, dd_total, suffix)
        max_damage = dmg_states[-1]
        # the collapse map is extracted from the damage distribution per asset
        # (dda) by taking the value corresponding to the maximum damage
        collapse_map = [dda for dda in dd_asset if dda.dmg_state == max_damage]
        f4 = export_dmg_xml(('collapse_map', 'xml'), oqparam.export_dir,
                            dmg_states, collapse_map, suffix)
        fnames.extend(sum((f1 + f2 + f3 + f4).values(), []))
    return sorted(fnames)
Example #6
def scenario_damage(riskinput, riskmodel, monitor):
    """
    Core function for a damage computation.

    :param riskinput:
        a :class:`openquake.risklib.riskinput.RiskInput` object
    :param riskmodel:
        a :class:`openquake.risklib.riskinput.CompositeRiskModel` instance
    :param monitor:
        :class:`openquake.baselib.performance.Monitor` instance
    :returns:
        a dictionary {'d_asset': [(l, r, a, mean-stddev), ...],
                      'd_taxon': damage array of shape T, L, R, E, D,
                      'c_asset': [(l, r, a, mean-stddev), ...],
                      'c_taxon': damage array of shape T, L, R, E}

    `d_asset` and `d_taxon` are related to the damage distributions
    whereas `c_asset` and `c_taxon` are the consequence distributions.
    If there is no consequence model `c_asset` is an empty list and
    `c_taxon` is a zero-valued array.
    """
    c_models = monitor.consequence_models
    L = len(riskmodel.loss_types)
    R = len(riskinput.rlzs)
    D = len(riskmodel.damage_states)
    E = monitor.oqparam.number_of_ground_motion_fields
    T = len(monitor.taxonomies)
    taxo2idx = {taxo: i for i, taxo in enumerate(monitor.taxonomies)}
    result = dict(d_asset=[], d_taxon=numpy.zeros((T, L, R, E, D), F64),
                  c_asset=[], c_taxon=numpy.zeros((T, L, R, E), F64))
    for out in riskmodel.gen_outputs(riskinput, monitor):
        l, r = out.lr
        c_model = c_models.get(out.loss_type)
        for asset, fraction in zip(out.assets, out.damages):
            t = taxo2idx[asset.taxonomy]
            damages = fraction * asset.number
            if c_model:  # compute consequences
                means = [par[0] for par in c_model[asset.taxonomy].params]
                # NB: we add a 0 in front for nodamage state
                c_ratio = numpy.dot(fraction, [0] + means)
                consequences = c_ratio * asset.value(out.loss_type)
                result['c_asset'].append(
                    (l, r, asset.ordinal,
                     scientific.mean_std(consequences)))
                result['c_taxon'][t, l, r, :] += consequences
                # TODO: consequences for the occupants
            result['d_asset'].append(
                (l, r, asset.ordinal, scientific.mean_std(damages)))
            result['d_taxon'][t, l, r, :] += damages
    return result
Example #7
def export_risk(ekey, dstore):
    """
    Export the loss curves of a given realization in CSV format.

    :param ekey: export key, i.e. a pair (datastore key, fmt)
    :param dstore: datastore object
    """
    oqparam = dstore['oqparam']
    unit_by_lt = {riskmodels.cost_type_to_loss_type(ct['name']): ct['unit']
                  for ct in dstore['cost_types']}
    unit_by_lt['fatalities'] = 'people'
    rlzs = dstore['rlzs_assoc'].realizations
    losses_by_key = dstore['losses_by_key']
    fnames = []
    for i in sorted(losses_by_key):
        rlz = rlzs[i]
        result = losses_by_key[i]
        suffix = '' if rlz.uid == '*' else '-gsimltp_%s' % rlz.uid
        losses = AccumDict()
        for key, values in result.iteritems():
            key_type, loss_type = key
            unit = unit_by_lt[loss_type]
            if key_type in ('agg', 'ins'):
                mean, std = scientific.mean_std(values)
                losses += {key_type: [
                    AggLoss(loss_type, unit, mean, std)]}
            else:
                losses += {key_type: [
                    PerAssetLoss(loss_type, unit, *vals) for vals in values]}
        for key_type in losses:
            out = export_loss_csv((key_type, 'csv'),
                                  oqparam.export_dir, losses[key_type], suffix)
            fnames.append(out)
    return sorted(fnames)
Example #8
def scenario_damage(riskinputs, riskmodel, rlzs_assoc, monitor):
    """
    Core function for a damage computation.

    :param riskinputs:
        a list of :class:`openquake.risklib.riskinput.RiskInput` objects
    :param riskmodel:
        a :class:`openquake.risklib.riskinput.RiskModel` instance
    :param rlzs_assoc:
        a :class:`openquake.commonlib.source.RlzsAssoc` instance
    :param monitor:
        :class:`openquake.commonlib.parallel.PerformanceMonitor` instance
    :returns:
        a dictionary {('asset', asset): <mean stddev>,
                      ('taxonomy', asset.taxonomy): <damage array>}
    """
    logging.info('Process %d, considering %d risk input(s) of weight %d',
                 os.getpid(), len(riskinputs),
                 sum(ri.weight for ri in riskinputs))
    ordinals = list(range(len(rlzs_assoc.realizations)))
    result = AccumDict({i: AccumDict() for i in ordinals})
    # ordinal -> (key_type, key) -> array
    for out_by_rlz in riskmodel.gen_outputs(riskinputs, rlzs_assoc, monitor):
        for out in out_by_rlz:
            for asset, fraction in zip(out.assets, out.damages):
                damages = fraction * asset.number
                result[out.hid] += {
                    ('asset', asset.id): scientific.mean_std(damages)
                }
                result[out.hid] += {('taxonomy', asset.taxonomy): damages}
    return result
Example #9
def scenario_damage(riskinputs, riskmodel, monitor):
    """
    Core function for a damage computation.

    :param riskinputs:
        a list of :class:`openquake.risklib.workflows.RiskInput` objects
    :param riskmodel:
        a :class:`openquake.risklib.workflows.RiskModel` instance
    :param monitor:
        :class:`openquake.commonlib.parallel.PerformanceMonitor` instance
    :returns:
        a dictionary {('asset', asset): <mean stddev>,
                      ('taxonomy', asset.taxonomy): <damage array>}
    """
    logging.info('Process %d, considering %d risk input(s) of weight %d',
                 os.getpid(), len(riskinputs),
                 sum(ri.weight for ri in riskinputs))
    with monitor:
        result = AccumDict()  # (key_type, key) -> result
        for loss_type, (assets, fractions) in \
                riskmodel.gen_outputs(riskinputs):
            for asset, fraction in zip(assets, fractions):
                damages = fraction * asset.number
                result += {('asset', asset): scientific.mean_std(damages)}
                result += {('taxonomy', asset.taxonomy): damages}
    return result
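Several examples accumulate their partial results with `+=` on an AccumDict. A minimal sketch of the behaviour assumed here (AccumDictSketch is an illustration, not the openquake.baselib class): `+=` with a plain dict merges by key and sums the values for keys that are already present.

import numpy

class AccumDictSketch(dict):
    """Dict whose += merges another mapping, summing values on key clashes."""
    def __iadd__(self, other):
        for key, value in other.items():
            self[key] = self[key] + value if key in self else value
        return self

acc = AccumDictSketch()
acc += {('taxonomy', 'RC'): numpy.array([0.7, 0.2, 0.1])}
acc += {('taxonomy', 'RC'): numpy.array([0.5, 0.3, 0.2])}  # summed element-wise
acc += {('asset', 'a1'): numpy.array([0.9, 0.1, 0.0])}     # new key, just stored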
Example #10
    def post_execute(self, result):
        """
        Compute stats for the aggregated distributions and save
        the results on the datastore.
        """
        ltypes = self.riskmodel.loss_types
        dt_list = [('mean', F32), ('stddev', F32)]
        if self.oqparam.insured_losses:
            dt_list.extend([('mean_ins', F32), ('stddev_ins', F32)])
        stat_dt = numpy.dtype(dt_list)
        multi_stat_dt = numpy.dtype([(lt, stat_dt) for lt in ltypes])
        with self.monitor('saving outputs', autoflush=True):
            R = len(self.rlzs_assoc.realizations)
            N = len(self.assetcol)

            # agg losses
            agglosses = numpy.zeros(R, multi_stat_dt)
            mean, std = scientific.mean_std(result['agg'])
            for l, lt in enumerate(ltypes):
                agg = agglosses[lt]
                agg['mean'] = numpy.float32(mean[l, :, 0])
                agg['stddev'] = numpy.float32(std[l, :, 0])
                if self.oqparam.insured_losses:
                    agg['mean_ins'] = numpy.float32(mean[l, :, 1])
                    agg['stddev_ins'] = numpy.float32(std[l, :, 1])

            # average losses
            avglosses = numpy.zeros((N, R), multi_stat_dt)
            for (l, r, aid, stat) in result['avg']:
                avglosses[ltypes[l]][aid, r] = stat
            self.datastore['losses_by_asset'] = avglosses
            self.datastore['agglosses-rlzs'] = agglosses
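The agglosses/avglosses containers above are nested structured arrays. A small self-contained sketch of how such a dtype is built and indexed, with made-up loss types and values:

import numpy

F32 = numpy.float32
stat_dt = numpy.dtype([('mean', F32), ('stddev', F32)])
# one (mean, stddev) record per loss type
multi_stat_dt = numpy.dtype([(lt, stat_dt) for lt in ('structural', 'contents')])

R = 3  # number of realizations (made up)
agglosses = numpy.zeros(R, multi_stat_dt)
agg = agglosses['structural']          # view of the 'structural' records
agg['mean'] = [1.0, 2.0, 3.0]          # one mean per realization
agg['stddev'] = [0.1, 0.2, 0.3]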
Example #12
    def post_execute(self, result):
        """
        Compute stats for the aggregated distributions and save
        the results on the datastore.
        """
        with self.monitor('saving outputs', autoflush=True):
            L = len(self.riskmodel.loss_types)
            R = len(self.rlzs_assoc.realizations)
            N = len(self.assetcol)
            arr = dict(avg=numpy.zeros((N, L, R), stat_dt),
                       agg=numpy.zeros((L, R), stat_dt))
            for (l, r), res in result.items():
                for keytype, key in res:
                    if keytype == 'agg':
                        agg_losses = arr[keytype][l, r]
                        mean, std = scientific.mean_std(res[keytype, key])
                        if key == 0:
                            agg_losses['mean'] = mean
                            agg_losses['stddev'] = std
                        else:
                            agg_losses['mean_ins'] = mean
                            agg_losses['stddev_ins'] = std
                    else:
                        arr[keytype][key, l, r] = res[keytype, key]
            self.datastore['avglosses'] = arr['avg']
            self.datastore['agglosses'] = arr['agg']
Example #13
def scenario_damage(riskinputs, riskmodel, rlzs_assoc, monitor):
    """
    Core function for a damage computation.

    :param riskinputs:
        a list of :class:`openquake.risklib.riskinput.RiskInput` objects
    :param riskmodel:
        a :class:`openquake.risklib.riskinput.RiskModel` instance
    :param rlzs_assoc:
        a :class:`openquake.commonlib.source.RlzsAssoc` instance
    :param monitor:
        :class:`openquake.commonlib.parallel.PerformanceMonitor` instance
    :returns:
        a dictionary {('asset', asset): <mean stddev>,
                      ('taxonomy', asset.taxonomy): <damage array>}
    """
    logging.info('Process %d, considering %d risk input(s) of weight %d',
                 os.getpid(), len(riskinputs),
                 sum(ri.weight for ri in riskinputs))
    ordinals = list(range(len(rlzs_assoc.realizations)))
    result = AccumDict({i: AccumDict() for i in ordinals})
    # ordinal -> (key_type, key) -> array
    for out_by_rlz in riskmodel.gen_outputs(
            riskinputs, rlzs_assoc, monitor):
        for out in out_by_rlz:
            for asset, fraction in zip(out.assets, out.damages):
                damages = fraction * asset.number
                result[out.hid] += {
                    ('asset', asset.id): scientific.mean_std(damages)}
                result[out.hid] += {
                    ('taxonomy', asset.taxonomy): damages}
    return result
Example #14
def scenario_damage(riskinputs, crmodel, param, monitor):
    """
    Core function for a damage computation.

    :param riskinputs:
        :class:`openquake.risklib.riskinput.RiskInput` objects
    :param crmodel:
        a :class:`openquake.risklib.riskinput.CompositeRiskModel` instance
    :param monitor:
        :class:`openquake.baselib.performance.Monitor` instance
    :param param:
        dictionary of extra parameters
    :returns:
        a dictionary {'d_asset': [(l, r, a, mean-stddev), ...],
                      'd_event': damage array of shape R, L, E, D,
                      'c_asset': [(l, r, a, mean-stddev), ...],
                      'c_event': damage array of shape R, L, E}

    `d_asset` and `d_event` are related to the damage distributions
    whereas `c_asset` and `c_event` are the consequence distributions.
    If there is no consequence model `c_asset` is an empty list and
    `c_event` is a zero-valued array.
    """
    L = len(crmodel.loss_types)
    D = len(crmodel.damage_states)
    E = param['number_of_ground_motion_fields']
    R = riskinputs[0].hazard_getter.num_rlzs
    result = dict(d_asset=[],
                  d_event=numpy.zeros((E, R, L, D), F64),
                  c_asset=[],
                  c_event=numpy.zeros((E, R, L), F64))
    for ri in riskinputs:
        for out in ri.gen_outputs(crmodel, monitor):
            r = out.rlzi
            for l, loss_type in enumerate(crmodel.loss_types):
                for asset, fractions in zip(ri.assets, out[loss_type]):
                    dmg = fractions[:, :D] * asset['number']  # shape (E, D)
                    result['d_event'][:, r, l] += dmg
                    result['d_asset'].append(
                        (l, r, asset['ordinal'], scientific.mean_std(dmg)))
                    if crmodel.has('consequence'):
                        csq = fractions[:, D] * asset['value-' + loss_type]
                        result['c_asset'].append(
                            (l, r, asset['ordinal'], scientific.mean_std(csq)))
                        result['c_event'][:, r, l] += csq
    return result
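A hedged illustration of the slicing used above, with made-up shapes and values: each per-asset output block is assumed to have E rows and D+1 columns, the first D holding the damage-state fractions and the last one the consequence ratio.

import numpy

E, D = 5, 4
fractions = numpy.random.random((E, D + 1))  # made-up output block
number = 3.0      # plays the role of asset['number']
value = 2000.0    # plays the role of asset['value-' + loss_type]

dmg = fractions[:, :D] * number   # shape (E, D): damaged units per event/state
csq = fractions[:, D] * value     # shape (E,): consequence loss per event
dmg_mean, dmg_std = dmg.mean(axis=0), dmg.std(axis=0)  # per damage state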
Example #15
def scenario_damage(riskinputs, riskmodel, param, monitor):
    """
    Core function for a damage computation.

    :param riskinputs:
        :class:`openquake.risklib.riskinput.RiskInput` objects
    :param riskmodel:
        a :class:`openquake.risklib.riskinput.CompositeRiskModel` instance
    :param monitor:
        :class:`openquake.baselib.performance.Monitor` instance
    :param param:
        dictionary of extra parameters
    :returns:
        a dictionary {'d_asset': [(l, r, a, mean-stddev), ...],
                      'd_event': damage array of shape R, L, E, D,
                      'c_asset': [(l, r, a, mean-stddev), ...],
                      'c_event': damage array of shape R, L, E}

    `d_asset` and `d_event` are related to the damage distributions
    whereas `c_asset` and `c_event` are the consequence distributions.
    If there is no consequence model `c_asset` is an empty list and
    `c_event` is a zero-valued array.
    """
    L = len(riskmodel.loss_types)
    D = len(riskmodel.damage_states)
    E = param['number_of_ground_motion_fields']
    R = riskinputs[0].hazard_getter.num_rlzs
    result = dict(d_asset=[], d_event=numpy.zeros((E, R, L, D), F64),
                  c_asset=[], c_event=numpy.zeros((E, R, L), F64))
    for ri in riskinputs:
        for out in riskmodel.gen_outputs(ri, monitor):
            r = out.rlzi
            for l, loss_type in enumerate(riskmodel.loss_types):
                for asset, fractions in zip(ri.assets, out[loss_type]):
                    dmg = fractions[:, :D] * asset['number']  # shape (E, D)
                    result['d_event'][:, r, l] += dmg
                    result['d_asset'].append(
                        (l, r, asset['ordinal'], scientific.mean_std(dmg)))
                    if riskmodel.has('consequence'):
                        csq = fractions[:, D] * asset['value-' + loss_type]
                        result['c_asset'].append(
                            (l, r, asset['ordinal'], scientific.mean_std(csq)))
                        result['c_event'][:, r, l] += csq
    return result
Example #16
    def post_execute(self, result):
        """
        :param result: a dictionary {
             ('asset', asset): <mean stddev>,
             ('taxonomy', asset.taxonomy): <damage array>}
        :returns: a dictionary {
             'dmg_per_asset': /path/to/dmg_per_asset.xml,
             'dmg_per_taxonomy': /path/to/dmg_per_taxonomy.xml,
             'dmg_total': /path/to/dmg_total.xml}
        """
        dmg_states = [DmgState(s, i)
                      for i, s in enumerate(self.riskmodel.damage_states)]
        dd_taxo = []
        dd_asset = []
        shape = self.oqparam.number_of_ground_motion_fields, len(dmg_states)
        totals = numpy.zeros(shape)  # R x D matrix
        for (key_type, key), values in result.iteritems():
            if key_type == 'taxonomy':
                # values are fractions, R x D matrix
                totals += values
                means, stds = scientific.mean_std(values)
                for dmg_state, mean, std in zip(dmg_states, means, stds):
                    dd_taxo.append(
                        DmgDistPerTaxonomy(key, dmg_state, mean, std))
            elif key_type == 'asset':
                # values are mean and stddev, a D x 2 matrix
                for dmg_state, mean_std in zip(dmg_states, values):
                    dd_asset.append(
                        DmgDistPerAsset(
                            ExposureData(key.id, Site(key.location)),
                            dmg_state, mean_std[0], mean_std[1]))
        dd_total = []
        for dmg_state, total in zip(dmg_states, totals):
            mean, std = scientific.mean_std(total)
            dd_total.append(DmgDistTotal(dmg_state, mean, std))

        # export
        f1 = export('dmg_per_asset_xml', self.oqparam.export_dir,
                    self.riskmodel.damage_states, dd_asset)
        f2 = export('dmg_per_taxonomy_xml', self.oqparam.export_dir,
                    self.riskmodel.damage_states, dd_taxo)
        f3 = export('dmg_total_xml', self.oqparam.export_dir,
                    self.riskmodel.damage_states, dd_total)
        return f1 + f2 + f3
Example #17
def dmg_by_taxon(agg_damage, stat_dt):
    """
    :param agg_damage: array of shape (T, L, R, E, D)
    :param stat_dt: numpy dtype for statistical outputs
    :returns: array of shape (T, L, R) with records of type stat_dt
    """
    T, L, R, E, D = agg_damage.shape
    out = numpy.zeros((T, L, R), stat_dt)
    for t, l, r in itertools.product(range(T), range(L), range(R)):
        out[t, l, r] = scientific.mean_std(agg_damage[t, l, r])
    return out
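A short sketch of the reduction performed by dmg_by_taxon, with an assumed stat_dt whose fields are length-D subarrays: each (taxonomy, loss type, realization) cell of shape (E, D) collapses to one (mean, stddev) record taken over the events axis.

import itertools
import numpy

F64 = numpy.float64
T, L, R, E, D = 2, 1, 2, 10, 4                      # made-up sizes
stat_dt = numpy.dtype([('mean', (F64, D)), ('stddev', (F64, D))])
agg_damage = numpy.random.random((T, L, R, E, D))

out = numpy.zeros((T, L, R), stat_dt)
for t, l, r in itertools.product(range(T), range(L), range(R)):
    cell = agg_damage[t, l, r]                      # shape (E, D)
    out[t, l, r] = (cell.mean(axis=0), cell.std(axis=0))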
Example #18
    def post_execute(self, result):
        """
        Compute stats for the aggregated distributions and save
        the results on the datastore.
        """
        loss_dt = self.oqparam.loss_dt()
        L = len(loss_dt.names)
        dtlist = [('event_id', U32), ('loss', (F32, (L,)))]
        R = self.R
        with self.monitor('saving outputs'):
            A = len(self.assetcol)

            # agg losses
            res = result['agg']
            E, L = res.shape
            agglosses = numpy.zeros((R, L), stat_dt)
            for r in range(R):
                mean, std = scientific.mean_std(res[self.rlzs == r])
                agglosses[r]['mean'] = mean
                agglosses[r]['stddev'] = std

            # avg losses
            losses_by_asset = numpy.zeros((A, R, L), F64)
            for (l, r, aid, avg) in result['avg']:
                losses_by_asset[aid, r, l] = avg

            self.datastore['avg_losses-rlzs'] = losses_by_asset
            set_rlzs_stats(self.datastore, 'avg_losses',
                           asset_id=self.assetcol['id'],
                           loss_type=self.oqparam.loss_names)
            self.datastore['agglosses'] = agglosses

            # losses by event
            lbe = numpy.zeros(E, dtlist)
            lbe['event_id'] = range(E)
            lbe['loss'] = res
            self.datastore['event_loss_table/,'] = lbe
            loss_types = self.oqparam.loss_dt().names
            self.datastore.set_attrs(
                'event_loss_table/,', loss_types=loss_types)

            # sanity check
            totlosses = losses_by_asset.sum(axis=0)
            msg = ('%s, rlz=%d: the total loss %s is different from the sum '
                   'of the average losses %s')
            for r in range(R):
                for l, name in enumerate(loss_dt.names):
                    totloss = totlosses[r, l]
                    aggloss = agglosses[r, l]['mean']
                    if not numpy.allclose(totloss, aggloss, rtol=1E-6):
                        logging.warning(msg, name, r, totloss, aggloss)
        logging.info('Mean portfolio loss\n' +
                     views.view('portfolio_loss', self.datastore))
Example #19
def dmg_total(agg_damage, stat_dt):
    """
    :param agg_damage: array of shape (T, L, R, E, D)
    :param stat_dt: numpy dtype for statistical outputs
    :returns: array of shape (L, R) with records of type stat_dt
    """
    T, L, R, E, D = agg_damage.shape
    total = agg_damage.sum(axis=0)
    out = numpy.zeros((L, R), stat_dt)
    for l, r in itertools.product(range(L), range(R)):
        out[l, r] = scientific.mean_std(total[l, r])
    return out
Example #20
def total_damage_distribution(fractions, dmg_state_ids):
    """
    Save the total distribution, by summing over all assets and taxonomies.

    :param fractions: numpy array with the damage fractions
    :param dmg_state_ids:
       a list of  IDs of instances of
       :class:`openquake.engine.db.models.DmgState` ordered by `lsi`
    """
    means, stds = scientific.mean_std(fractions)
    for mean, std, dmg_state in zip(means, stds, dmg_state_ids):
        models.DmgDistTotal.objects.create(dmg_state_id=dmg_state, mean=mean, stddev=std)
Example #21
def total_damage_distribution(fractions, dmg_state_ids):
    """
    Save the total distribution, by summing over all assets and taxonomies.

    :param fractions: numpy array with the damage fractions
    :param dmg_state_ids:
       a list of  IDs of instances of
       :class:`openquake.engine.db.models.DmgState` ordered by `lsi`
    """
    means, stds = scientific.mean_std(fractions)
    for mean, std, dmg_state in zip(means, stds, dmg_state_ids):
        models.DmgDistTotal.objects.create(
            dmg_state_id=dmg_state, mean=mean, stddev=std)
Example #22
def dist_by_taxon(data, multi_stat_dt):
    """
    :param data: array of shape (T, R, L, ...)
    :param multi_stat_dt: numpy dtype for statistical outputs
    :returns: array of shape (T, R) with records of type multi_stat_dt
    """
    T, R, L = data.shape[:3]
    out = numpy.zeros((T, R), multi_stat_dt)
    for l, lt in enumerate(multi_stat_dt.names):
        out_lt = out[lt]
        for t, r in itertools.product(range(T), range(R)):
            out_lt[t, r] = scientific.mean_std(data[t, r, l])
    return out
Example #23
File: core.py Project: 4x/oq-engine
def save_dist_total(fractions, rc_id):
    """
    Save the total distribution, by summing over all assets and taxonomies.

    :param fractions: numpy array with the damage fractions
    :param int rc_id: the risk_calculation_id
    """
    dmg_states = models.DmgState.objects.filter(risk_calculation__id=rc_id)
    mean, std = scientific.mean_std(fractions)
    for dmg_state in dmg_states:
        lsi = dmg_state.lsi
        ddt = models.DmgDistTotal(dmg_state=dmg_state, mean=mean[lsi], stddev=std[lsi])
        ddt.save()
Example #24
def dist_by_taxon(data, multi_stat_dt):
    """
    :param data: array of shape (T, L, R, ...)
    :param multi_stat_dt: numpy dtype for statistical outputs
    :returns: array of shape (T, R) with records of type multi_stat_dt
    """
    T, L, R = data.shape[:3]
    out = numpy.zeros((T, R), multi_stat_dt)
    for l, lt in enumerate(multi_stat_dt.names):
        out_lt = out[lt]
        for t, r in itertools.product(range(T), range(R)):
            out_lt[t, r] = scientific.mean_std(data[t, l, r])
    return out
Example #25
    def post_execute(self, result):
        """
        Compute stats for the aggregated distributions and save
        the results on the datastore.
        """
        loss_dt = self.oqparam.loss_dt()
        LI = len(loss_dt.names)
        dtlist = [('eid', U64), ('rlzi', U16), ('loss', (F32, LI))]
        I = self.oqparam.insured_losses + 1
        R = self.R
        with self.monitor('saving outputs', autoflush=True):
            A = len(self.assetcol)

            # agg losses
            res = result['agg']
            E, LI = res.shape
            L = LI // I
            mean, std = scientific.mean_std(res)  # shape LI
            agglosses = numpy.zeros(LI, stat_dt)
            agglosses['mean'] = F32(mean)
            agglosses['stddev'] = F32(std)

            # losses by asset
            losses_by_asset = numpy.zeros((A, R, LI), stat_dt)
            for (l, r, aid, stat) in result['avg']:
                for i in range(I):
                    losses_by_asset[aid, r, l + L * i] = stat[i]
            self.datastore['losses_by_asset'] = losses_by_asset
            self.datastore['agglosses'] = agglosses

            # losses by event
            num_gmfs = self.oqparam.number_of_ground_motion_fields
            lbe = numpy.fromiter(
                ((ei, ei // num_gmfs, res[ei]) for ei in range(E)), dtlist)
            self.datastore['losses_by_event'] = lbe
            loss_types = ' '.join(self.oqparam.loss_dt().names)
            self.datastore.set_attrs('losses_by_event', loss_types=loss_types)

            # all losses
            if self.oqparam.asset_loss_table:
                array = numpy.zeros((A, E), loss_dt)
                for (l, r), losses_by_aid in result['all_losses'].items():
                    slc = self.event_slice(r)
                    for aid in losses_by_aid:
                        lba = losses_by_aid[aid]  # (E, I)
                        for i in range(I):
                            lt = loss_dt.names[l + L * i]
                            array[lt][aid, slc] = lba[:, i]
                self.datastore['asset_loss_table'] = array
                tags = [encode(tag) for tag in self.assetcol.tagcol]
                self.datastore.set_attrs('asset_loss_table', tags=tags)
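The l + L * i indexing above relies on a specific column order. A small sketch of the layout assumed here (the loss-type names are made up): the first L columns are the ground-up loss types, the next L their insured counterparts, so column l + L * i selects loss type l, ground-up for i = 0 and insured for i = 1.

L, I = 3, 2
loss_names = ['structural', 'nonstructural', 'contents']
columns = loss_names + [lt + '_ins' for lt in loss_names]  # length L * I

for i in range(I):
    for l in range(L):
        expected = loss_names[l] + ('' if i == 0 else '_ins')
        assert columns[l + L * i] == expected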
Example #26
def dist_total(data, multi_stat_dt):
    """
    :param data: array of shape (T, R, L, ...)
    :param multi_stat_dt: numpy dtype for statistical outputs
    :returns: array of shape (R,) with records of type multi_stat_dt
    """
    T, R, L = data.shape[:3]
    total = data.sum(axis=0)
    out = numpy.zeros(R, multi_stat_dt)
    for l, lt in enumerate(multi_stat_dt.names):
        out_lt = out[lt]
        for r in range(R):
            out_lt[r] = scientific.mean_std(total[r, l])
    return out
Example #27
def damage_distribution_per_taxonomy(fractions, dmg_state_ids, taxonomy):
    """
    Save the damage distribution for a given taxonomy, by summing over
    all assets.

    :param fractions: numpy array with the damage fractions
    :param dmg_state_ids:
       a list of  IDs of instances of
       :class:`openquake.engine.db.models.DmgState` ordered by `lsi`
    :param str taxonomy: the taxonomy string
    """
    means, stddevs = scientific.mean_std(fractions)
    for dmg_state_id, mean, stddev in zip(dmg_state_ids, means, stddevs):
        models.DmgDistPerTaxonomy.objects.create(dmg_state_id=dmg_state_id, mean=mean, stddev=stddev, taxonomy=taxonomy)
Example #28
def dist_total(data, multi_stat_dt):
    """
    :param data: array of shape (T, L, R, ...)
    :param multi_stat_dt: numpy dtype for statistical outputs
    :returns: array of shape (R,) with records of type multi_stat_dt
    """
    T, L, R = data.shape[:3]
    total = data.sum(axis=0)
    out = numpy.zeros(R, multi_stat_dt)
    for l, lt in enumerate(multi_stat_dt.names):
        out_lt = out[lt]
        for r in range(R):
            out_lt[r] = scientific.mean_std(total[l, r])
    return out
Example #29
File: core.py Project: 4x/oq-engine
def save_dist_per_asset(fractions, rc_id, asset):
    """
    Save the damage distribution for a given asset.

    :param fractions: numpy array with the damage fractions
    :param rc_id: the risk_calculation_id
    :param asset: an ExposureData instance
    """
    dmg_states = models.DmgState.objects.filter(risk_calculation__id=rc_id)
    mean, std = scientific.mean_std(fractions)
    for dmg_state in dmg_states:
        lsi = dmg_state.lsi
        ddpa = models.DmgDistPerAsset(dmg_state=dmg_state, mean=mean[lsi], stddev=std[lsi], exposure_data=asset)
        ddpa.save()
Example #30
    def post_execute(self, result):
        """
        Compute stats for the aggregated distributions and save
        the results on the datastore.
        """
        loss_dt = self.oqparam.loss_dt()
        L = len(loss_dt.names)
        dtlist = [('event_id', U32), ('rlzi', U16), ('loss', (F32, (L, )))]
        R = self.R
        with self.monitor('saving outputs'):
            A = len(self.assetcol)

            # agg losses
            res = result['agg']
            E, L = res.shape
            agglosses = numpy.zeros((R, L), stat_dt)
            for r in range(R):
                mean, std = scientific.mean_std(res[self.event_slice(r)])
                agglosses[r]['mean'] = F32(mean)
                agglosses[r]['stddev'] = F32(std)

            # losses by asset
            losses_by_asset = numpy.zeros((A, R, L), stat_dt)
            for (l, r, aid, stat) in result['avg']:
                losses_by_asset[aid, r, l] = stat
            self.datastore['losses_by_asset'] = losses_by_asset
            self.datastore['agglosses'] = agglosses

            # losses by event
            lbe = numpy.zeros(E, dtlist)
            lbe['event_id'] = range(E)
            lbe['rlzi'] = (lbe['event_id'] //
                           self.oqparam.number_of_ground_motion_fields)
            lbe['loss'] = res
            self.datastore['losses_by_event'] = lbe
            loss_types = self.oqparam.loss_dt().names
            self.datastore.set_attrs('losses_by_event', loss_types=loss_types)

            # all losses
            if self.oqparam.asset_loss_table:
                array = numpy.zeros((A, E), loss_dt)
                for (l, r), losses_by_aid in result['all_losses'].items():
                    slc = self.event_slice(r)
                    for aid in losses_by_aid:
                        lba = losses_by_aid[aid]  # E
                        lt = loss_dt.names[l]
                        array[lt][aid, slc] = lba
                self.datastore['asset_loss_table'] = array
                tags = [encode(tag) for tag in self.assetcol.tagcol]
                self.datastore.set_attrs('asset_loss_table', tags=tags)
Example #31
    def post_execute(self, result):
        """
        Compute stats for the aggregated distributions and save
        the results on the datastore.
        """
        loss_dt = self.oqparam.loss_dt()
        I = self.oqparam.insured_losses + 1
        with self.monitor('saving outputs', autoflush=True):
            A = len(self.assetcol)

            # agg losses
            res = result['agg']
            E, R, LI = res.shape
            L = LI // I
            mean, std = scientific.mean_std(res)  # shape (R, LI)
            agglosses = numpy.zeros((R, L * I), stat_dt)
            agglosses['mean'] = F32(mean)
            agglosses['stddev'] = F32(std)

            # losses by taxonomy
            taxid = {
                t: i
                for i, t in enumerate(sorted(self.assetcol.taxonomies))
            }
            T = len(taxid)
            dset = self.datastore.create_dset('losses_by_taxon-rlzs', F32,
                                              (T, R, LI))
            for tax, array in result['losses_by_taxon'].items():
                dset[taxid[tax]] = array

            # losses by asset
            losses_by_asset = numpy.zeros((A, R, L * I), stat_dt)
            for (l, r, aid, stat) in result['avg']:
                for i in range(I):
                    losses_by_asset[aid, r, l + L * i] = stat[i]
            self.datastore['losses_by_asset'] = losses_by_asset
            self.datastore['agglosses-rlzs'] = agglosses

            # losses by event
            self.datastore['losses_by_event'] = res  # shape (E, R, LI)

            if self.oqparam.asset_loss_table:
                array = numpy.zeros((A, E, R), loss_dt)
                for (l, r), losses_by_aid in result['all_losses'].items():
                    for aid in losses_by_aid:
                        lba = losses_by_aid[aid]  # (E, I)
                        for i in range(I):
                            lt = loss_dt.names[l + L * i]
                            array[lt][aid, :, r] = lba[:, i]
                self.datastore['all_losses-rlzs'] = array
Example #32
    def post_execute(self, result):
        """
        Export the aggregate loss curves in CSV format.
        """
        aggcurves = general.AccumDict()  # key_type -> AggLossCurves
        for (key_type, loss_type), values in result.iteritems():
            mean, std = scientific.mean_std(values)
            curve = AggLossCurve(loss_type, self.unit[loss_type], mean, std)
            aggcurves += {key_type: [curve]}
        out = {}
        for key_type in aggcurves:
            fname = export('%s_loss_csv' % key_type, self.oqparam.export_dir,
                           aggcurves[key_type])
            out[key_type] = fname
        return out
Example #33
File: core.py Project: 4x/oq-engine
def save_dist_per_taxonomy(fractions, rc_id, taxonomy):
    """
    Save the damage distribution for a given taxonomy, by summing over
    all assets.

    :param fractions: numpy array with the damage fractions
    :param int rc_id: the risk_calculation_id
    :param str taxonomy: the taxonomy string
    """
    dmg_states = models.DmgState.objects.filter(risk_calculation__id=rc_id)
    mean, std = scientific.mean_std(fractions)
    for dmg_state in dmg_states:
        lsi = dmg_state.lsi
        ddpt = models.DmgDistPerTaxonomy(dmg_state=dmg_state, mean=mean[lsi], stddev=std[lsi], taxonomy=taxonomy)
        ddpt.save()
Example #34
def damage_distribution_per_taxonomy(fractions, dmg_state_ids, taxonomy):
    """
    Save the damage distribution for a given taxonomy, by summing over
    all assets.

    :param fractions: numpy array with the damage fractions
    :param dmg_state_ids:
       a list of  IDs of instances of
       :class:`openquake.engine.db.models.DmgState` ordered by `lsi`
    :param str taxonomy: the taxonomy string
    """
    means, stddevs = scientific.mean_std(fractions)
    for dmg_state_id, mean, stddev in zip(dmg_state_ids, means, stddevs):
        models.DmgDistPerTaxonomy.objects.create(
            dmg_state_id=dmg_state_id,
            mean=mean, stddev=stddev, taxonomy=taxonomy)
Example #35
    def post_execute(self, result):
        """
        Compute stats for the aggregated distributions and save
        the results on the datastore.
        """
        loss_dt = self.oqparam.loss_dt()
        LI = len(loss_dt.names)
        dtlist = [('eid', U64), ('loss', (F32, LI))]
        R = self.R
        with self.monitor('saving outputs', autoflush=True):
            A = len(self.assetcol)

            # agg losses
            res = result['agg']
            E, L = res.shape
            agglosses = numpy.zeros((R, L), stat_dt)
            for r in range(R):
                mean, std = scientific.mean_std(res[self.event_slice(r)])
                agglosses[r]['mean'] = F32(mean)
                agglosses[r]['stddev'] = F32(std)

            # losses by asset
            losses_by_asset = numpy.zeros((A, R, L), stat_dt)
            for (l, r, aid, stat) in result['avg']:
                losses_by_asset[aid, r, l] = stat
            self.datastore['losses_by_asset'] = losses_by_asset
            self.datastore['agglosses'] = agglosses

            # losses by event
            lbe = numpy.fromiter(((ei, res[ei]) for ei in range(E)), dtlist)
            self.datastore['losses_by_event'] = lbe
            loss_types = ' '.join(self.oqparam.loss_dt().names)
            self.datastore.set_attrs('losses_by_event', loss_types=loss_types)

            # all losses
            if self.oqparam.asset_loss_table:
                array = numpy.zeros((A, E), loss_dt)
                for (l, r), losses_by_aid in result['all_losses'].items():
                    slc = self.event_slice(r)
                    for aid in losses_by_aid:
                        lba = losses_by_aid[aid]  # E
                        lt = loss_dt.names[l]
                        array[lt][aid, slc] = lba
                self.datastore['asset_loss_table'] = array
                tags = [encode(tag) for tag in self.assetcol.tagcol]
                self.datastore.set_attrs('asset_loss_table', tags=tags)
Example #36
    def post_execute(self, result):
        """
        Compute stats for the aggregated distributions and save
        the results on the datastore.
        """
        loss_dt = self.oqparam.loss_dt()
        I = self.oqparam.insured_losses + 1
        with self.monitor('saving outputs', autoflush=True):
            A, T = self.tagmask.shape

            # agg losses
            res = result['agg']
            E, R, LI = res.shape
            L = LI // I
            mean, std = scientific.mean_std(res)  # shape (R, LI)
            agglosses = numpy.zeros((R, L * I), stat_dt)
            agglosses['mean'] = F32(mean)
            agglosses['stddev'] = F32(std)

            # losses by tag
            self.datastore['losses_by_tag-rlzs'] = result['losses_by_tag']
            tags = [tag.encode('ascii') for tag in self.assetcol.tags()]
            self.datastore.set_attrs('losses_by_tag-rlzs',
                                     tags=tags,
                                     nbytes=result['losses_by_tag'].nbytes)

            # losses by asset
            losses_by_asset = numpy.zeros((A, R, L * I), stat_dt)
            for (l, r, aid, stat) in result['avg']:
                for i in range(I):
                    losses_by_asset[aid, r, l + L * i] = stat[i]
            self.datastore['losses_by_asset'] = losses_by_asset
            self.datastore['agglosses-rlzs'] = agglosses

            # losses by event
            self.datastore['losses_by_event'] = res  # shape (E, R, LI)

            if self.oqparam.asset_loss_table:
                array = numpy.zeros((A, E, R), loss_dt)
                for (l, r), losses_by_aid in result['all_losses'].items():
                    for aid in losses_by_aid:
                        lba = losses_by_aid[aid]  # (E, I)
                        for i in range(I):
                            lt = loss_dt.names[l + L * i]
                            array[lt][aid, :, r] = lba[:, i]
                self.datastore['all_losses-rlzs'] = array
Example #37
def damage_distribution(assets, fraction_matrix, dmg_state_ids):
    """
    Save the damage distribution for a given asset.
    :param assets:
       a list of ExposureData instances
    :param fraction_matrix:
       numpy array with the damage fractions for each asset
    :param dmg_state_ids:
       a list of  IDs of instances of
       :class:`openquake.engine.db.models.DmgState` ordered by `lsi`
    """
    for fractions, asset in zip(fraction_matrix, assets):
        fractions *= asset.number_of_units
        means, stds = scientific.mean_std(fractions)

        for mean, std, dmg_state_id in zip(means, stds, dmg_state_ids):
            models.DmgDistPerAsset.objects.create(dmg_state_id=dmg_state_id, mean=mean, stddev=std, exposure_data=asset)
Example #38
    def post_execute(self, result):
        """
        Compute stats for the aggregated distributions and save
        the results on the datastore.
        """
        loss_dt = self.oqparam.loss_dt()
        L = len(loss_dt.names)
        dtlist = [('event_id', U32), ('loss', (F32, (L,)))]
        R = self.R
        with self.monitor('saving outputs'):
            A = len(self.assetcol)

            # agg losses
            res = result['agg']
            E, L = res.shape
            agglosses = numpy.zeros((R, L), stat_dt)
            for r in range(R):
                mean, std = scientific.mean_std(res[self.rlzs == r])
                agglosses[r]['mean'] = F32(mean)
                agglosses[r]['stddev'] = F32(std)

            # avg losses
            losses_by_asset = numpy.zeros((A, R, L), F32)
            for (l, r, aid, avg) in result['avg']:
                losses_by_asset[aid, r, l] = avg

            self.datastore['avg_losses-rlzs'] = losses_by_asset
            set_rlzs_stats(self.datastore, 'avg_losses',
                           asset_id=self.assetcol['id'],
                           loss_type=self.oqparam.loss_names)
            self.datastore['agglosses'] = agglosses

            # losses by event
            lbe = numpy.zeros(E, dtlist)
            lbe['event_id'] = range(E)
            lbe['loss'] = res
            self.datastore['losses_by_event'] = lbe
            loss_types = self.oqparam.loss_dt().names
            self.datastore.set_attrs('losses_by_event', loss_types=loss_types)

            # sanity check
            numpy.testing.assert_allclose(
                losses_by_asset.sum(axis=0), agglosses['mean'], rtol=1E-4)
        logging.info('Mean portfolio loss\n' +
                     views.view('portfolio_loss', self.datastore))
Example #39
    def post_execute(self, result):
        """
        Compute stats for the aggregated distributions and save
        the results on the datastore.
        """
        loss_dt = self.oqparam.loss_dt()
        LI = len(loss_dt.names)
        dtlist = [('eid', U64), ('loss', (F32, LI))]
        R = self.R
        with self.monitor('saving outputs', autoflush=True):
            A = len(self.assetcol)

            # agg losses
            res = result['agg']
            E, L = res.shape
            mean, std = scientific.mean_std(res)  # shape L
            agglosses = numpy.zeros(L, stat_dt)
            agglosses['mean'] = F32(mean)
            agglosses['stddev'] = F32(std)

            # losses by asset
            losses_by_asset = numpy.zeros((A, R, L), stat_dt)
            for (l, r, aid, stat) in result['avg']:
                losses_by_asset[aid, r, l] = stat
            self.datastore['losses_by_asset'] = losses_by_asset
            self.datastore['agglosses'] = agglosses

            # losses by event
            lbe = numpy.fromiter(((ei, res[ei]) for ei in range(E)), dtlist)
            self.datastore['losses_by_event'] = lbe
            loss_types = ' '.join(self.oqparam.loss_dt().names)
            self.datastore.set_attrs('losses_by_event', loss_types=loss_types)

            # all losses
            if self.oqparam.asset_loss_table:
                array = numpy.zeros((A, E), loss_dt)
                for (l, r), losses_by_aid in result['all_losses'].items():
                    slc = self.event_slice(r)
                    for aid in losses_by_aid:
                        lba = losses_by_aid[aid]  # E
                        lt = loss_dt.names[l]
                        array[lt][aid, slc] = lba
                self.datastore['asset_loss_table'] = array
                tags = [encode(tag) for tag in self.assetcol.tagcol]
                self.datastore.set_attrs('asset_loss_table', tags=tags)
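
The asset_loss_table block relies on structured-dtype indexing: selecting a loss-type field of a structured (A, E) array yields a plain float view that can be filled per asset and per event slice, as `array[lt][aid, slc] = lba` does above. A small sketch with a hypothetical two-field loss_dt standing in for `oqparam.loss_dt()`:

import numpy

F32 = numpy.float32
# hypothetical loss_dt with two loss types
loss_dt = numpy.dtype([('structural', F32), ('nonstructural', F32)])

A, E = 3, 4  # assets, events
array = numpy.zeros((A, E), loss_dt)

# fill the 'structural' losses of asset 1 for the event slice of one
# realization (events 0-1 here)
slc = slice(0, 2)
array['structural'][1, slc] = [10.0, 12.5]
print(array['structural'])
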
Example #40
def damage_distribution(assets, fraction_matrix, dmg_state_ids):
    """
    Save the damage distribution for a given asset.
    :param assets:
       a list of ExposureData instances
    :param fraction_matrix:
       numpy array with the damage fractions for each asset
    :param dmg_state_ids:
       a list of IDs of instances of
       :class:`openquake.engine.db.models.DmgState` ordered by `lsi`
    """
    for fractions, asset in zip(fraction_matrix, assets):
        fractions *= asset.number_of_units
        means, stds = scientific.mean_std(fractions)

        for mean, std, dmg_state_id in zip(means, stds, dmg_state_ids):
            models.DmgDistPerAsset.objects.create(
                dmg_state_id=dmg_state_id,
                mean=mean, stddev=std, exposure_data=asset)
Example #41
    def post_execute(self, result):
        """
        Compute stats for the aggregated distributions and save
        the results on the datastore.
        """
        loss_dt = self.oqparam.loss_dt()
        L = len(loss_dt.names)
        dtlist = [('event_id', U32), ('rlzi', U16), ('loss', (F32, (L,)))]
        R = self.R
        with self.monitor('saving outputs'):
            A = len(self.assetcol)

            # agg losses
            res = result['agg']
            E, L = res.shape
            agglosses = numpy.zeros((R, L), stat_dt)
            for r in range(R):
                mean, std = scientific.mean_std(res[self.event_slice(r)])
                agglosses[r]['mean'] = F32(mean)
                agglosses[r]['stddev'] = F32(std)

            # losses by asset
            losses_by_asset = numpy.zeros((A, R, L), F32)
            for (l, r, aid, avg) in result['avg']:
                losses_by_asset[aid, r, l] = avg
            self.datastore['avg_losses-rlzs'] = losses_by_asset
            set_rlzs_stats(self.datastore, 'avg_losses',
                           asset_id=self.assetcol['id'],
                           loss_type=self.oqparam.loss_names)
            self.datastore['agglosses'] = agglosses

            # losses by event
            lbe = numpy.zeros(E, dtlist)
            lbe['event_id'] = range(E)
            lbe['rlzi'] = (lbe['event_id'] //
                           self.oqparam.number_of_ground_motion_fields)
            lbe['loss'] = res
            self.datastore['losses_by_event'] = lbe
            loss_types = self.oqparam.loss_dt().names
            self.datastore.set_attrs('losses_by_event', loss_types=loss_types)
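
The rlzi column above is derived purely from the event ordering: in a scenario run the events are assumed to be grouped by realization, `number_of_ground_motion_fields` consecutive events at a time, so integer division recovers the realization index. For example:

import numpy

U16, U32 = numpy.uint16, numpy.uint32

# scenario events ordered by realization, with
# number_of_ground_motion_fields consecutive events per realization
number_of_ground_motion_fields = 3
E = 6  # two realizations of three ground motion fields each
event_id = numpy.arange(E, dtype=U32)
rlzi = (event_id // number_of_ground_motion_fields).astype(U16)
print(rlzi)  # [0 0 0 1 1 1]
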
Example #42
def export_risk(ekey, dstore):
    """
    Export the loss curves of a given realization in CSV format.

    :param ekey: export key, i.e. a pair (datastore key, fmt)
    :param dstore: datastore object
    """
    oqparam = dstore['oqparam']
    unit_by_lt = {
        riskmodels.cost_type_to_loss_type(ct['name']): ct['unit']
        for ct in dstore['cost_types']
    }
    unit_by_lt['fatalities'] = 'people'
    rlzs = dstore['rlzs_assoc'].realizations
    losses_by_key = dstore['losses_by_key']
    fnames = []
    for i in sorted(losses_by_key):
        rlz = rlzs[i]
        result = losses_by_key[i]
        suffix = '' if rlz.uid == '*' else '-gsimltp_%s' % rlz.uid
        losses = AccumDict()
        for key, values in result.items():
            key_type, loss_type = key
            unit = unit_by_lt[loss_type]
            if key_type in ('agg', 'ins'):
                mean, std = scientific.mean_std(values)
                losses += {key_type: [AggLoss(loss_type, unit, mean, std)]}
            else:
                losses += {
                    key_type:
                    [PerAssetLoss(loss_type, unit, *vals) for vals in values]
                }
        for key_type in losses:
            out = export_loss_csv((key_type, 'csv'), oqparam.export_dir,
                                  losses[key_type], suffix)
            fnames.append(out)
    return sorted(fnames)
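
The `losses` accumulator above is an AccumDict (from openquake.baselib), whose `+=` with a plain dict merges it key by key, here concatenating lists of loss records per key type. A rough plain-dict stand-in showing the intended effect:

# rough stand-in for the AccumDict accumulation used above: adding a
# dict merges it key by key, concatenating the lists of loss records
losses = {}
for key_type, records in [('agg', ['loss-a']), ('asset', ['loss-b']),
                          ('agg', ['loss-c'])]:
    losses[key_type] = losses.get(key_type, []) + records
print(losses)  # {'agg': ['loss-a', 'loss-c'], 'asset': ['loss-b']}
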
Example #43
def scenario_damage(riskinputs, riskmodel, rlzs_assoc, monitor):
    """
    Core function for a damage computation.

    :param riskinputs:
        a list of :class:`openquake.risklib.riskinput.RiskInput` objects
    :param riskmodel:
        a :class:`openquake.risklib.riskinput.RiskModel` instance
    :param rlzs_assoc:
        a :class:`openquake.commonlib.source.RlzsAssoc` instance
    :param monitor:
        :class:`openquake.baselib.performance.PerformanceMonitor` instance
    :returns:
        a dictionary {('asset', asset.idx): <mean stddev>,
                      ('taxon', taxonomy index): <damage array>}
    """
    logging.info('Process %d, considering %d risk input(s) of weight %d',
                 os.getpid(), len(riskinputs),
                 sum(ri.weight for ri in riskinputs))
    L = len(riskmodel.loss_types)
    R = len(rlzs_assoc.realizations)
    # D = len(riskmodel.damage_states)
    taxo2idx = {taxo: i for i, taxo in enumerate(monitor.taxonomies)}
    lt2idx = {lt: i for i, lt in enumerate(riskmodel.loss_types)}
    result = calc.build_dict((L, R), AccumDict)
    for out_by_rlz in riskmodel.gen_outputs(riskinputs, rlzs_assoc, monitor):
        for out in out_by_rlz:
            lti = lt2idx[out.loss_type]
            for asset, fraction in zip(out.assets, out.damages):
                damages = fraction * asset.number
                result[lti, out.hid] += {
                    ('asset', asset.idx): scientific.mean_std(damages)
                }
                result[lti, out.hid] += {
                    ('taxon', taxo2idx[asset.taxonomy]): damages
                }
    return result
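
The result built above is effectively a two-level accumulator: an outer mapping keyed by (loss type index, realization index), whose inner dictionaries merge per-asset (mean, stddev) pairs and per-taxonomy damage sums. A plain-dict sketch of that accumulation with hypothetical damages (the real code builds the outer dict with calc.build_dict and accumulates with AccumDict):

import numpy

# hypothetical damages for two assets of the same taxonomy,
# shape (E, D): 3 ground motion fields, 3 damage states
dmg_a0 = numpy.array([[8., 1., 1.], [7., 2., 1.], [9., 0., 1.]])
dmg_a1 = numpy.array([[5., 3., 2.], [6., 2., 2.], [4., 4., 2.]])

lti, rlzi, taxo_idx = 0, 0, 0          # loss type, realization, taxonomy
result = {(lti, rlzi): {}}             # one inner accumulator per (l, r)
acc = result[lti, rlzi]
for aidx, dmg in enumerate([dmg_a0, dmg_a1]):
    # per-asset statistics, as ('asset', asset.idx) -> mean_std(damages)
    acc[('asset', aidx)] = (dmg.mean(axis=0), dmg.std(axis=0, ddof=1))
    # per-taxonomy damages are accumulated by summation
    acc[('taxon', taxo_idx)] = acc.get(('taxon', taxo_idx), 0) + dmg
print(acc[('taxon', 0)])  # summed (E, D) damage array for the taxonomy
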
Example #44
def scenario_damage(riskinputs, crmodel, param, monitor):
    """
    Core function for a damage computation.

    :param riskinputs:
        :class:`openquake.risklib.riskinput.RiskInput` objects
    :param crmodel:
        a :class:`openquake.risklib.riskinput.CompositeRiskModel` instance
    :param monitor:
        :class:`openquake.baselib.performance.Monitor` instance
    :param param:
        dictionary of extra parameters
    :returns:
        a dictionary {'d_asset': [(l, r, a, mean-stddev), ...],
                      'd_event': dict eid -> array of shape (L, D)
                      + optional consequences}

    `d_asset` and `d_event` are related to the damage distributions.
    """
    L = len(crmodel.loss_types)
    D = len(crmodel.damage_states)
    consequences = crmodel.get_consequences()
    collapse_threshold = param['collapse_threshold']
    haz_mon = monitor('getting hazard', measuremem=False)
    rsk_mon = monitor('aggregating risk', measuremem=False)
    acc = AccumDict(accum=numpy.zeros((L, D), F64))  # must be 64 bit
    res = {'d_event': acc}
    for name in consequences:
        res[name + '_by_event'] = AccumDict(accum=numpy.zeros(L, F64))
    for ri in riskinputs:
        # otherwise test 4b will randomly break with last digit changes
        # in dmg_by_event :-(
        result = dict(d_asset=[])
        for name in consequences:
            result[name + '_by_asset'] = []
        ddic = AccumDict(accum=numpy.zeros((L, D - 1), F32))  # aid,eid->dd
        with haz_mon:
            ri.hazard_getter.init()
        for out in ri.gen_outputs(crmodel, monitor):
            with rsk_mon:
                r = out.rlzi
                for l, loss_type in enumerate(crmodel.loss_types):
                    for asset, fractions in zip(ri.assets, out[loss_type]):
                        aid = asset['ordinal']
                        dmg = fractions * asset['number']  # shape (F, D)
                        for e, dmgdist in enumerate(dmg):
                            eid = out.eids[e]
                            acc[eid][l] += dmgdist
                            if dmgdist[-1] >= collapse_threshold:
                                ddic[aid, eid][l] = fractions[e, 1:]
                        result['d_asset'].append(
                            (l, r, asset['ordinal'], scientific.mean_std(dmg)))
                        csq = crmodel.compute_csq(asset, fractions, loss_type)
                        for name, values in csq.items():
                            result[name + '_by_asset'].append(
                                (l, r, asset['ordinal'],
                                 scientific.mean_std(values)))
                            by_event = res[name + '_by_event']
                            for eid, value in zip(out.eids, values):
                                by_event[eid][l] += value
        with rsk_mon:
            result['aed'] = aed = numpy.zeros(len(ddic), param['aed_dt'])
            for i, ((aid, eid), dd) in enumerate(sorted(ddic.items())):
                aed[i] = (aid, eid, dd)
        yield result
    yield res
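
The `aed` array above packs the sparse (asset, event) damage distributions collected in `ddic` into a structured array. A sketch with a hypothetical `aed_dt` (the real dtype is supplied via param['aed_dt']):

import numpy

F32, U32 = numpy.float32, numpy.uint32
# hypothetical aed_dt: asset id, event id and a per-loss-type damage
# distribution over the D-1 non-zero damage states (here L=1, D-1=3)
aed_dt = numpy.dtype([('aid', U32), ('eid', U32), ('dd', (F32, (1, 3)))])

ddic = {(0, 7): numpy.array([[0.10, 0.05, 0.01]], F32),
        (2, 7): numpy.array([[0.30, 0.10, 0.02]], F32)}
aed = numpy.zeros(len(ddic), aed_dt)
for i, ((aid, eid), dd) in enumerate(sorted(ddic.items())):
    aed[i] = (aid, eid, dd)
print(aed['aid'], aed['eid'])  # [0 2] [7 7]
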
Example #45
    def assert_ok(self, fractions, expected_means, expected_stdevs):
        # a scenario_damage calculator returns:
        # 1. the damage_distribution, i.e. (means, stdevs) for all damage states
        # 2. the collapse_map, i.e. (mean, stdev) of the highest damage state
        assert_close(scientific.mean_std(fractions),
                     (expected_means, expected_stdevs))
Example #46
    def assert_ok(self, fractions, expected_means, expected_stdevs):
        # a scenario_damage calculator returns:
        # 1. the damage_distribution, i.e. (means, stdevs) for all damage states
        # 2. the collapse_map, i.e. (mean, stdev) of the highest damage state
        assert_close(scientific.mean_std(fractions),
                     (expected_means, expected_stdevs))
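
`assert_close` here presumably compares the (means, stddevs) pair element-wise within a tolerance. A self-contained stand-in using numpy.testing, with the statistics computed directly with plain numpy instead of via scientific.mean_std:

import numpy
from numpy.testing import assert_allclose

def assert_close(got, expected, rtol=1e-5):
    # rough stand-in for the project's assert_close helper: compare the
    # (means, stddevs) pairs element by element within a tolerance
    for g, e in zip(got, expected):
        assert_allclose(g, e, rtol=rtol)

# two events, two damage states; statistics computed with plain numpy
fractions = numpy.array([[0.8, 0.2], [0.6, 0.4]])
means = fractions.mean(axis=0)
stds = fractions.std(axis=0, ddof=1)
assert_close((means, stds), ([0.7, 0.3], [0.14142136, 0.14142136]))
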
Example #47
def scenario_damage(riskinputs, crmodel, param, monitor):
    """
    Core function for a damage computation.

    :param riskinputs:
        :class:`openquake.risklib.riskinput.RiskInput` objects
    :param crmodel:
        a :class:`openquake.risklib.riskinput.CompositeRiskModel` instance
    :param monitor:
        :class:`openquake.baselib.performance.Monitor` instance
    :param param:
        dictionary of extra parameters
    :returns:
        a dictionary {'d_asset': [(l, r, a, mean-stddev), ...],
                      'd_event': dict eid -> array of shape (L, D)
                      + optional consequences}

    `d_asset` and `d_event` are related to the damage distributions.
    """
    L = len(crmodel.loss_types)
    D = len(crmodel.damage_states)
    consequences = crmodel.get_consequences()
    haz_mon = monitor('getting hazard', measuremem=False)
    rsk_mon = monitor('aggregating risk', measuremem=False)
    d_event = AccumDict(accum=numpy.zeros((L, D), U32))
    res = {'d_event': d_event}
    for name in consequences:
        res[name + '_by_event'] = AccumDict(accum=numpy.zeros(L, F64))
        # using F64 here is necessary: with F32 the non-commutativity
        # of addition would hurt too much with multiple tasks
    seed = param['master_seed']
    # algorithm used to compute the discrete damage distributions
    make_ddd = approx_ddd if param['approx_ddd'] else bin_ddd
    for ri in riskinputs:
        # otherwise test 4b will randomly break with last digit changes
        # in dmg_by_event :-(
        result = dict(d_asset=[])
        for name in consequences:
            result[name + '_by_asset'] = []
        ddic = AccumDict(accum=numpy.zeros((L, D - 1), F32))  # aid,eid->dd
        with haz_mon:
            ri.hazard_getter.init()
        for out in ri.gen_outputs(crmodel, monitor):
            with rsk_mon:
                r = out.rlzi
                for l, loss_type in enumerate(crmodel.loss_types):
                    for asset, fractions in zip(ri.assets, out[loss_type]):
                        aid = asset['ordinal']
                        ddds = make_ddd(fractions, asset['number'], seed + aid)
                        for e, ddd in enumerate(ddds):
                            eid = out.eids[e]
                            if ddd[1:].any():
                                ddic[aid, eid][l] = ddd[1:]
                                d_event[eid][l] += ddd
                        if make_ddd is approx_ddd:
                            ms = mean_std(fractions * asset['number'])
                        else:
                            ms = mean_std(ddds)
                        result['d_asset'].append((l, r, asset['ordinal'], ms))
                        # TODO: use the ddd, not the fractions in compute_csq
                        csq = crmodel.compute_csq(asset, fractions, loss_type)
                        for name, values in csq.items():
                            result[name + '_by_asset'].append(
                                (l, r, asset['ordinal'], mean_std(values)))
                            by_event = res[name + '_by_event']
                            for eid, value in zip(out.eids, values):
                                by_event[eid][l] += value
        with rsk_mon:
            result['aed'] = aed = numpy.zeros(len(ddic), param['aed_dt'])
            for i, ((aid, eid), dd) in enumerate(sorted(ddic.items())):
                aed[i] = (aid, eid, dd)
        yield result
    yield res
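
`approx_ddd` and `bin_ddd` both turn continuous damage fractions into discrete damage distributions (integer building counts per damage state); `bin_ddd` does so by random sampling with the given seed. Their exact signatures are not shown here, so the following is only a hypothetical sampler illustrating the idea with one multinomial draw per event:

import numpy

def sample_ddd(fractions, number, seed):
    # hypothetical sketch of a bin_ddd-like sampler: turn continuous
    # damage fractions of shape (E, D) into integer counts of buildings
    # per damage state, with one multinomial draw per event
    rng = numpy.random.default_rng(seed)
    return numpy.array([rng.multinomial(number, f / f.sum())
                        for f in fractions])

fractions = numpy.array([[0.7, 0.2, 0.1],
                         [0.5, 0.3, 0.2]])
ddds = sample_ddd(fractions, number=10, seed=42)
print(ddds)          # integer counts, one row per event
print(ddds.sum(1))   # [10 10] -- each row sums to the number of buildings
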