Example 1
import numpy

from openquake.baselib import general
from openquake.risklib import workflows


def classical_risk(riskinputs, riskmodel, rlzs_assoc, monitor):
    """
    Compute and return the average losses for each asset.

    :param riskinputs:
        a list of :class:`openquake.risklib.riskinput.RiskInput` objects
    :param riskmodel:
        a :class:`openquake.risklib.riskinput.RiskModel` instance
    :param rlzs_assoc:
        associations (trt_id, gsim) -> realizations
    :param monitor:
        :class:`openquake.baselib.performance.PerformanceMonitor` instance
    """
    result = general.AccumDict({rlz.ordinal: general.AccumDict()
                                for rlz in rlzs_assoc.realizations})
    for out_by_rlz in riskmodel.gen_outputs(riskinputs, rlzs_assoc, monitor):
        for out in out_by_rlz:
            values = workflows.get_values(out.loss_type, out.assets)
            for i, asset in enumerate(out.assets):
                if out.average_insured_losses is not None:
                    ins = out.average_insured_losses[i] * values[i]
                else:
                    ins = numpy.nan
                result[out.hid][out.loss_type, asset.id] = (
                    out.average_losses[i] * values[i], ins)
    return result
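The function returns a nested AccumDict precisely so that partial results coming from parallel tasks can be merged with a plain `+`. Below is a minimal, self-contained sketch of that accumulation pattern, using a simplified stand-in for openquake.baselib.general.AccumDict (the real class is more general); the task data is made up for illustration:

class MiniAccumDict(dict):
    # simplified stand-in for openquake.baselib.general.AccumDict:
    # `+` merges two dicts, recursively adding values on common keys
    def __add__(self, other):
        new = MiniAccumDict(self)
        for key, value in other.items():
            new[key] = new[key] + value if key in new else value
        return new


# partial results from two hypothetical tasks, in the layout built above:
# rlz.ordinal -> (loss_type, asset.id) -> (avg_loss, avg_ins_loss)
part1 = MiniAccumDict({0: MiniAccumDict({('structural', 'a1'): (10.0, 1.0)})})
part2 = MiniAccumDict({0: MiniAccumDict({('structural', 'a2'): (20.0, 2.0)})})
print(part1 + part2)
# {0: {('structural', 'a1'): (10.0, 1.0), ('structural', 'a2'): (20.0, 2.0)}}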
Example 2
import numpy

from openquake.risklib import scientific, workflows


def classical_risk(riskinputs, riskmodel, rlzs_assoc, monitor):
    """
    Compute and return the average losses for each asset.

    :param riskinputs:
        a list of :class:`openquake.risklib.riskinput.RiskInput` objects
    :param riskmodel:
        a :class:`openquake.risklib.riskinput.RiskModel` instance
    :param rlzs_assoc:
        associations (trt_id, gsim) -> realizations
    :param monitor:
        :class:`openquake.baselib.performance.PerformanceMonitor` instance
    """
    lti = riskmodel.lti
    oq = monitor.oqparam
    ins = oq.insured_losses

    result = dict(loss_curves=[], loss_maps=[], stat_curves=[], stat_maps=[])
    for out_by_rlz in riskmodel.gen_outputs(riskinputs, rlzs_assoc, monitor):
        l = lti[out_by_rlz.loss_type]
        values = workflows.get_values(out_by_rlz.loss_type, out_by_rlz.assets)
        for out in out_by_rlz:
            r = out.hid
            for i, asset in enumerate(out.assets):
                aid = asset.idx
                val = values[i]
                avg = out.average_losses[i] * val
                avg_ins = (out.average_insured_losses[i] * val
                           if ins else numpy.nan)
                lcurve = (out.loss_curves[i, 0] * val,
                          out.loss_curves[i, 1], avg)
                if ins:
                    lcurve += (out.insured_curves[i, 0] * val,
                               out.insured_curves[i, 1], avg_ins)
                else:
                    lcurve += (None, None, None)
                result['loss_curves'].append((l, r, aid, lcurve))

                # loss maps per asset, shape (P, N); no insured variant
                result['loss_maps'].append(
                    (l, r, aid, out.loss_maps[:, i] * val))

        # compute statistics
        if len(out_by_rlz) > 1:
            cb = riskmodel.curve_builders[l]
            statsbuilder = scientific.StatsBuilder(
                oq.quantile_loss_curves,
                oq.conditional_loss_poes,
                oq.poes_disagg,
                cb.curve_resolution,
                insured_losses=oq.insured_losses)
            stats = statsbuilder.build(out_by_rlz)
            stat_curves, stat_maps = statsbuilder.get_curves_maps(stats)
            for asset, stat_curve, stat_map in zip(out_by_rlz.assets,
                                                   stat_curves, stat_maps):
                result['stat_curves'].append((l, asset.idx, stat_curve))
                result['stat_maps'].append((l, asset.idx, stat_map))

    return result
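Each row appended to result['loss_curves'] above is a flat tuple (l, r, aid, lcurve), where l is the loss type index, r the realization index and aid the asset index. A downstream consumer can regroup these flat rows as needed; the sketch below regroups them by realization with a defaultdict (only the tuple layout comes from the code above, the sample rows are made up):

import collections

# sample rows in the (l, r, aid, lcurve) layout built above
loss_curves = [
    (0, 0, 7, 'curve for asset 7, rlz 0'),
    (0, 1, 7, 'curve for asset 7, rlz 1'),
    (1, 0, 9, 'curve for asset 9, rlz 0'),
]

by_rlz = collections.defaultdict(list)
for l, r, aid, lcurve in loss_curves:
    by_rlz[r].append((l, aid, lcurve))

for r in sorted(by_rlz):
    print(r, by_rlz[r])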
Example 3
    def post_execute(self, result):
        """
        Extract several outputs of interest from the result dictionary,
        which maps rlz.ordinal -> (loss_type, tag) -> [(asset.id, loss), ...].
        """
        oq = self.oqparam
        # rlzs_assoc is a persistent attribute: reading it returns the
        # cached value and assigning it writes it to the datastore, so
        # this apparently redundant statement actually saves it
        self.rlzs_assoc = self.rlzs_assoc
        rlzs = self.rlzs_assoc.realizations
        loss_types = self.riskmodel.get_loss_types()

        C = oq.loss_curve_resolution
        self.loss_curve_dt = numpy.dtype(
            [('losses', (float, C)), ('poes', (float, C)), ('avg', float)])

        if oq.conditional_loss_poes:
            lm_names = _loss_map_names(oq.conditional_loss_poes)
            self.loss_map_dt = numpy.dtype([(f, float) for f in lm_names])

        self.assets = assets = riskinput.sorted_assets(self.assets_by_site)

        self.specific_assets = specific_assets = [
            a for a in assets if a.id in self.oqparam.specific_assets]
        specific_asset_refs = set(self.oqparam.specific_assets)

        N = len(assets)

        event_loss_asset = [{} for _ in rlzs]
        event_loss = [{} for _ in rlzs]

        loss_curves = self.zeros(N, self.loss_curve_dt)
        ins_curves = self.zeros(N, self.loss_curve_dt)
        if oq.conditional_loss_poes:
            loss_maps = self.zeros(N, self.loss_map_dt)
        agg_loss_curve = self.zeros(1, self.loss_curve_dt)

        for i in sorted(result):
            rlz = rlzs[i]

            data_by_lt_tag = result[i]
            # (loss_type, asset_id) -> [(tag, loss, ins_loss), ...]
            elass = {(loss_type, asset.id): [] for asset in assets
                     for loss_type in loss_types}
            elagg = []  # aggregate event loss
            nonzero = total = 0
            for loss_type, tag in data_by_lt_tag:
                d = data_by_lt_tag[loss_type, tag]
                if tag == 'counts_matrix':
                    # use fresh names here: rebinding `assets` would
                    # clobber the list of all assets defined above
                    cm_assets, counts = d.keys(), d.values()
                    indices = numpy.array([asset.idx for asset in cm_assets])
                    asset_values = workflows.get_values(loss_type, cm_assets)
                    poes = scientific.build_poes(
                        counts, oq.ses_per_logic_tree_path)
                    cb = scientific.CurveBuilder(
                        loss_type, numpy.linspace(0, 1, C))
                    lcurves = cb.build_loss_curves(
                        poes, asset_values, indices, N)
                    self.store('lcurves/' + loss_type, rlz, lcurves)
                    continue

                for aid, loss, ins_loss in d['data']:
                    elass[loss_type, aid].append((tag, loss, ins_loss))

                # aggregates
                elagg.append((loss_type, tag, d['loss'], d['ins_loss']))
                nonzero += d['nonzero']
                total += d['total']
            logging.info('rlz=%d: %d/%d nonzero losses', i, nonzero, total)

            if elass:
                data_by_lt = collections.defaultdict(list)
                for (loss_type, asset_id), rows in elass.items():
                    for tag, loss, ins_loss in rows:
                        data_by_lt[loss_type].append(
                            (tag, asset_id, loss, ins_loss))
                for loss_type, data in data_by_lt.items():
                    event_loss_asset[i][loss_type] = sorted(
                        # rows are (tag, asset_id, loss, ins_loss)
                        (tag, aid, loss, ins) for tag, aid, loss, ins in data
                        if aid in specific_asset_refs)

                    # build the loss curves per asset
                    lc = self.build_loss_curves(elass, loss_type, 1)
                    loss_curves[loss_type] = lc

                    if oq.insured_losses:
                        # build the insured loss curves per asset
                        ic = self.build_loss_curves(elass, loss_type, 2)
                        ins_curves[loss_type] = ic

                    if oq.conditional_loss_poes:
                        # build the loss maps per asset, array of shape (N, P)
                        losses_poes = numpy.array(  # shape (N, 2, C)
                            [lc['losses'], lc['poes']]).transpose(1, 0, 2)
                        lmaps = scientific.loss_map_matrix(
                            oq.conditional_loss_poes, losses_poes)  # (P, N)
                        for lm, lmap in zip(lm_names, lmaps):
                            loss_maps[loss_type][lm] = lmap

            self.store('loss_curves', rlz, loss_curves)
            if oq.insured_losses:
                self.store('ins_curves', rlz, ins_curves)
            if oq.conditional_loss_poes:
                self.store('loss_maps', rlz, loss_maps)

            if elagg:
                for loss_type, rows in groupby(
                        elagg, operator.itemgetter(0)).items():
                    event_loss[i][loss_type] = [row[1:] for row in rows]
                    # aggregate loss curve for all tags
                    losses, poes, avg, _ = self.build_agg_loss_curve_and_map(
                        [loss for _lt, _tag, loss, _ins_loss in rows])
                    # NB: there is no aggregate insured loss curve
                    agg_loss_curve[loss_type][0] = (losses, poes, avg)
                    # NB: the aggregated loss_map is not stored
                self.store('agg_loss_curve', rlz, agg_loss_curve)

        if specific_assets:
            self.event_loss_asset = event_loss_asset
        self.event_loss = event_loss

        # store statistics (i.e. mean and quantiles) for curves and maps
        if len(self.rlzs_assoc.realizations) > 1:
            self.compute_store_stats('loss_curves')
            self.compute_store_stats('agg_loss_curve')
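The loss curves handled by post_execute live in numpy record arrays built from loss_curve_dt, where each record holds two fixed-length vectors ('losses' and 'poes') plus a scalar average. The sketch below shows how such an array behaves; the resolution C and the sample values are arbitrary:

import numpy

C = 4  # arbitrary curve resolution for the sketch
loss_curve_dt = numpy.dtype(
    [('losses', (float, C)), ('poes', (float, C)), ('avg', float)])

# one record per asset: two fixed-length vectors plus a scalar average
curves = numpy.zeros(2, loss_curve_dt)
curves[0]['losses'] = [0.0, 1.0, 2.0, 3.0]
curves[0]['poes'] = [0.9, 0.5, 0.1, 0.01]
curves[0]['avg'] = 1.2

print(curves['avg'])        # per-asset averages: [1.2, 0.]
print(curves['losses'][0])  # loss abscissae of the first asset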