Exemplo n.º 1
0
    def post_execute(self, result):
        """
        Save the losses in a compact form.

        :param result:
            a dictionary rlz_idx -> (loss_type, asset_id) -> (avg, ins)

        Builds ``self.avg_losses``, a composite numpy array of shape
        (num_assets, num_rlzs) with two fields per loss type
        (``avg_loss~<lt>``, ``ins_loss~<lt>``).
        """
        loss_types = self.riskmodel.get_loss_types()
        fields = []
        for loss_type in loss_types:
            fields.append(('avg_loss~%s' % loss_type, float))
            fields.append(('ins_loss~%s' % loss_type, float))
        avg_loss_dt = numpy.dtype(fields)
        num_rlzs = len(self.rlzs_assoc.realizations)
        assets = riskinput.sorted_assets(self.assets_by_site)
        self.asset_no_by_id = {a.id: no for no, a in enumerate(assets)}
        avg_losses = numpy.zeros(
            (len(self.asset_no_by_id), num_rlzs), avg_loss_dt)

        by_asset = operator.itemgetter(1)  # loop-invariant, hoisted
        for rlz_no in result:
            losses_by_lt_asset = result[rlz_no]
            for asset, keys in general.groupby(
                    losses_by_lt_asset, by_asset).items():
                asset_no = self.asset_no_by_id[asset]
                # Collect the (avg, ins) pairs in the canonical loss_type
                # order so that the positional assignment below matches the
                # field order of avg_loss_dt; relying on the iteration order
                # of `keys` (dict insertion order) would be fragile.
                present = {lt for lt, _ in keys}
                losses = []
                for loss_type in loss_types:
                    if loss_type in present:
                        losses.extend(losses_by_lt_asset[loss_type, asset])
                avg_losses[asset_no, rlz_no] = tuple(losses)

        self.avg_losses = avg_losses
Exemplo n.º 2
0
    def post_execute(self, result):
        """
        Save the losses in a compact form.

        :param result:
            a dictionary rlz_idx -> (loss_type, asset_id) -> (avg, ins)

        Builds ``self.avg_losses``, a composite numpy array of shape
        (num_rlzs, num_assets) with two fields per loss type
        (``avg_loss~<lt>``, ``ins_loss~<lt>``).
        """
        loss_types = self.riskmodel.get_loss_types()
        fields = []
        for loss_type in loss_types:
            fields.append(('avg_loss~%s' % loss_type, float))
            fields.append(('ins_loss~%s' % loss_type, float))
        avg_loss_dt = numpy.dtype(fields)
        num_rlzs = len(self.rlzs_assoc.realizations)
        assets = riskinput.sorted_assets(self.assets_by_site)
        self.asset_no_by_id = {a.id: no for no, a in enumerate(assets)}
        avg_losses = numpy.zeros((num_rlzs, len(self.asset_no_by_id)),
                                 avg_loss_dt)

        by_asset = operator.itemgetter(1)  # loop-invariant, hoisted
        for rlz_no in result:
            losses_by_lt_asset = result[rlz_no]
            for asset, keys in general.groupby(losses_by_lt_asset,
                                               by_asset).items():
                asset_no = self.asset_no_by_id[asset]
                # Collect the (avg, ins) pairs in the canonical loss_type
                # order so that the positional assignment below matches the
                # field order of avg_loss_dt; relying on the iteration order
                # of `keys` (dict insertion order) would be fragile.
                present = {lt for lt, _ in keys}
                losses = []
                for loss_type in loss_types:
                    if loss_type in present:
                        losses.extend(losses_by_lt_asset[loss_type, asset])
                avg_losses[rlz_no][asset_no] = tuple(losses)

        self.avg_losses = avg_losses
Exemplo n.º 3
0
    def post_execute(self, result):
        """
        Extract from the result dictionary

            rlz.ordinal -> (loss_type, tag) -> [(asset.id, loss), ...]

        several interesting outputs: per-asset loss curves (plain and
        insured), loss maps, aggregate loss curves, event loss tables
        and, when more than one realization is present, their statistics.

        :param result:
            a dictionary rlz ordinal -> (loss_type, tag) -> data
        """
        oq = self.oqparam
        # take the cached self.rlzs_assoc and write it on the datastore
        # (the attribute is presumably a read/write property — TODO confirm)
        self.rlzs_assoc = self.rlzs_assoc
        rlzs = self.rlzs_assoc.realizations
        loss_types = self.riskmodel.get_loss_types()

        C = oq.loss_curve_resolution
        self.loss_curve_dt = numpy.dtype(
            [('losses', (float, C)), ('poes', (float, C)), ('avg', float)])

        if oq.conditional_loss_poes:
            lm_names = _loss_map_names(oq.conditional_loss_poes)
            self.loss_map_dt = numpy.dtype([(f, float) for f in lm_names])

        self.assets = assets = riskinput.sorted_assets(self.assets_by_site)

        self.specific_assets = specific_assets = [
            a for a in assets if a.id in self.oqparam.specific_assets]
        specific_asset_refs = set(self.oqparam.specific_assets)

        N = len(assets)

        # one dict per realization: loss_type -> rows
        event_loss_asset = [{} for rlz in rlzs]
        event_loss = [{} for rlz in rlzs]

        loss_curves = self.zeros(N, self.loss_curve_dt)
        ins_curves = self.zeros(N, self.loss_curve_dt)
        if oq.conditional_loss_poes:
            loss_maps = self.zeros(N, self.loss_map_dt)
        agg_loss_curve = self.zeros(1, self.loss_curve_dt)

        for i in sorted(result):
            rlz = rlzs[i]

            data_by_lt_tag = result[i]
            # (loss_type, asset_id) -> [(tag, loss, ins_loss), ...]
            elass = {(loss_type, asset.id): [] for asset in assets
                     for loss_type in loss_types}
            elagg = []  # aggregate event loss
            nonzero = total = 0
            for loss_type, tag in data_by_lt_tag:
                d = data_by_lt_tag[loss_type, tag]
                if tag == 'counts_matrix':
                    # NB: use fresh names here; the original code rebound
                    # `assets`, clobbering the full sorted asset list and
                    # corrupting the `elass` dict of the next realization
                    cm_assets, counts = d.keys(), d.values()
                    indices = numpy.array([asset.idx for asset in cm_assets])
                    asset_values = workflows.get_values(loss_type, cm_assets)
                    poes = scientific.build_poes(
                        counts, oq.ses_per_logic_tree_path)
                    cb = scientific.CurveBuilder(
                        loss_type, numpy.linspace(0, 1, C))
                    lcurves = cb.build_loss_curves(
                        poes, asset_values, indices, N)
                    self.store('lcurves/' + loss_type, rlz, lcurves)
                    continue

                for aid, loss, ins_loss in d['data']:
                    elass[loss_type, aid].append((tag, loss, ins_loss))

                # aggregates
                elagg.append((loss_type, tag, d['loss'], d['ins_loss']))
                nonzero += d['nonzero']
                total += d['total']
            logging.info('rlz=%d: %d/%d nonzero losses', i, nonzero, total)

            if elass:
                data_by_lt = collections.defaultdict(list)
                for (loss_type, asset_id), rows in elass.items():
                    for tag, loss, ins_loss in rows:
                        data_by_lt[loss_type].append(
                            (tag, asset_id, loss, ins_loss))
                for loss_type, data in data_by_lt.items():
                    # keep only rows for the assets explicitly requested
                    event_loss_asset[i][loss_type] = sorted(
                        # data contains rows (tag, asset, loss, ins_loss)
                        (tag, aid, loss, iloss)
                        for tag, aid, loss, iloss in data
                        if aid in specific_asset_refs)

                    # build the loss curves per asset
                    lc = self.build_loss_curves(elass, loss_type, 1)
                    loss_curves[loss_type] = lc

                    if oq.insured_losses:
                        # build the insured loss curves per asset
                        ic = self.build_loss_curves(elass, loss_type, 2)
                        ins_curves[loss_type] = ic

                    if oq.conditional_loss_poes:
                        # build the loss maps per asset, array of shape (N, P)
                        losses_poes = numpy.array(  # shape (N, 2, C)
                            [lc['losses'], lc['poes']]).transpose(1, 0, 2)
                        lmaps = scientific.loss_map_matrix(
                            oq.conditional_loss_poes, losses_poes)  # (P, N)
                        for lm, lmap in zip(lm_names, lmaps):
                            loss_maps[loss_type][lm] = lmap

            self.store('loss_curves', rlz, loss_curves)
            if oq.insured_losses:
                self.store('ins_curves', rlz, ins_curves)
            if oq.conditional_loss_poes:
                self.store('loss_maps', rlz, loss_maps)

            if elagg:
                for loss_type, rows in groupby(
                        elagg, operator.itemgetter(0)).items():
                    event_loss[i][loss_type] = [row[1:] for row in rows]
                    # aggregate loss curve for all tags
                    losses, poes, avg, _ = self.build_agg_loss_curve_and_map(
                        [loss for _lt, _tag, loss, _ins_loss in rows])
                    # NB: there is no aggregate insured loss curve
                    agg_loss_curve[loss_type][0] = (losses, poes, avg)
                    # NB: the aggregated loss_map is not stored
                self.store('agg_loss_curve', rlz, agg_loss_curve)

        if specific_assets:
            self.event_loss_asset = event_loss_asset
        self.event_loss = event_loss

        # store statistics (i.e. mean and quantiles) for curves and maps
        if len(self.rlzs_assoc.realizations) > 1:
            self.compute_store_stats('loss_curves')
            self.compute_store_stats('agg_loss_curve')