Example #1
    def __call__(self, loss_type, assets, ground_motion_values, epsilons,
                 event_ids):
        """
        :param str loss_type: the loss type considered

        :param assets:
           assets is an iterator over
           :class:`openquake.risklib.workflows.Asset` instances

        :param ground_motion_values:
           a numpy array with ground_motion_values of shape N x R

        :param epsilons:
           a numpy array with stochastic values of shape N x R

        :param event_ids:
           a numpy array of R event IDs (integers)

        :returns:
            a
            :class:`openquake.risklib.workflows.ProbabilisticEventBased.Output`
            instance.
        """
        loss_matrix = self.risk_functions[loss_type].apply_to(
            ground_motion_values, epsilons)

        curves = self.curves(loss_matrix)
        average_losses = numpy.array([scientific.average_loss(losses, poes)
                                      for losses, poes in curves])
        stddev_losses = numpy.std(loss_matrix, axis=1)
        values = utils.numpy_map(lambda a: a.value(loss_type), assets)
        maps = self.maps(curves)
        elt = self.event_loss(loss_matrix.transpose() * values, event_ids)

        if self.insured_losses and loss_type != 'fatalities':
            deductibles = [a.deductible(loss_type) for a in assets]
            limits = [a.insurance_limit(loss_type) for a in assets]
            insured_loss_matrix = utils.numpy_map(
                scientific.insured_losses, loss_matrix, deductibles, limits)
            insured_curves = self.curves(insured_loss_matrix)
            average_insured_losses = [
                scientific.average_loss(losses, poes)
                for losses, poes in insured_curves]
            stddev_insured_losses = numpy.std(insured_loss_matrix, axis=1)
        else:
            insured_curves = None
            average_insured_losses = None
            stddev_insured_losses = None

        return self.Output(
            assets, loss_matrix if self.return_loss_matrix else None,
            curves, average_losses, stddev_losses,
            insured_curves, average_insured_losses, stddev_insured_losses,
            maps, elt)
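
The matrix algebra above is easy to check on toy data: transposing the N x R loss matrix and multiplying by the per-asset values gives one row of absolute losses per event, while the standard deviation over axis 1 gives the per-asset dispersion. A minimal plain-numpy sketch (all shapes and numbers are invented for illustration, not taken from the snippet):

import numpy

# hypothetical data: N = 3 assets, R = 4 events/realizations
loss_ratio_matrix = numpy.array([[0.10, 0.00, 0.30, 0.20],
                                 [0.20, 0.10, 0.00, 0.40],
                                 [0.00, 0.00, 0.10, 0.10]])
values = numpy.array([1000.0, 2000.0, 500.0])  # exposed value per asset

# per-asset dispersion across events, as in stddev_losses above
stddev_losses = numpy.std(loss_ratio_matrix, axis=1)

# R x N matrix of absolute losses: each column is scaled by its asset value
losses_per_event = loss_ratio_matrix.transpose() * values
aggregate_per_event = losses_per_event.sum(axis=1)  # one total per event
print(stddev_losses, aggregate_per_event)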
Example #2
    def __call__(self, loss_type, assets, ground_motion_values, epsilons):
        values = numpy.array([a.value(loss_type) for a in assets])

        # a matrix of N x R elements
        loss_ratio_matrix = self.risk_functions[loss_type].apply_to(
            ground_motion_values, epsilons)

        # aggregating per asset, getting a vector of R elements
        aggregate_losses = numpy.sum(
            loss_ratio_matrix.transpose() * values, axis=1)

        if self.insured_losses and loss_type != "fatalities":
            deductibles = [a.deductible(loss_type) for a in assets]
            limits = [a.insurance_limit(loss_type) for a in assets]
            insured_loss_ratio_matrix = utils.numpy_map(
                scientific.insured_losses,
                loss_ratio_matrix, deductibles, limits)

            insured_loss_matrix = (
                insured_loss_ratio_matrix.transpose() * values).transpose()

            insured_losses = numpy.array(insured_loss_matrix).sum(axis=0)
        else:
            insured_loss_matrix = None
            insured_losses = None

        return (assets, loss_ratio_matrix, aggregate_losses,
                insured_loss_matrix, insured_losses)
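
scientific.insured_losses is not reproduced in this listing; the sketch below assumes the usual deductible/limit clamp (losses under the deductible pay nothing, losses over the limit are capped), which may differ from the real implementation in details:

import numpy

def insured_losses_sketch(losses, deductible, limit):
    # assumed behaviour only: zero below the deductible, capped at the limit
    return numpy.piecewise(
        losses, [losses < deductible, losses > limit],
        [0, limit, lambda x: x])

loss_ratios = numpy.array([0.05, 0.2, 0.6, 1.0])  # hypothetical loss ratios
print(insured_losses_sketch(loss_ratios, deductible=0.1, limit=0.5))
# -> [0.   0.2  0.5  0.5]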
Example #3
    def __call__(self, loss_type, assets, ground_motion_values, epsilons,
                 _tags=None):
        values = get_values(loss_type, assets, self.time_event)

        # a matrix of N x R elements
        loss_ratio_matrix = self.risk_functions[loss_type].apply_to(
            ground_motion_values, epsilons)
        # another matrix of N x R elements
        loss_matrix = (loss_ratio_matrix.T * values).T
        # an array of R elements
        aggregate_losses = loss_matrix.sum(axis=0)

        if self.insured_losses and loss_type != "fatalities":
            deductibles = [a.deductible(loss_type) for a in assets]
            limits = [a.insurance_limit(loss_type) for a in assets]
            insured_loss_ratio_matrix = utils.numpy_map(
                scientific.insured_losses,
                loss_ratio_matrix, deductibles, limits)
            insured_loss_matrix = (insured_loss_ratio_matrix.T * values).T

            # aggregating per asset, getting a vector of R elements
            insured_losses = insured_loss_matrix.sum(axis=0)
        else:
            insured_loss_matrix = None
            insured_losses = None
        return scientific.Output(
            assets, loss_type, loss_matrix=loss_matrix,
            loss_ratio_matrix=loss_ratio_matrix,
            aggregate_losses=aggregate_losses,
            insured_loss_matrix=insured_loss_matrix,
            insured_losses=insured_losses)
Example #4
    def __call__(self, loss_type, assets, ground_motion_values, epsilons,
                 event_ids):
        """
        :param str loss_type: the loss type considered

        :param assets:
           assets is an iterator over
           :class:`openquake.risklib.scientific.Asset` instances

        :param ground_motion_values:
           a numpy array with ground_motion_values of shape N x R

        :param epsilons:
           a numpy array with stochastic values of shape N x R

        :param event_ids:
           a numpy array of R event IDs (integers)

        :returns:
            a
            :class:`openquake.risklib.scientific.ProbabilisticEventBased.Output`
            instance.
        """
        n = len(assets)
        loss_matrix = self.risk_functions[loss_type].apply_to(
            ground_motion_values, epsilons)
        # sum on ruptures; compute the fractional losses
        average_losses = loss_matrix.sum(axis=1) * self.ses_ratio
        values = get_values(loss_type, assets)
        ela = loss_matrix.T * values  # matrix with T x N elements
        cb = self.riskmodel.curve_builders[self.riskmodel.lti[loss_type]]
        # FIXME: ugly workaround for qa_tests.event_based_test; in Ubuntu 12.04
        # MagicMock does not work well, so len(cb.ratios) gives an error
        nratios = 1 if isinstance(cb, mock.Mock) else len(cb.ratios)
        if self.insured_losses and loss_type != 'fatalities':
            deductibles = numpy.array(
                [a.deductible(loss_type) for a in assets])
            limits = numpy.array(
                [a.insurance_limit(loss_type) for a in assets])
            ilm = utils.numpy_map(
                scientific.insured_losses, loss_matrix, deductibles, limits)
            icounts = cb.build_counts(ilm)
        else:  # build a NaN matrix of size N x T
            T = len(ground_motion_values[0])
            ilm = numpy.empty((n, T))
            ilm.fill(numpy.nan)
            icounts = numpy.empty((n, nratios))
            icounts.fill(numpy.nan)
        ila = ilm.T * values
        average_insured_losses = ilm.sum(axis=1) * self.ses_ratio
        return scientific.Output(
            assets, loss_type,
            event_loss_per_asset=ela,
            insured_loss_per_asset=ila,
            average_losses=average_losses,
            average_insured_losses=average_insured_losses,
            counts_matrix=cb.build_counts(loss_matrix),
            insured_counts_matrix=icounts,
            tags=event_ids)
Example #5
    def __call__(self, loss_type, assets, hazard_curve, _eps=None):
        """
        :param str loss_type:
            the loss type considered
        :param assets:
            assets is an iterator over N
            :class:`openquake.risklib.scientific.Asset` instances
        :param hazard_curve:
            an array of poes
        :param _eps:
            ignored, here only for API compatibility with other calculators
        :returns:
            a :class:`openquake.risklib.scientific.Classical.Output` instance.
        """
        n = len(assets)
        vf = self.risk_functions[loss_type]
        imls = self.hazard_imtls[vf.imt]
        curves = [
            scientific.classical(vf, imls, hazard_curve,
                                 self.lrem_steps_per_interval)
        ] * n
        average_losses = utils.numpy_map(scientific.average_loss, curves)
        maps = scientific.loss_map_matrix(self.conditional_loss_poes, curves)
        values = get_values(loss_type, assets)

        if self.insured_losses and loss_type != 'occupants':
            deductibles = [a.deductible(loss_type) for a in assets]
            limits = [a.insurance_limit(loss_type) for a in assets]

            insured_curves = rescale(
                utils.numpy_map(scientific.insured_loss_curve, curves,
                                deductibles, limits), values)
            average_insured_losses = utils.numpy_map(scientific.average_loss,
                                                     insured_curves)
        else:
            insured_curves = None
            average_insured_losses = None

        return scientific.Output(assets,
                                 loss_type,
                                 loss_curves=rescale(numpy.array(curves),
                                                     values),
                                 average_losses=values * average_losses,
                                 insured_curves=insured_curves,
                                 average_insured_losses=average_insured_losses,
                                 loss_maps=values * maps)
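
In the classical snippets the per-asset loss curves are reduced with scientific.average_loss, whose exact integration rule is not shown in this listing; a rough stand-in is the area under the (losses, poes) curve, for instance via the trapezoidal rule:

import numpy

def average_loss_sketch(losses, poes):
    # assumed approximation: expected loss as the area under the
    # exceedance curve (the real scientific.average_loss may differ)
    return numpy.trapz(poes, losses)

losses = numpy.array([0.0, 0.1, 0.2, 0.5, 1.0])  # hypothetical loss ratios
poes = numpy.array([0.9, 0.5, 0.2, 0.05, 0.0])   # probabilities of exceedance
print(average_loss_sketch(losses, poes))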
Example #6
    def __call__(self, loss_type, assets, hazard, _eps=None, _eids=None):
        """
        :param loss_type: the loss type
        :param assets: a list of N assets of the same taxonomy
        :param hazard: a hazard curve
        :param _eps: dummy parameter, unused
        :param _eids: dummy parameter, unused
        :returns: a :class:`openquake.risklib.scientific.Output` instance
        """
        n = len(assets)
        self.assets = assets
        vf = self.risk_functions[loss_type]
        imls = self.hazard_imtls[vf.imt]
        vf_retro = self.retro_functions[loss_type]
        curves_orig = functools.partial(scientific.classical,
                                        vf,
                                        imls,
                                        steps=self.lrem_steps_per_interval)
        curves_retro = functools.partial(scientific.classical,
                                         vf_retro,
                                         imls,
                                         steps=self.lrem_steps_per_interval)
        original_loss_curves = utils.numpy_map(curves_orig, [hazard] * n)
        retrofitted_loss_curves = utils.numpy_map(curves_retro, [hazard] * n)

        eal_original = utils.numpy_map(scientific.average_loss,
                                       original_loss_curves)

        eal_retrofitted = utils.numpy_map(scientific.average_loss,
                                          retrofitted_loss_curves)

        bcr_results = [
            scientific.bcr(eal_original[i], eal_retrofitted[i],
                           self.interest_rate, self.asset_life_expectancy,
                           asset.value(loss_type),
                           asset.retrofitted(loss_type))
            for i, asset in enumerate(assets)
        ]

        return scientific.Output(assets,
                                 loss_type,
                                 data=list(
                                     zip(eal_original, eal_retrofitted,
                                         bcr_results)))
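
scientific.bcr is called above with the original and retrofitted expected annual losses, the interest rate, the asset life expectancy, the asset value and the retrofitting cost; its body is not part of this listing. A hedged sketch of the standard benefit-cost ratio formula with those arguments:

import numpy

def bcr_sketch(eal_original, eal_retrofitted, interest_rate,
               asset_life_expectancy, value, retrofitting_cost):
    # assumed formula: discounted annual benefit of the retrofit divided
    # by its cost; the real scientific.bcr may differ in details
    annuity = (1 - numpy.exp(-interest_rate * asset_life_expectancy)) \
        / interest_rate
    return (eal_original - eal_retrofitted) * value * annuity \
        / retrofitting_cost

# hypothetical numbers: EAL drops from 2% to 0.5% of the asset value
print(bcr_sketch(0.02, 0.005, interest_rate=0.05, asset_life_expectancy=50,
                 value=1000.0, retrofitting_cost=200.0))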
Example #7
    def __call__(self,
                 loss_type,
                 assets,
                 hazard_curves,
                 _epsilons=None,
                 _tags=None):
        """
        :param str loss_type:
            the loss type considered
        :param assets:
            assets is an iterator over N
            :class:`openquake.risklib.scientific.Asset` instances
        :param hazard_curves:
            an iterator over N arrays with the poes
        :param _epsilons:
            ignored, here only for API compatibility with other calculators
        :returns:
            a :class:`openquake.risklib.scientific.Classical.Output` instance.
        """
        curves = utils.numpy_map(self.curves[loss_type], hazard_curves)
        average_losses = utils.numpy_map(scientific.average_loss, curves)
        maps = scientific.loss_map_matrix(self.conditional_loss_poes, curves)
        fractions = scientific.loss_map_matrix(self.poes_disagg, curves)

        if self.insured_losses and loss_type != 'fatalities':
            deductibles = [a.deductible(loss_type) for a in assets]
            limits = [a.insurance_limit(loss_type) for a in assets]

            insured_curves = utils.numpy_map(scientific.insured_loss_curve,
                                             curves, deductibles, limits)
            average_insured_losses = utils.numpy_map(scientific.average_loss,
                                                     insured_curves)
        else:
            insured_curves = None
            average_insured_losses = None

        return scientific.Output(assets,
                                 loss_type,
                                 loss_curves=curves,
                                 average_losses=average_losses,
                                 insured_curves=insured_curves,
                                 average_insured_losses=average_insured_losses,
                                 loss_maps=maps,
                                 loss_fractions=fractions)
Example #8
    def __call__(self, loss_type, assets, hazard_curve, _eps=None):
        """
        :param str loss_type:
            the loss type considered
        :param assets:
            assets is an iterator over N
            :class:`openquake.risklib.scientific.Asset` instances
        :param hazard_curve:
            an array of poes
        :param _eps:
            ignored, here only for API compatibility with other calculators
        :returns:
            a :class:`openquake.risklib.scientific.Classical.Output` instance.
        """
        n = len(assets)
        vf = self.risk_functions[loss_type]
        imls = self.hazard_imtls[vf.imt]
        curves = [scientific.classical(
            vf, imls, hazard_curve, self.lrem_steps_per_interval)] * n
        average_losses = utils.numpy_map(scientific.average_loss, curves)
        maps = scientific.loss_map_matrix(self.conditional_loss_poes, curves)
        values = get_values(loss_type, assets)

        if self.insured_losses and loss_type != 'occupants':
            deductibles = [a.deductible(loss_type) for a in assets]
            limits = [a.insurance_limit(loss_type) for a in assets]

            insured_curves = rescale(
                utils.numpy_map(scientific.insured_loss_curve,
                                curves, deductibles, limits), values)
            average_insured_losses = utils.numpy_map(
                scientific.average_loss, insured_curves)
        else:
            insured_curves = None
            average_insured_losses = None

        return scientific.Output(
            assets, loss_type,
            loss_curves=rescale(numpy.array(curves), values),
            average_losses=values * average_losses,
            insured_curves=insured_curves,
            average_insured_losses=average_insured_losses,
            loss_maps=values * maps)
Example #9
    def __call__(self, loss_type, assets, hazard, _eps=None, _eids=None):
        """
        :param loss_type: the loss type
        :param assets: a list of N assets of the same taxonomy
        :param hazard: a hazard curve
        :param _eps: dummy parameter, unused
        :param _eids: dummy parameter, unused
        :returns: a list of triples (eal_orig, eal_retro, bcr_result)
        """
        if loss_type != 'structural':
            raise NotImplementedError(
                'retrofitted is not defined for ' + loss_type)
        n = len(assets)
        self.assets = assets
        vf = self.risk_functions[loss_type]
        imls = self.hazard_imtls[vf.imt]
        vf_retro = self.retro_functions[loss_type]
        curves_orig = functools.partial(scientific.classical,
                                        vf,
                                        imls,
                                        steps=self.lrem_steps_per_interval)
        curves_retro = functools.partial(scientific.classical,
                                         vf_retro,
                                         imls,
                                         steps=self.lrem_steps_per_interval)
        original_loss_curves = utils.numpy_map(curves_orig, [hazard] * n)
        retrofitted_loss_curves = utils.numpy_map(curves_retro, [hazard] * n)

        eal_original = utils.numpy_map(scientific.average_loss,
                                       original_loss_curves)

        eal_retrofitted = utils.numpy_map(scientific.average_loss,
                                          retrofitted_loss_curves)

        bcr_results = [
            scientific.bcr(eal_original[i], eal_retrofitted[i],
                           self.interest_rate, self.asset_life_expectancy,
                           asset.value(loss_type), asset.retrofitted())
            for i, asset in enumerate(assets)
        ]
        return list(zip(eal_original, eal_retrofitted, bcr_results))
Example #10
def vulnerability_function_applier(
        vulnerability_function, ground_motion_values,
        seed=None, asset_correlation=0):
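    # applies the vulnerability function to every row of ground_motion_values
    # after (re)initializing its distribution; degenerate 1-dimensional input
    # (no ground motion values) yields an empty array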
    if numpy.array(ground_motion_values).ndim == 1:
        return numpy.array([])

    # FIXME(lp). Refactor me to avoid the side effect
    vulnerability_function.init_distribution(
        len(ground_motion_values),
        len(ground_motion_values[0]),
        seed,
        asset_correlation)
    return utils.numpy_map(vulnerability_function, ground_motion_values)
Example #11
def scenario_damage(fragility_functions, gmvs):
    """
    Compute the damage state fractions for the given array of ground
    motion values. Returns an NxM matrix where N is the number of
    realizations and M is the number of damage states.
    """
    return utils.numpy_map(
        lambda gmv:
        numpy.array(
            list(pairwise_diff(
                [1] +
                [ff(gmv) for ff in fragility_functions] +
                [0]))),
        gmvs)
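
pairwise_diff is not defined in this listing; its role above is to turn the probabilities of exceeding each damage state into the fraction of the exposure ending up in each state. A self-contained sketch with invented fragility values:

import numpy

def pairwise_diff_sketch(values):
    # consecutive differences: [a, b, c] -> [a - b, b - c]
    return [x - y for x, y in zip(values, values[1:])]

# hypothetical poes of exceeding each damage state for one ground motion
# value, ordered from the lightest to the heaviest state
poes = [0.8, 0.5, 0.1]
fractions = numpy.array(pairwise_diff_sketch([1] + poes + [0]))
print(fractions)  # roughly [0.2, 0.3, 0.4, 0.1], summing to 1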
Example #12
    def __call__(self, loss_type, assets, hazard_curves, _epsilons=None,
                 _tags=None):
        """
        :param loss_type: the string 'damage'
        :param assets: a list of N assets of the same taxonomy
        :param hazard_curves: an array of N x R elements
        :returns: an array of N assets and an array of N x D elements

        where N is the number of assets and D the number of damage states.
        """
        fractions = utils.numpy_map(self.curves, hazard_curves)
        damages = [asset.number * fraction
                   for asset, fraction in zip(assets, fractions)]
        return scientific.Output(assets, 'damage', damages=damages)
Example #13
    def __call__(self, loss_type, assets, gmfs, epsilons, event_ids):
        self.assets = assets

        original_loss_curves = utils.numpy_map(
            self.curves, self.vf_orig[loss_type].apply_to(gmfs, epsilons))
        retrofitted_loss_curves = utils.numpy_map(
            self.curves, self.vf_retro[loss_type].apply_to(gmfs, epsilons))

        eal_original = utils.numpy_map(
            scientific.average_loss, original_loss_curves)
        eal_retrofitted = utils.numpy_map(
            scientific.average_loss, retrofitted_loss_curves)

        bcr_results = [
            scientific.bcr(
                eal_original[i], eal_retrofitted[i],
                self.interest_rate, self.asset_life_expectancy,
                asset.value(loss_type), asset.retrofitted(loss_type))
            for i, asset in enumerate(assets)]

        return scientific.Output(
            assets, loss_type,
            data=list(zip(eal_original, eal_retrofitted, bcr_results)))
Example #14
    def __call__(self, loss_type, assets, hazard_curves, _epsilons=None,
                 _tags=None):
        """
        :param str loss_type:
            the loss type considered
        :param assets:
            assets is an iterator over N
            :class:`openquake.risklib.scientific.Asset` instances
        :param hazard_curves:
            an iterator over N arrays with the poes
        :param _epsilons:
            ignored, here only for API compatibility with other calculators
        :returns:
            a :class:`openquake.risklib.scientific.Classical.Output` instance.
        """
        curves = utils.numpy_map(self.curves[loss_type], hazard_curves)
        average_losses = utils.numpy_map(scientific.average_loss, curves)
        maps = scientific.loss_map_matrix(self.conditional_loss_poes, curves)
        fractions = scientific.loss_map_matrix(self.poes_disagg, curves)

        if self.insured_losses and loss_type != 'fatalities':
            deductibles = [a.deductible(loss_type) for a in assets]
            limits = [a.insurance_limit(loss_type) for a in assets]

            insured_curves = utils.numpy_map(
                scientific.insured_loss_curve, curves, deductibles, limits)
            average_insured_losses = utils.numpy_map(
                scientific.average_loss, insured_curves)
        else:
            insured_curves = None
            average_insured_losses = None

        return scientific.Output(
            assets, loss_type, loss_curves=curves,
            average_losses=average_losses, insured_curves=insured_curves,
            average_insured_losses=average_insured_losses,
            loss_maps=maps, loss_fractions=fractions)
Example #15
    def __call__(self, loss_type, assets, hazard, _eps=None, _eids=None):
        """
        :param loss_type: the loss type
        :param assets: a list of N assets of the same taxonomy
        :param hazard: a hazard curve
        :param _eps: dummy parameter, unused
        :param _eids: dummy parameter, unused
        :returns: a :class:`openquake.risklib.scientific.Output` instance
        """
        n = len(assets)
        self.assets = assets
        vf = self.risk_functions[loss_type]
        imls = self.hazard_imtls[vf.imt]
        vf_retro = self.retro_functions[loss_type]
        curves_orig = functools.partial(scientific.classical, vf, imls,
                                        steps=self.lrem_steps_per_interval)
        curves_retro = functools.partial(scientific.classical, vf_retro, imls,
                                         steps=self.lrem_steps_per_interval)
        original_loss_curves = utils.numpy_map(curves_orig, [hazard] * n)
        retrofitted_loss_curves = utils.numpy_map(curves_retro, [hazard] * n)

        eal_original = utils.numpy_map(
            scientific.average_loss, original_loss_curves)

        eal_retrofitted = utils.numpy_map(
            scientific.average_loss, retrofitted_loss_curves)

        bcr_results = [
            scientific.bcr(
                eal_original[i], eal_retrofitted[i],
                self.interest_rate, self.asset_life_expectancy,
                asset.value(loss_type), asset.retrofitted(loss_type))
            for i, asset in enumerate(assets)]

        return scientific.Output(
            assets, loss_type,
            data=list(zip(eal_original, eal_retrofitted, bcr_results)))
Example #16
    def __call__(self, loss_type, assets, hazard):
        self.assets = assets

        original_loss_curves = utils.numpy_map(
            self.curves_orig[loss_type], hazard)
        retrofitted_loss_curves = utils.numpy_map(
            self.curves_retro[loss_type], hazard)

        eal_original = utils.numpy_map(
            scientific.average_loss, original_loss_curves)

        eal_retrofitted = utils.numpy_map(
            scientific.average_loss, retrofitted_loss_curves)

        bcr_results = [
            scientific.bcr(
                eal_original[i], eal_retrofitted[i],
                self.interest_rate, self.asset_life_expectancy,
                asset.value(loss_type), asset.retrofitted(loss_type))
            for i, asset in enumerate(assets)]

        return scientific.Output(
            assets, loss_type,
            data=zip(eal_original, eal_retrofitted, bcr_results))
Example #17
    def apply_to(self, ground_motion_values, epsilons):
        """
        Apply a copy of the vulnerability function to a set of N
        ground motion vectors, by using N epsilon vectors of length R.
        N is the number of assets and R the number of realizations.

        :param ground_motion_values:
           matrix of floats N x R
        :param epsilons:
           matrix of floats N x R
        """
        assert len(epsilons) == len(ground_motion_values), (
            len(epsilons), len(ground_motion_values))
        vulnerability_function = copy.copy(self)
        vulnerability_function.set_distribution(epsilons)
        return utils.numpy_map(
            vulnerability_function._apply, ground_motion_values)
Example #18
    def __call__(self,
                 loss_type,
                 assets,
                 ground_motion_values,
                 epsilons,
                 _eids=None):
        values = get_values(loss_type, assets, self.time_event)
        ok = ~numpy.isnan(values)
        if not ok.any():
            # there are no assets with a value
            return
        # there may be assets without a value
        missing_value = not ok.all()
        if missing_value:
            assets = assets[ok]
            epsilons = epsilons[ok]

        # a matrix of N x E elements
        loss_ratio_matrix = self.risk_functions[loss_type].apply_to(
            [ground_motion_values] * len(assets), epsilons)
        # another matrix of N x E elements
        loss_matrix = (loss_ratio_matrix.T * values).T
        # an array of E elements
        aggregate_losses = loss_matrix.sum(axis=0)

        if self.insured_losses and loss_type != "occupants":
            deductibles = [a.deductible(loss_type) for a in assets]
            limits = [a.insurance_limit(loss_type) for a in assets]
            insured_loss_ratio_matrix = utils.numpy_map(
                scientific.insured_losses, loss_ratio_matrix, deductibles,
                limits)
            insured_loss_matrix = (insured_loss_ratio_matrix.T * values).T
        else:
            insured_loss_matrix = numpy.empty_like(loss_ratio_matrix)
            insured_loss_matrix.fill(numpy.nan)

        # aggregating per asset, getting a vector of E elements
        insured_losses = insured_loss_matrix.sum(axis=0)
        return scientific.Output(assets,
                                 loss_type,
                                 loss_matrix=loss_matrix,
                                 loss_ratio_matrix=loss_ratio_matrix,
                                 aggregate_losses=aggregate_losses,
                                 insured_loss_matrix=insured_loss_matrix,
                                 insured_losses=insured_losses)
Example #19
    def __call__(self, loss_type, assets, ground_motion_values, epsgetter):
        epsilons = epsgetter()
        values = get_values(loss_type, assets, self.time_event)
        ok = ~numpy.isnan(values)
        if not ok.any():
            # there are no assets with a value
            return
        # there may be assets without a value
        missing_value = not ok.all()
        if missing_value:
            assets = assets[ok]
            epsilons = epsilons[ok]

        # a matrix of N x E elements
        vf = self.risk_functions[loss_type]
        means, covs, idxs = vf.interpolate(ground_motion_values)
        loss_ratio_matrix = numpy.zeros((len(assets), len(epsilons[0])))
        for i, eps in enumerate(epsilons):
            loss_ratio_matrix[i, idxs] = vf.sample(means, covs, idxs, eps)
        # another matrix of N x E elements
        loss_matrix = (loss_ratio_matrix.T * values).T
        # an array of E elements
        aggregate_losses = loss_matrix.sum(axis=0)

        if self.insured_losses and loss_type != "occupants":
            deductibles = [a.deductible(loss_type) for a in assets]
            limits = [a.insurance_limit(loss_type) for a in assets]
            insured_loss_ratio_matrix = utils.numpy_map(
                scientific.insured_losses, loss_ratio_matrix, deductibles,
                limits)
            insured_loss_matrix = (insured_loss_ratio_matrix.T * values).T
        else:
            insured_loss_matrix = numpy.empty_like(loss_ratio_matrix)
            insured_loss_matrix.fill(numpy.nan)

        # aggregating per asset, getting a vector of E elements
        insured_losses = insured_loss_matrix.sum(axis=0)
        return scientific.Output(assets,
                                 loss_type,
                                 loss_matrix=loss_matrix,
                                 loss_ratio_matrix=loss_ratio_matrix,
                                 aggregate_losses=aggregate_losses,
                                 insured_loss_matrix=insured_loss_matrix,
                                 insured_losses=insured_losses)
Example #20
    def __call__(self, loss_type, assets, ground_motion_values, epsgetter):
        epsilons = epsgetter(None, None)
        values = get_values(loss_type, assets, self.time_event)
        ok = ~numpy.isnan(values)
        if not ok.any():
            # there are no assets with a value
            return
        # there may be assets without a value
        missing_value = not ok.all()
        if missing_value:
            assets = assets[ok]
            epsilons = epsilons[ok]

        # a matrix of N x E elements
        vf = self.risk_functions[loss_type]
        means, covs, idxs = vf.interpolate(ground_motion_values)
        loss_ratio_matrix = numpy.zeros((len(assets), len(epsilons[0])))
        for i, eps in enumerate(epsilons):
            loss_ratio_matrix[i, idxs] = vf.sample(means, covs, idxs, eps)
        # another matrix of N x E elements
        loss_matrix = (loss_ratio_matrix.T * values).T
        # an array of E elements
        aggregate_losses = loss_matrix.sum(axis=0)

        if self.insured_losses and loss_type != "occupants":
            deductibles = [a.deductible(loss_type) for a in assets]
            limits = [a.insurance_limit(loss_type) for a in assets]
            insured_loss_ratio_matrix = utils.numpy_map(
                scientific.insured_losses,
                loss_ratio_matrix, deductibles, limits)
            insured_loss_matrix = (insured_loss_ratio_matrix.T * values).T
        else:
            insured_loss_matrix = numpy.empty_like(loss_ratio_matrix)
            insured_loss_matrix.fill(numpy.nan)

        # aggregating per asset, getting a vector of E elements
        insured_losses = insured_loss_matrix.sum(axis=0)
        return scientific.Output(
            assets, loss_type, loss_matrix=loss_matrix,
            loss_ratio_matrix=loss_ratio_matrix,
            aggregate_losses=aggregate_losses,
            insured_loss_matrix=insured_loss_matrix,
            insured_losses=insured_losses)
Example #21
    def __call__(self,
                 loss_type,
                 assets,
                 hazard_curves,
                 _epsilons=None,
                 _tags=None):
        """
        :param loss_type: the string 'damage'
        :param assets: a list of N assets of the same taxonomy
        :param hazard_curves: an array of N x E elements
        :returns: an array of N assets and an array of N x D elements

        where N is the number of assets and D the number of damage states.
        """
        fractions = utils.numpy_map(self.curves, hazard_curves)
        damages = [
            asset.number * fraction
            for asset, fraction in zip(assets, fractions)
        ]
        return scientific.Output(assets, 'damage', damages=damages)
Example #22
    def apply_to(self, ground_motion_values, epsilons):
        """
        Apply a copy of the vulnerability function to a set of N
        ground motion vectors, by using N epsilon vectors of length R,
        where N is the number of assets and R the number of realizations.

        :param ground_motion_values:
           matrix of floats N x R
        :param epsilons:
           matrix of floats N x R
        """
        # NB: changing the order of the ground motion values for a given
        # asset without changing the order of the corresponding epsilon
        # values gives inconsistent results, see the MeanLossTestCase
        assert len(epsilons) == len(ground_motion_values), (
            len(epsilons), len(ground_motion_values))
        vulnerability_function = copy.copy(self)
        vulnerability_function.set_distribution(epsilons)
        return utils.numpy_map(
            vulnerability_function._apply, ground_motion_values)
Example #23
    def __call__(self,
                 loss_type,
                 assets,
                 ground_motion_values,
                 epsilons,
                 _tags=None):
        # FIXME: remove this when the engine calculator is removed
        engine = hasattr(assets[0], 'asset_ref')
        values = get_values(loss_type, assets, self.time_event)

        # a matrix of N x E elements
        loss_ratio_matrix = self.risk_functions[loss_type].apply_to(
            ground_motion_values, epsilons)
        # another matrix of N x E elements
        loss_matrix = (loss_ratio_matrix.T * values).T
        # an array of E elements
        aggregate_losses = loss_matrix.sum(axis=0)

        if self.insured_losses and loss_type != "fatalities":
            deductibles = [a.deductible(loss_type) for a in assets]
            limits = [a.insurance_limit(loss_type) for a in assets]
            insured_loss_ratio_matrix = utils.numpy_map(
                scientific.insured_losses, loss_ratio_matrix, deductibles,
                limits)
            insured_loss_matrix = (insured_loss_ratio_matrix.T * values).T
        else:
            insured_loss_matrix = numpy.empty_like(loss_ratio_matrix)
            insured_loss_matrix.fill(numpy.nan)

        # aggregating per asset, getting a vector of E elements
        insured_losses = insured_loss_matrix.sum(axis=0)
        return scientific.Output(assets,
                                 loss_type,
                                 loss_matrix=loss_matrix,
                                 loss_ratio_matrix=loss_ratio_matrix,
                                 aggregate_losses=aggregate_losses,
                                 insured_loss_matrix=NoneOr(
                                     engine, insured_loss_matrix),
                                 insured_losses=NoneOr(engine, insured_losses))
Example #24
    def __call__(self, loss_type, assets, gmvs_eids, epsgetter):
        gmvs, eids = gmvs_eids
        epsilons = [epsgetter(asset.ordinal, eids) for asset in assets]
        values = get_values(loss_type, assets, self.time_event)
        ok = ~numpy.isnan(values)
        if not ok.any():
            # there are no assets with a value
            return
        # there may be assets without a value
        missing_value = not ok.all()
        if missing_value:
            assets = assets[ok]
            epsilons = epsilons[ok]

        E = len(epsilons[0])
        I = self.insured_losses + 1

        # a matrix of A x E x I elements
        loss_matrix = numpy.empty((len(assets), E, I))
        loss_matrix.fill(numpy.nan)

        vf = self.risk_functions[loss_type]
        means, covs, idxs = vf.interpolate(gmvs)
        loss_ratio_matrix = numpy.zeros((len(assets), E))
        for i, eps in enumerate(epsilons):
            loss_ratio_matrix[i, idxs] = vf.sample(means, covs, idxs, eps)
        loss_matrix[:, :, 0] = (loss_ratio_matrix.T * values).T

        if self.insured_losses and loss_type != "occupants":
            deductibles = [a.deductible(loss_type) for a in assets]
            limits = [a.insurance_limit(loss_type) for a in assets]
            insured_loss_ratio_matrix = utils.numpy_map(
                scientific.insured_losses, loss_ratio_matrix, deductibles,
                limits)
            loss_matrix[:, :, 1] = (insured_loss_ratio_matrix.T * values).T

        return loss_matrix
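
The three-dimensional loss matrix above keeps the ground-up losses in layer 0 of the last axis and, when insured losses are enabled, the insured losses in layer 1. A toy illustration of that layout (shapes and numbers invented):

import numpy

A, E, I = 2, 3, 2  # hypothetical: 2 assets, 3 events, ground-up + insured
loss_matrix = numpy.full((A, E, I), numpy.nan)

loss_ratio_matrix = numpy.array([[0.1, 0.0, 0.4],
                                 [0.2, 0.3, 0.0]])
values = numpy.array([1000.0, 500.0])

# layer 0: ground-up losses, one row per asset, one column per event
loss_matrix[:, :, 0] = (loss_ratio_matrix.T * values).T
# layer 1 would hold the insured losses; it stays NaN here, mirroring the
# behaviour of the snippet above when insured_losses is off
print(loss_matrix[:, :, 0])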
Example #25
    def __call__(self, loss_type, assets, hazard_curves, _epsilons=None):
        """
        :param str loss_type:
            the loss type considered
        :param assets:
            assets is an iterator over N
            :class:`openquake.risklib.workflows.Asset` instances
        :param hazard_curves:
            curves is an iterator over hazard curves (numpy array shaped 2xR).
        :param _epsilons:
            ignored, here only for API compatibility with other calculators
        :returns:
            a :class:`openquake.risklib.workflows.Classical.Output` instance.
        """
        curves = self.curves[loss_type](hazard_curves)
        average_losses = numpy.array([scientific.average_loss(losses, poes)
                                      for losses, poes in curves])
        maps = self.maps(curves)
        fractions = self.fractions(curves)

        if self.insured_losses and loss_type != 'fatalities':
            deductibles = [a.deductible(loss_type) for a in assets]
            limits = [a.insurance_limit(loss_type) for a in assets]

            insured_curves = utils.numpy_map(
                scientific.insured_loss_curve, curves, deductibles, limits)
            average_insured_losses = [
                scientific.average_loss(losses, poes)
                for losses, poes in insured_curves]
        else:
            insured_curves = None
            average_insured_losses = None

        return self.Output(
            assets,
            curves, average_losses, insured_curves, average_insured_losses,
            maps, fractions)
Example #26
    def __call__(self,
                 loss_type,
                 assets,
                 ground_motion_values,
                 epsilons,
                 _tags=None):
        values = get_values(loss_type, assets, self.time_event)

        # a matrix of N x R elements
        loss_ratio_matrix = self.risk_functions[loss_type].apply_to(
            ground_motion_values, epsilons)
        # another matrix of N x R elements
        loss_matrix = (loss_ratio_matrix.T * values).T
        # an array of R elements
        aggregate_losses = loss_matrix.sum(axis=0)

        if self.insured_losses and loss_type != "fatalities":
            deductibles = [a.deductible(loss_type) for a in assets]
            limits = [a.insurance_limit(loss_type) for a in assets]
            insured_loss_ratio_matrix = utils.numpy_map(
                scientific.insured_losses, loss_ratio_matrix, deductibles,
                limits)
            insured_loss_matrix = (insured_loss_ratio_matrix.T * values).T

            # aggregating per asset, getting a vector of R elements
            insured_losses = insured_loss_matrix.sum(axis=0)
        else:
            insured_loss_matrix = None
            insured_losses = None
        return scientific.Output(assets,
                                 loss_type,
                                 loss_matrix=loss_matrix,
                                 loss_ratio_matrix=loss_ratio_matrix,
                                 aggregate_losses=aggregate_losses,
                                 insured_loss_matrix=insured_loss_matrix,
                                 insured_losses=insured_losses)
Example #27
def event_based(workflow, getter, outputdict, params, monitor):
    """
    Celery task for the event based risk calculator.

    :param job_id: the id of the current
        :class:`openquake.engine.db.models.OqJob`
    :param workflow:
      A :class:`openquake.risklib.workflows.Workflow` instance
    :param getter:
      A :class:`HazardGetter` instance
    :param outputdict:
      An instance of :class:`..writers.OutputDict` containing
      output container instances (e.g. a LossCurve)
    :param params:
      An instance of :class:`..base.CalcParams` used to compute
      derived outputs
    :param monitor:
      A monitor instance
    :returns:
      A dictionary {(loss_type, output_id): event_loss_table}
    """
    # NB: event_loss_table is a dictionary (loss_type, out_id) -> loss,
    # out_id can be None, and in that case it stores the statistics
    event_loss_table = {}
    specific_assets = set(params.specific_assets)
    statistics = getattr(params, 'statistics', True)  # enabled by default
    # keep in memory the loss_matrix only when specific_assets are set
    workflow.return_loss_matrix = bool(specific_assets)

    # the insert here will work only if specific_assets is set
    inserter = writer.CacheInserter(
        models.EventLossAsset, max_cache_size=10000)
    for loss_type in workflow.loss_types:
        with monitor.copy('computing individual risk'):
            outputs = workflow.compute_all_outputs(getter, loss_type)
            if statistics:
                outputs = list(outputs)  # expand the generator
                # this is needed, otherwise the call to workflow.statistics
                # below will find an empty iterable; notice that by disabling
                # the statistics we can save memory by keeping only one
                # hazard realization at the time
        for out in outputs:
            event_loss_table[loss_type, out.hid] = out.output.event_loss_table
            disagg_outputs = None  # changed if params.sites_disagg is set
            if specific_assets:
                loss_matrix, assets = _filter_loss_matrix_assets(
                    out.output.loss_matrix, out.output.assets, specific_assets)
                if len(assets) == 0:  # no specific_assets
                    continue
                # compute the loss per rupture per asset
                event_loss = models.EventLoss.objects.get(
                    output__oq_job=monitor.job_id,
                    output__output_type='event_loss_asset',
                    loss_type=loss_type, hazard_output=out.hid)
                # losses is E x n matrix, where E is the number of ruptures
                # and n the number of assets in the specific_assets set
                losses = (loss_matrix.transpose() *
                          numpy_map(lambda a: a.value(loss_type), assets))
                # save an EventLossAsset record for each specific asset
                for rup_id, losses_per_rup in zip(
                        getter.rupture_ids, losses):
                    for asset, loss_per_rup in zip(assets, losses_per_rup):
                        ela = models.EventLossAsset(
                            event_loss=event_loss, rupture_id=rup_id,
                            asset=asset, loss=loss_per_rup)
                        inserter.add(ela)
                if params.sites_disagg:
                    with monitor.copy('disaggregating results'):
                        ruptures = [models.SESRupture.objects.get(pk=rid)
                                    for rid in getter.rupture_ids]
                        disagg_outputs = disaggregate(
                            out.output, [r.rupture for r in ruptures], params)

            with monitor.copy('saving individual risk'):
                save_individual_outputs(
                    outputdict.with_args(hazard_output_id=out.hid,
                                         loss_type=loss_type),
                    out.output, disagg_outputs, params)

        if statistics and len(outputs) > 1:
            stats = workflow.statistics(
                outputs, params.quantile_loss_curves, post_processing)

            with monitor.copy('saving risk statistics'):
                save_statistical_output(
                    outputdict.with_args(
                        hazard_output_id=None, loss_type=loss_type),
                    stats, params)
            event_loss_table[loss_type, None] = stats.event_loss_table

    inserter.flush()
    return event_loss_table
Example #28
    def __call__(self, loss_type, assets, ground_motion_values, epsilons,
                 event_ids):
        """
        :param str loss_type: the loss type considered

        :param assets:
           assets is an iterator over
           :class:`openquake.risklib.scientific.Asset` instances

        :param ground_motion_values:
           a numpy array with ground_motion_values of shape N x R

        :param epsilons:
           a numpy array with stochastic values of shape N x R

        :param event_ids:
           a numpy array of R event IDs (integers)

        :returns:
            a
            :class:`openquake.risklib.scientific.ProbabilisticEventBased.Output`
            instance.
        """
        loss_matrix = self.risk_functions[loss_type].apply_to(
            ground_motion_values, epsilons)
        values = get_values(loss_type, assets)
        ela = loss_matrix.T * values  # matrix with T x N elements
        if self.insured_losses and loss_type != 'fatalities':
            deductibles = [a.deductible(loss_type) for a in assets]
            limits = [a.insurance_limit(loss_type) for a in assets]
            ila = utils.numpy_map(scientific.insured_losses, loss_matrix,
                                  deductibles, limits)
        else:  # build a zero matrix of size T x N
            ila = numpy.zeros((len(ground_motion_values[0]), len(assets)))
        if isinstance(assets[0].id, str):
            # in oq-lite return early, with just the losses per asset
            cb = self.riskmodel.curve_builders[self.riskmodel.lti[loss_type]]
            return scientific.Output(
                assets,
                loss_type,
                event_loss_per_asset=ela,
                insured_loss_per_asset=ila,
                counts_matrix=cb.build_counts(loss_matrix),
                insured_counts_matrix=cb.build_counts(ila),
                tags=event_ids)

        # in the engine, compute more stuff on the workers
        curves = utils.numpy_map(self.curves, loss_matrix)
        average_losses = utils.numpy_map(scientific.average_loss, curves)
        stddev_losses = numpy.std(loss_matrix, axis=1)
        maps = scientific.loss_map_matrix(self.conditional_loss_poes, curves)
        elt = self.event_loss(ela, event_ids)

        if self.insured_losses and loss_type != 'fatalities':
            insured_curves = utils.numpy_map(self.curves, ila)
            average_insured_losses = utils.numpy_map(scientific.average_loss,
                                                     insured_curves)
            stddev_insured_losses = numpy.std(ila, axis=1)
        else:
            insured_curves = None
            average_insured_losses = None
            stddev_insured_losses = None
        return scientific.Output(
            assets,
            loss_type,
            loss_matrix=loss_matrix if self.return_loss_matrix else None,
            loss_curves=curves,
            average_losses=average_losses,
            stddev_losses=stddev_losses,
            insured_curves=insured_curves,
            average_insured_losses=average_insured_losses,
            stddev_insured_losses=stddev_insured_losses,
            loss_maps=maps,
            event_loss_table=elt)
Example #29
def event_based(workflow, getter, outputdict, params, monitor):
    """
    Celery task for the event based risk calculator.

    :param job_id: the id of the current
        :class:`openquake.engine.db.models.OqJob`
    :param workflow:
      A :class:`openquake.risklib.workflows.Workflow` instance
    :param getter:
      A :class:`HazardGetter` instance
    :param outputdict:
      An instance of :class:`..writers.OutputDict` containing
      output container instances (e.g. a LossCurve)
    :param params:
      An instance of :class:`..base.CalcParams` used to compute
      derived outputs
    :param monitor:
      A monitor instance
    :returns:
      A dictionary {(loss_type, output_id): event_loss_table}
    """
    # NB: event_loss_table is a dictionary (loss_type, out_id) -> loss,
    # out_id can be None, and in that case it stores the statistics
    event_loss_table = {}

    # num_loss is a dictionary asset_ref -> array([not_zeros, total])
    num_losses = collections.defaultdict(lambda: numpy.zeros(2, dtype=int))

    specific_assets = set(params.specific_assets)
    statistics = params.statistics  # enabled by default
    # keep in memory the loss_matrix only when specific_assets are set
    workflow.return_loss_matrix = bool(specific_assets)

    # the insert here will work only if specific_assets is set
    inserter = writer.CacheInserter(
        models.EventLossAsset, max_cache_size=10000)
    for loss_type in workflow.loss_types:
        with monitor('computing individual risk', autoflush=True):
            outputs = workflow.compute_all_outputs(getter, loss_type)
            if statistics:
                outputs = list(outputs)  # expand the generator
                # this is needed, otherwise the call to workflow.statistics
                # below will find an empty iterable; notice that by disabling
                # the statistics we can save memory by keeping only one
                # hazard realization at the time
        for out in outputs:
            event_loss_table[loss_type, out.hid] = out.event_loss_table
            disagg_outputs = None  # changed if params.sites_disagg is set
            if specific_assets:
                loss_matrix, assets = _filter_loss_matrix_assets(
                    out.loss_matrix, out.assets, specific_assets)
                if len(assets):
                    # compute the loss per rupture per asset
                    event_loss = models.EventLoss.objects.get(
                        output__oq_job=monitor.job_id,
                        output__output_type='event_loss_asset',
                        loss_type=loss_type, hazard_output=out.hid)
                    # losses is E x n matrix, where E is the number of ruptures
                    # and n the number of assets in the specific_assets set
                    losses = (loss_matrix.transpose() *
                              numpy_map(lambda a: a.value(loss_type), assets))
                    # save an EventLossAsset record for each specific asset
                    for rup_id, losses_per_rup in zip(
                            getter.rupture_ids, losses):
                        for asset, loss_per_rup in zip(assets, losses_per_rup):
                            if loss_per_rup:  # save only non-zero losses
                                ela = models.EventLossAsset(
                                    event_loss=event_loss, rupture_id=rup_id,
                                    asset=asset, loss=loss_per_rup)
                                inserter.add(ela)
                            # update the counters: not_zeros is incremented
                            # only if loss_per_rup is nonzero, total always
                            num_losses[asset.asset_ref] += numpy.array(
                                [bool(loss_per_rup), 1])
                    if params.sites_disagg:
                        with monitor('disaggregating results', autoflush=True):
                            ruptures = [models.SESRupture.objects.get(pk=rid)
                                        for rid in getter.rupture_ids]
                            disagg_outputs = disaggregate(
                                out, [r.rupture for r in ruptures], params)

            with monitor('saving individual risk', autoflush=True):
                save_individual_outputs(
                    outputdict.with_args(hazard_output_id=out.hid,
                                         loss_type=loss_type),
                    out, disagg_outputs, params)

        if statistics and len(outputs) > 1:
            stats = workflow.statistics(
                outputs, params.quantile_loss_curves)

            with monitor('saving risk statistics', autoflush=True):
                save_statistical_output(
                    outputdict.with_args(
                        hazard_output_id=None, loss_type=loss_type),
                    stats, params)
            event_loss_table[loss_type, None] = stats.event_loss_table

    inserter.flush()

    # log info about the rows entered in the event_loss_asset table
    for asset_ref in sorted(num_losses):
        not_zeros, total = num_losses[asset_ref]
        logs.LOG.info('Saved %d/%d losses for asset %s',
                      not_zeros, total, asset_ref)
    return event_loss_table
Example #30
    def __call__(self, loss_type, assets, ground_motion_values, epsilons,
                 event_ids):
        """
        :param str loss_type: the loss type considered

        :param assets:
           assets is an iterator over
           :class:`openquake.risklib.scientific.Asset` instances

        :param ground_motion_values:
           a numpy array with ground_motion_values of shape N x R

        :param epsilons:
           a numpy array with stochastic values of shape N x R

        :param event_ids:
           a numpy array of R event IDs (integers)

        :returns:
            a
            :class:`openquake.risklib.scientific.ProbabilisticEventBased.Output`
            instance.
        """
        loss_matrix = self.risk_functions[loss_type].apply_to(
            ground_motion_values, epsilons)
        values = get_values(loss_type, assets)
        ela = loss_matrix.T * values  # matrix with T x N elements
        if self.insured_losses and loss_type != 'fatalities':
            deductibles = [a.deductible(loss_type) for a in assets]
            limits = [a.insurance_limit(loss_type) for a in assets]
            ila = utils.numpy_map(
                scientific.insured_losses, loss_matrix, deductibles, limits)
        else:  # build a zero matrix of size T x N
            ila = numpy.zeros((len(ground_motion_values[0]), len(assets)))
        if isinstance(assets[0].id, str):
            # in oq-lite return early, with just the losses per asset
            cb = self.riskmodel.curve_builders[self.riskmodel.lti[loss_type]]
            return scientific.Output(
                assets, loss_type,
                event_loss_per_asset=ela,
                insured_loss_per_asset=ila,
                counts_matrix=cb.build_counts(loss_matrix),
                insured_counts_matrix=cb.build_counts(ila),
                tags=event_ids)

        # in the engine, compute more stuff on the workers
        curves = utils.numpy_map(self.curves, loss_matrix)
        average_losses = utils.numpy_map(scientific.average_loss, curves)
        stddev_losses = numpy.std(loss_matrix, axis=1)
        maps = scientific.loss_map_matrix(self.conditional_loss_poes, curves)
        elt = self.event_loss(ela, event_ids)

        if self.insured_losses and loss_type != 'fatalities':
            insured_curves = utils.numpy_map(self.curves, ila)
            average_insured_losses = utils.numpy_map(
                scientific.average_loss, insured_curves)
            stddev_insured_losses = numpy.std(ila, axis=1)
        else:
            insured_curves = None
            average_insured_losses = None
            stddev_insured_losses = None
        return scientific.Output(
            assets, loss_type,
            loss_matrix=loss_matrix if self.return_loss_matrix else None,
            loss_curves=curves, average_losses=average_losses,
            stddev_losses=stddev_losses, insured_curves=insured_curves,
            average_insured_losses=average_insured_losses,
            stddev_insured_losses=stddev_insured_losses,
            loss_maps=maps, event_loss_table=elt)