Example #1
0
    def _compute_bcr(self, block_id):
        """
        Calculate and store in the kvs the benefit-cost ratio data for block.

        A value is stored with key :func:`openquake.kvs.tokens.bcr_block_key`.
        See :func:`openquake.risk.job.general.compute_bcr_for_block` for result
        data structure spec.
        """

        job_id = self.job_ctxt.job_id
        block = general.Block.from_kvs(job_id, block_id)
        seed, correlation_type = self._get_correlation_type()
        histogram_bins = self.job_ctxt.oq_job_profile.loss_histogram_bins

        # Original and retrofitted vulnerability models, both read from KVS.
        vuln_model = vulnerability.load_vuln_model_from_kvs(job_id)
        vuln_model_retrofitted = vulnerability.load_vuln_model_from_kvs(
            job_id, retrofitted=True)

        def assets_getter(site):
            return general.BaseRiskCalculator.assets_at(job_id, site)

        def hazard_getter(site):
            gmvs = self._get_gmvs_at(
                general.hazard_input_site(self.job_ctxt, site))
            return {"IMLs": gmvs, "TSES": self._tses(),
                "TimeSpan": self._time_span()}

        # BCR calculator built from two probabilistic event based
        # calculators (one per vulnerability model).
        original_calc = api.probabilistic_event_based(
            vuln_model, histogram_bins, seed, correlation_type)
        retrofitted_calc = api.probabilistic_event_based(
            vuln_model_retrofitted, histogram_bins, seed, correlation_type)

        bcr = api.bcr(
            original_calc, retrofitted_calc,
            float(self.job_ctxt.params["INTEREST_RATE"]),
            float(self.job_ctxt.params["ASSET_LIFE_EXPECTANCY"]))

        # Group per-asset BCR data by asset location.
        per_site = defaultdict(list)

        for asset_output in api.compute_on_sites(
                block.sites, assets_getter, hazard_getter, bcr):
            asset = asset_output.asset
            per_site[(asset.site.x, asset.site.y)].append(
                ({"bcr": asset_output.bcr,
                  "eal_original": asset_output.eal_original,
                  "eal_retrofitted": asset_output.eal_retrofitted},
                 asset.asset_ref))

        result = per_site.items()
        bcr_block_key = kvs.tokens.bcr_block_key(job_id, block_id)
        kvs.set_value_json_encoded(bcr_block_key, result)
        LOGGER.debug("bcr result for block %s: %r", block_id, result)
Example #2
0
    def _compute_bcr(self, block_id):
        """
        Calculate and store in the kvs the benefit-cost ratio data for block.

        A value is stored with key :func:`openquake.kvs.tokens.bcr_block_key`.
        See :func:`openquake.risk.job.general.compute_bcr_for_block` for result
        data structure spec.
        """

        job_id = self.job_ctxt.job_id
        block = Block.from_kvs(job_id, block_id)
        steps = self.job_ctxt.oq_job_profile.lrem_steps_per_interval

        # Original and retrofitted vulnerability models, both read from KVS.
        vuln_model = vulnerability.load_vuln_model_from_kvs(job_id)
        vuln_model_retrofitted = vulnerability.load_vuln_model_from_kvs(
            job_id, retrofitted=True)

        def assets_getter(site):
            return BaseRiskCalculator.assets_at(job_id, site)

        def hazard_getter(site):
            return self._get_db_curve(hazard_input_site(self.job_ctxt, site))

        # BCR calculator wrapping one classical calculator per model.
        calculator = api.bcr(
            api.classical(vuln_model, steps=steps),
            api.classical(vuln_model_retrofitted, steps=steps),
            float(self.job_ctxt.params["INTEREST_RATE"]),
            float(self.job_ctxt.params["ASSET_LIFE_EXPECTANCY"]))

        # Group per-asset BCR data by asset location.
        per_site = defaultdict(list)

        for asset_output in api.compute_on_sites(
                block.sites, assets_getter, hazard_getter, calculator):
            asset = asset_output.asset
            per_site[(asset.site.x, asset.site.y)].append(
                ({"bcr": asset_output.bcr,
                  "eal_original": asset_output.eal_original,
                  "eal_retrofitted": asset_output.eal_retrofitted},
                 asset.asset_ref))

        bcr = per_site.items()
        bcr_block_key = kvs.tokens.bcr_block_key(job_id, block_id)

        kvs.set_value_json_encoded(bcr_block_key, bcr)
        LOGGER.debug("bcr result for block %s: %r", block_id, bcr)

        return True
Example #3
0
    def _compute_loss(self, block_id):
        """
        Calculate and store in the kvs the loss data.
        """

        job_id = self.job_ctxt.job_id
        block = Block.from_kvs(job_id, block_id)

        vuln_model = vulnerability.load_vuln_model_from_kvs(job_id)
        steps = self.job_ctxt.oq_job_profile.lrem_steps_per_interval

        def assets_getter(site):
            return BaseRiskCalculator.assets_at(job_id, site)

        def hazard_getter(site):
            return self._get_db_curve(hazard_input_site(self.job_ctxt, site))

        # Classical calculator wrapped so conditional losses are computed
        # for the PoE levels configured in the job parameters.
        calculator = api.conditional_losses(
            conditional_loss_poes(self.job_ctxt.params),
            api.classical(vuln_model, steps=steps))

        for asset_output in api.compute_on_sites(
                block.sites, assets_getter, hazard_getter, calculator):
            asset = asset_output.asset

            # Map the asset location to a grid cell of the job region.
            point = self.job_ctxt.region.grid.point_at(
                shapes.Site(asset.site.x, asset.site.y))

            # Persist loss curve, loss ratio curve and conditional losses.
            kvs.get_client().set(
                kvs.tokens.loss_curve_key(
                    job_id, point.row, point.column, asset.asset_ref),
                asset_output.loss_curve.to_json())

            kvs.get_client().set(
                kvs.tokens.loss_ratio_key(
                    job_id, point.row, point.column, asset.asset_ref),
                asset_output.loss_ratio_curve.to_json())

            for poe, loss in asset_output.conditional_losses.items():
                loss_key = kvs.tokens.loss_key(
                    job_id, point.row, point.column, asset.asset_ref, poe)

                kvs.get_client().set(loss_key, loss)
Example #4
0
    def compute_risk(self, block_id, **kwargs):
        """
        Compute the scenario damage results for a single block.

        Currently we support the computation of:
        * damage distributions per asset
        * damage distributions per building taxonomy
        * total damage distribution
        * collapse maps

        :param block_id: id of the region block data.
        :type block_id: integer
        :keyword fmodel: fragility model associated to this computation.
        :type fmodel: instance of
            :py:class:`openquake.db.models.FragilityModel`
        :return: the sum of the fractions (for each damage state)
            per asset taxonomy for the computed block.
        :rtype: `dict` where each key is a string representing a
            taxonomy and each value is the sum of fractions of all
            the assets related to that taxonomy (represented as
            a 2d `numpy.array`)
        """

        block = general.Block.from_kvs(self.job_ctxt.job_id, block_id)
        fragility_model = _fm(self.job_ctxt.oq_job)
        fragility_functions = fragility_model.functions_by_taxonomy()

        def assets_getter(site):
            return general.BaseRiskCalculator.assets_at(
                self.job_ctxt.job_id, site)

        def hazard_getter(site):
            return general.load_gmvs_at(
                self.job_ctxt.job_id,
                general.hazard_input_site(self.job_ctxt, site))

        calculator = api.scenario_damage(fragility_model, fragility_functions)

        for asset_output in api.compute_on_sites(
                block.sites, assets_getter, hazard_getter, calculator):
            # Store the collapse map and the per-asset damage distribution.
            self._store_cmap(asset_output.asset, asset_output.collapse_map)
            self._store_dda(
                asset_output.asset,
                scenario_damage.damage_states(fragility_model),
                asset_output.damage_distribution_asset)

        # The calculator accumulates the by-taxonomy distribution while
        # iterating; return the accumulated result for this block.
        return calculator.damage_distribution_by_taxonomy
Example #5
0
    def test_multiple_sites(self):
        """Every site must be passed to both getters, and each
        (asset, hazard) pair must be forwarded to the calculator."""
        asset = scientific.Asset("a1", None, None, None)
        sites = [(1.0, 1.0), (2.0, 2.0), (3.0, 3.0)]

        calculator = mock.Mock()
        hazard_getter = mock.Mock(return_value=1.0)
        assets_getter = mock.Mock(return_value=[asset])

        # compute_on_sites is lazy; consume it so the mocks get called.
        list(api.compute_on_sites(
             sites, assets_getter, hazard_getter, calculator))

        expected_calls = [(((1.0, 1.0),), {}), (((2.0, 2.0),), {}),
                          (((3.0, 3.0),), {})]

        # assertEqual: assertEquals is a deprecated alias (removed in
        # Python 3.12).
        self.assertEqual(expected_calls, assets_getter.call_args_list)
        self.assertEqual(expected_calls, hazard_getter.call_args_list)

        self.assertEqual([((asset, 1.0), {})] * 3,
                         calculator.call_args_list)
Example #6
0
    def test_multiple_assets_per_site(self):
        """All assets returned for a site must be passed to the
        calculator, each paired with that site's hazard value."""
        sites = [(1.0, 1.0)]

        assets = [
            scientific.Asset("a1", None, None, None),
            scientific.Asset("a2", None, None, None),
            scientific.Asset("a3", None, None, None),
        ]

        calculator = mock.Mock()
        hazard_getter = mock.Mock(return_value=1.0)
        assets_getter = mock.Mock(return_value=assets)

        # compute_on_sites is lazy; consume it so the mocks get called.
        list(api.compute_on_sites(
             sites, assets_getter, hazard_getter, calculator))

        expected_calls = [((assets[0], 1.0), {}), ((assets[1], 1.0), {}),
                          ((assets[2], 1.0), {})]

        # assertEqual: assertEquals is a deprecated alias (removed in
        # Python 3.12).
        self.assertEqual(expected_calls, calculator.call_args_list)
Example #7
0
    def compute_risk(self, block_id, **kwargs):
        """
        This method will perform two distinct (but similar) computations and
        return a result for each computation. The computations are as follows:

        First:

        For a given block of sites, compute loss values for all assets in the
        block. This computation will yield a single loss value per realization
        for the region block.

        Second:

        For each asset in the given block of sites, we need compute loss
        (where loss = loss_ratio * asset_value) for each realization. This
        gives 1 loss value _per_ asset _per_ realization. We then need to take
        the mean & standard deviation.

        Other info:

        The GMF data for each realization is stored in the KVS by the preceding
        scenario hazard job.

        :param block_id: id of the region block data we need to pull from the
            KVS
        :type block_id: str
        :keyword vuln_model:
            dict of :py:class:`openquake.shapes.VulnerabilityFunction` objects,
            keyed by the vulnerability function name as a string
        :keyword epsilon_provider:
            :py:class:`openquake.risk.job.EpsilonProvider` object

        :returns: 2-tuple of the following data:
            * 1-dimensional :py:class:`numpy.ndarray` of loss values for this
                region block (again, 1 value per realization)

            * list of 2-tuples containing site, loss, and asset
                information.

                The first element of each 2-tuple shall be a
                :py:class:`openquake.shapes.Site` object, which represents the
                geographical location of the asset loss.

                The second element shall be a list of
                2-tuples of dicts representing the loss and asset data (in that
                order).

                Example::

                    [(<Site(-117.0, 38.0)>, [
                        ({'mean_loss': 200.0, 'stddev_loss': 100},
                            {'assetID': 'a171'}),
                        ({'mean_loss': 200.0, 'stddev_loss': 100},
                            {'assetID': 'a187'})
                    ]),
                     (<Site(-118.0, 39.0)>, [
                        ({'mean_loss': 50, 'stddev_loss': 50.0},
                            {'assetID': 'a192'})
                    ])]
        """

        insured = kwargs["insured_losses"]
        vulnerability_model = kwargs["vuln_model"]
        seed, correlation_type = self._get_correlation_type()
        block = general.Block.from_kvs(self.job_ctxt.job_id, block_id)

        def assets_getter(site):
            return general.BaseRiskCalculator.assets_at(
                self.job_ctxt.job_id, site)

        def hazard_getter(site):
            return general.load_gmvs_at(
                self.job_ctxt.job_id,
                general.hazard_input_site(self.job_ctxt, site))

        calculator = api.scenario_risk(
            vulnerability_model, seed, correlation_type, insured)

        # Per-site loss map entries, keyed by Site.
        loss_map_data = {}

        for asset_output in api.compute_on_sites(
                block.sites, assets_getter, hazard_getter, calculator):
            asset = asset_output.asset
            asset_site = shapes.Site(asset.site.x, asset.site.y)

            loss_entry = (
                {"mean_loss": asset_output.mean,
                 "stddev_loss": asset_output.standard_deviation},
                {"assetID": asset.asset_ref})

            collect_block_data(loss_map_data, asset_site, loss_entry)

        # The calculator accumulates the per-realization aggregate losses
        # while iterating over the assets.
        return calculator.aggregate_losses, loss_map_data
Example #8
0
    def _compute_loss(self, block_id):
        """Compute risk for a block of sites, that means:

        * loss ratio curves
        * loss curves
        * conditional losses
        * (partial) aggregate loss curve

        :param block_id: id of the region block to compute.
        :returns: the (partial, block-only) aggregate losses accumulated
            by the probabilistic event based calculator.
        """

        # NOTE(review): stored on self rather than a local — presumably
        # read by other methods after this call; confirm before changing.
        self.vulnerability_model = vulnerability.load_vuln_model_from_kvs(
            self.job_ctxt.job_id)

        seed, correlation_type = self._get_correlation_type()
        block = general.Block.from_kvs(self.job_ctxt.job_id, block_id)
        loss_histogram_bins = self.job_ctxt.oq_job_profile.loss_histogram_bins

        def hazard_getter(site):
            # Ground motion values for the site plus the time parameters
            # required by the probabilistic event based calculator.
            gmvs = self._get_gmvs_at(general.hazard_input_site(
                self.job_ctxt, site))

            return {"IMLs": gmvs, "TSES": self._tses(),
                "TimeSpan": self._time_span()}

        assets_getter = lambda site: general.BaseRiskCalculator.assets_at(
            self.job_ctxt.job_id, site)

        # Base calculator; kept in its own variable because its
        # aggregate_losses attribute is returned at the end.
        probabilistic_event_based_calculator = api.probabilistic_event_based(
            self.vulnerability_model, loss_histogram_bins,
            seed, correlation_type)

        # Wrap it so conditional losses are computed for the configured PoEs.
        calculator = api.conditional_losses(general.conditional_loss_poes(
            self.job_ctxt.params), probabilistic_event_based_calculator)

        # Optionally wrap once more to also produce insured loss (ratio)
        # curves when the job asks for them.
        if self.job_ctxt.params.get("INSURED_LOSSES"):
            calculator = api.insured_curves(self.vulnerability_model,
                loss_histogram_bins, seed, correlation_type,
                api.insured_losses(calculator))

        for asset_output in api.compute_on_sites(block.sites,
            assets_getter, hazard_getter, calculator):

            location = asset_output.asset.site

            # Map the asset location to a grid cell of the job region;
            # KVS keys are addressed by (row, column).
            point = self.job_ctxt.region.grid.point_at(
                shapes.Site(location.x, location.y))

            # Persist the per-asset curves in the KVS.
            self._loss_ratio_curve_on_kvs(
                point.column, point.row, asset_output.loss_ratio_curve,
                asset_output.asset)

            self._loss_curve_on_kvs(
                point.column, point.row, asset_output.loss_curve,
                asset_output.asset)

            # One KVS entry per (asset, PoE) conditional loss.
            for poe, loss in asset_output.conditional_losses.items():
                key = kvs.tokens.loss_key(
                    self.job_ctxt.job_id, point.row, point.column,
                    asset_output.asset.asset_ref, poe)

                kvs.get_client().set(key, loss)

            if self.job_ctxt.params.get("INSURED_LOSSES"):
                self._insured_loss_curve_on_kvs(
                    point.column, point.row,
                    asset_output.insured_loss_curve, asset_output.asset)

                self._insured_loss_ratio_curve_on_kvs(
                    point.column, point.row,
                    asset_output.insured_loss_ratio_curve, asset_output.asset)

        # Only this block's contribution to the aggregate loss curve.
        return probabilistic_event_based_calculator.aggregate_losses