def test_load_gmvs_at(self):
        """
        Exercise the function
        :func:`openquake.calculators.risk.general.load_gmvs_at`.
        """

        gmvs = [
            {'site_lon': 0.1, 'site_lat': 0.2, 'mag': 0.117},
            {'site_lon': 0.1, 'site_lat': 0.2, 'mag': 0.167},
            {'site_lon': 0.1, 'site_lat': 0.2, 'mag': 0.542}]

        expected_gmvs = [0.117, 0.167, 0.542]
        point = self.region.grid.point_at(shapes.Site(0.1, 0.2))

        # we expect this point to be at row 1, column 0
        self.assertEqual(1, point.row)
        self.assertEqual(0, point.column)

        key = kvs.tokens.ground_motion_values_key(self.job_id, point)

        # place the test values in kvs
        for gmv in gmvs:
            kvs.get_client().rpush(key, json.dumps(gmv))

        actual_gmvs = load_gmvs_at(self.job_id, point)
        self.assertEqual(expected_gmvs, actual_gmvs)
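For context, the reading side this test exercises can be sketched as follows; the redis-style lrange call and the 'mag' field are assumptions inferred from the rpush calls and expected values above, not the library's actual code:

import json


def load_gmvs_at_sketch(client, key):
    # hypothetical counterpart of load_gmvs_at: pull every JSON blob
    # pushed under `key` (lrange(key, 0, -1) returns the whole list in
    # insertion order) and keep only the ground motion value
    return [json.loads(raw)['mag'] for raw in client.lrange(key, 0, -1)]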
Example #2
    def test_load_gmvs_at(self):
        """
        Exercise the function
        :func:`openquake.calculators.risk.general.load_gmvs_at`.
        """

        gmvs = [{
            'site_lon': 0.1,
            'site_lat': 0.2,
            'mag': 0.117
        }, {
            'site_lon': 0.1,
            'site_lat': 0.2,
            'mag': 0.167
        }, {
            'site_lon': 0.1,
            'site_lat': 0.2,
            'mag': 0.542
        }]

        expected_gmvs = [0.117, 0.167, 0.542]
        point = self.region.grid.point_at(shapes.Site(0.1, 0.2))

        # we expect this point to be at row 1, column 0
        self.assertEqual(1, point.row)
        self.assertEqual(0, point.column)

        key = kvs.tokens.ground_motion_values_key(self.job_id, point)

        # place the test values in kvs
        for gmv in gmvs:
            kvs.get_client().rpush(key, json.dumps(gmv))

        actual_gmvs = load_gmvs_at(self.job_id, point)
        self.assertEqual(expected_gmvs, actual_gmvs)
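The row/column assertions in both versions of this test follow from simple grid arithmetic. A self-contained sketch under assumed conventions (the origin placement and 0.1-degree cell size are illustrative guesses, not values taken from the test fixture):

from collections import namedtuple

GridPoint = namedtuple('GridPoint', 'row column')


def point_at_sketch(origin_lon, origin_lat, cell_size, site_lon, site_lat):
    # hypothetical: snap a site to the grid by offsetting from the
    # origin and dividing by the cell size
    row = int(round((site_lat - origin_lat) / cell_size))
    column = int(round((site_lon - origin_lon) / cell_size))
    return GridPoint(row, column)

# with an assumed origin of (0.1, 0.1) and 0.1-degree cells,
# Site(0.1, 0.2) lands at row 1, column 0, as the test expects
print(point_at_sketch(0.1, 0.1, 0.1, 0.1, 0.2))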
Example #3
    def compute_risk(self, block_id, **kwargs):
        """
        Compute the results for a single block.

        Currently we support the computation of:
        * damage distributions per asset
        * damage distributions per building taxonomy
        * total damage distribution

        :param block_id: id of the region block data.
        :type block_id: integer
        :keyword fmodel: fragility model associated with this computation.
        :type fmodel: instance of
            :py:class:`openquake.db.models.FragilityModel`
        :return: the sum of the fractions (for each damage state)
            per asset taxonomy for the computed block.
        :rtype: `dict` where each key is a string representing a
            taxonomy and each value is the sum of fractions of all
            the assets related to that taxonomy (represented as
            a 2d `numpy.array`)
        """

        fm = kwargs["fmodel"]
        block = general.Block.from_kvs(self.job_ctxt.job_id, block_id)
        fset = fm.ffd_set if fm.format == "discrete" else fm.ffc_set

        # fractions of each damage state per building taxonomy
        # for the given block
        ddt_fractions = {}

        for site in block.sites:
            point = self.job_ctxt.region.grid.point_at(site)
            gmf = general.load_gmvs_at(self.job_ctxt.job_id, point)

            assets = general.BaseRiskCalculator.assets_at(
                self.job_ctxt.job_id, site)

            for asset in assets:
                funcs = fset.filter(
                    taxonomy=asset.taxonomy).order_by("lsi")

                assert len(funcs) > 0, (
                    "no limit states associated with taxonomy %s "
                    "of asset %s." % (asset.taxonomy, asset.asset_ref))

                fractions = compute_gmf_fractions(
                    gmf, funcs) * asset.number_of_units

                current_fractions = ddt_fractions.get(
                    asset.taxonomy, numpy.zeros((len(gmf), len(funcs) + 1)))

                ddt_fractions[asset.taxonomy] = current_fractions + fractions
                self._store_dda(fractions, asset, fm)

        return ddt_fractions
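compute_gmf_fractions itself is not shown in this example. A plausible sketch for discrete fragility functions, consistent with the (len(gmf), len(funcs) + 1) shape used above; the poe() method (probability of exceeding a limit state at a given ground motion value) is an assumed interface:

import numpy


def compute_gmf_fractions_sketch(gmf, funcs):
    # hypothetical: split unit mass across damage states for each
    # ground motion value; funcs are assumed ordered by limit state
    # and to expose poe(gmv), the exceedance probability at gmv
    fractions = numpy.zeros((len(gmf), len(funcs) + 1))
    for i, gmv in enumerate(gmf):
        poes = [func.poe(gmv) for func in funcs]
        fractions[i, 0] = 1.0 - poes[0]  # no-damage state
        for j in range(len(poes) - 1):
            fractions[i, j + 1] = poes[j] - poes[j + 1]
        fractions[i, -1] = poes[-1]  # worst (collapse) state
    return fractions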
Example #4
    def compute_risk(self, block_id, **kwargs):
        """
        Compute the results for a single block.

        Currently we support the computation of:
        * damage distributions per asset
        * damage distributions per building taxonomy
        * total damage distribution
        * collapse maps

        :param block_id: id of the region block data.
        :type block_id: integer
        :keyword fmodel: fragility model associated with this computation.
        :type fmodel: instance of
            :py:class:`openquake.db.models.FragilityModel`
        :return: the sum of the fractions (for each damage state)
            per asset taxonomy for the computed block.
        :rtype: `dict` where each key is a string representing a
            taxonomy and each value is the sum of fractions of all
            the assets related to that taxonomy (represented as
            a 2d `numpy.array`)
        """

        fragility_model = _fm(self.job_ctxt.oq_job)
        fragility_functions = fragility_model.functions_by_taxonomy()
        block = general.Block.from_kvs(self.job_ctxt.job_id, block_id)

        hazard_getter = lambda site: general.load_gmvs_at(
            self.job_ctxt.job_id,
            general.hazard_input_site(self.job_ctxt, site))

        assets_getter = lambda site: general.BaseRiskCalculator.assets_at(
            self.job_ctxt.job_id, site)

        calculator = api.scenario_damage(fragility_model, fragility_functions)

        for asset_output in api.compute_on_sites(
                block.sites, assets_getter, hazard_getter, calculator):

            self._store_cmap(asset_output.asset, asset_output.collapse_map)

            self._store_dda(asset_output.asset,
                scenario_damage.damage_states(fragility_model),
                asset_output.damage_distribution_asset)

        return calculator.damage_distribution_by_taxonomy
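api.compute_on_sites is used here as a black box; judging by how the getters and the calculator are wired up, its driver loop can be sketched roughly as below (the calculator call signature is an assumption):

def compute_on_sites_sketch(sites, assets_getter, hazard_getter, calculator):
    # hypothetical driver: fetch the hazard once per site, then feed
    # each asset to the calculator and yield its per-asset output
    for site in sites:
        hazard = hazard_getter(site)
        for asset in assets_getter(site):
            yield calculator(asset, hazard)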
Example #5
    def compute_risk(self, block_id, **kwargs):
        """
        Compute the results for a single block.

        Currently we support the computation of:
        * damage distributions per asset
        * damage distributions per building taxonomy
        * total damage distribution
        * collapse maps

        :param block_id: id of the region block data.
        :type block_id: integer
        :keyword fmodel: fragility model associated with this computation.
        :type fmodel: instance of
            :py:class:`openquake.db.models.FragilityModel`
        :return: the sum of the fractions (for each damage state)
            per asset taxonomy for the computed block.
        :rtype: `dict` where each key is a string representing a
            taxonomy and each value is the sum of fractions of all
            the assets related to that taxonomy (represented as
            a 2d `numpy.array`)
        """

        fm = kwargs["fmodel"]
        block = general.Block.from_kvs(self.job_ctxt.job_id, block_id)
        fset = fm.ffd_set if fm.format == "discrete" else fm.ffc_set

        # fractions of each damage state per building taxonomy
        # for the given block
        ddt_fractions = {}

        for site in block.sites:
            gmf = general.load_gmvs_at(
                self.job_ctxt.job_id,
                general.hazard_input_site(self.job_ctxt, site))

            assets = general.BaseRiskCalculator.assets_at(
                self.job_ctxt.job_id, site)

            for asset in assets:
                funcs = fset.filter(taxonomy=asset.taxonomy).order_by("lsi")

                assert len(funcs) > 0, ("no limit states associated "
                                        "with taxonomy %s of asset %s.") % (
                                            asset.taxonomy, asset.asset_ref)

                fractions = compute_gmf_fractions(
                    gmf, funcs) * asset.number_of_units

                current_fractions = ddt_fractions.get(
                    asset.taxonomy, numpy.zeros((len(gmf), len(funcs) + 1)))

                ddt_fractions[asset.taxonomy] = current_fractions + fractions

                self._store_dda(fractions, asset, fm)

                # the collapse map needs the fractions
                # for each ground motion value of the
                # last damage state (the last column)
                self._store_cmap(fractions[:, -1], asset)

        return ddt_fractions
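The collapse map line above stores fractions[:, -1], i.e. the collapse-state fraction for each ground motion value. A map entry would then plausibly summarize that column; the mean/sample-stddev pairing below is an assumption mirroring the conventions in the other examples:

import numpy


def collapse_map_entry_sketch(collapse_fractions):
    # hypothetical: reduce the per-GMV collapse fractions (the last
    # column of the fractions matrix) to the mean and sample standard
    # deviation a collapse map would store for one asset
    return (numpy.mean(collapse_fractions),
            numpy.std(collapse_fractions, ddof=1))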
Example #6
    def compute_risk(self, block_id, **kwargs):
        """
        This method will perform two distinct (but similar) computations and
        return a result for each computation. The computations are as follows:

        First:

        For a given block of sites, compute loss values for all assets in the
        block. This computation will yield a single loss value per realization
        for the region block.

        Second:

        For each asset in the given block of sites, we need to compute the loss
        (where loss = loss_ratio * asset_value) for each realization. This
        gives 1 loss value _per_ asset _per_ realization. We then need to take
        the mean & standard deviation.

        Other info:

        The GMF data for each realization is stored in the KVS by the preceding
        scenario hazard job.

        :param block_id: id of the region block data we need to pull from the
            KVS
        :type block_id: str
        :keyword vuln_model:
            dict of :py:class:`openquake.shapes.VulnerabilityFunction` objects,
            keyed by the vulnerability function name as a string
        :keyword epsilon_provider:
            :py:class:`openquake.risk.job.EpsilonProvider` object

        :returns: 2-tuple of the following data:
            * 1-dimensional :py:class:`numpy.ndarray` of loss values for this
                region block (again, 1 value per realization)

            * list of 2-tuples pairing each site with its loss and
                asset information.

                The first element of each 2-tuple shall be a
                :py:class:`openquake.shapes.Site` object, which represents the
                geographical location of the asset loss.

                The second element shall be a list of
                2-tuples of dicts representing the loss and asset data (in that
                order).

                Example::

                    [(<Site(-117.0, 38.0)>, [
                        ({'mean_loss': 200.0, 'stddev_loss': 100},
                            {'assetID': 'a171'}),
                        ({'mean_loss': 200.0, 'stddev_loss': 100},
                            {'assetID': 'a187'})
                    ]),
                     (<Site(-118.0, 39.0)>, [
                        ({'mean_loss': 50, 'stddev_loss': 50.0},
                            {'assetID': 'a192'})
                    ])]
        """

        vuln_model = kwargs['vuln_model']
        epsilon_provider = general.EpsilonProvider(self.job_ctxt.params)
        block = general.Block.from_kvs(self.job_ctxt.job_id, block_id)

        loss_data = {}

        # used to sum the losses for the whole block
        sum_per_gmf = SumPerGroundMotionField(vuln_model, epsilon_provider)

        for site in block.sites:
            point = self.job_ctxt.region.grid.point_at(site)

            # the scientific functions used below
            # require the gmvs to be wrapped in a dict with a single key, IMLs
            gmvs = {'IMLs': general.load_gmvs_at(self.job_ctxt.job_id, point)}

            assets = general.BaseRiskCalculator.assets_at(
                self.job_ctxt.job_id, site)

            for asset in assets:
                vuln_function = vuln_model[asset.taxonomy]

                asset_mean_loss = compute_mean_loss(vuln_function, gmvs,
                                                    epsilon_provider, asset)

                asset_stddev_loss = compute_stddev_loss(
                    vuln_function, gmvs, epsilon_provider, asset)

                asset_site = shapes.Site(asset.site.x, asset.site.y)

                loss = ({
                    'mean_loss': asset_mean_loss,
                    'stddev_loss': asset_stddev_loss
                }, {
                    'assetID': asset.asset_ref
                })

                sum_per_gmf.add(gmvs, asset)
                collect_block_data(loss_data, asset_site, loss)

        return sum_per_gmf.losses, loss_data
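SumPerGroundMotionField is only used through add() and .losses here. A standalone sketch of that accumulator, with the loss sampling injected as a callable so the sketch does not guess at the library's internals:

import numpy


class SumPerGroundMotionFieldSketch(object):
    # hypothetical accumulator: keeps an element-wise running sum of
    # per-asset loss arrays, one entry per realization

    def __init__(self, compute_losses):
        # compute_losses(gmvs, asset) -> numpy array of losses per
        # realization; injected here, since the real class builds it
        # from the vulnerability model and epsilon provider
        self.compute_losses = compute_losses
        self.losses = numpy.array([])

    def add(self, gmvs, asset):
        losses = self.compute_losses(gmvs, asset)
        if self.losses.size == 0:
            self.losses = losses.copy()
        else:
            self.losses = self.losses + losses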
Example #7
    def compute_risk(self, block_id, **kwargs):
        """
        This method will perform two distinct (but similar) computations and
        return a result for each computation. The computations are as follows:

        First:

        For a given block of sites, compute loss values for all assets in the
        block. This computation will yield a single loss value per realization
        for the region block.

        Second:

        For each asset in the given block of sites, we need to compute the loss
        (where loss = loss_ratio * asset_value) for each realization. This
        gives 1 loss value _per_ asset _per_ realization. We then need to take
        the mean & standard deviation.

        Other info:

        The GMF data for each realization is stored in the KVS by the preceding
        scenario hazard job.

        :param block_id: id of the region block data we need to pull from the
            KVS
        :type block_id: str
        :keyword vuln_model:
            dict of :py:class:`openquake.shapes.VulnerabilityFunction` objects,
            keyed by the vulnerability function name as a string
        :keyword epsilon_provider:
            :py:class:`openquake.risk.job.EpsilonProvider` object

        :returns: 2-tuple of the following data:
            * 1-dimensional :py:class:`numpy.ndarray` of loss values for this
                region block (again, 1 value per realization)

            * list of 2-tuples pairing each site with its loss and
                asset information.

                The first element of each 2-tuple shall be a
                :py:class:`openquake.shapes.Site` object, which represents the
                geographical location of the asset loss.

                The second element shall be a list of
                2-tuples of dicts representing the loss and asset data (in that
                order).

                Example::

                    [(<Site(-117.0, 38.0)>, [
                        ({'mean_loss': 200.0, 'stddev_loss': 100},
                            {'assetID': 'a171'}),
                        ({'mean_loss': 200.0, 'stddev_loss': 100},
                            {'assetID': 'a187'})
                    ]),
                     (<Site(-118.0, 39.0)>, [
                        ({'mean_loss': 50, 'stddev_loss': 50.0},
                            {'assetID': 'a192'})
                    ])]
        """

        insured = kwargs["insured_losses"]
        vulnerability_model = kwargs["vuln_model"]
        seed, correlation_type = self._get_correlation_type()
        block = general.Block.from_kvs(self.job_ctxt.job_id, block_id)

        assets_getter = lambda site: general.BaseRiskCalculator.assets_at(
            self.job_ctxt.job_id, site)

        hazard_getter = lambda site: general.load_gmvs_at(
            self.job_ctxt.job_id,
            general.hazard_input_site(self.job_ctxt, site))

        loss_map_data = {}

        calculator = api.scenario_risk(
            vulnerability_model, seed, correlation_type, insured)

        for asset_output in api.compute_on_sites(
                block.sites, assets_getter, hazard_getter, calculator):

            asset_site = shapes.Site(
                asset_output.asset.site.x, asset_output.asset.site.y)

            collect_block_data(
                loss_map_data, asset_site,
                ({"mean_loss": asset_output.mean,
                  "stddev_loss": asset_output.standard_deviation},
                 {"assetID": asset_output.asset.asset_ref}))

        return calculator.aggregate_losses, loss_map_data
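collect_block_data is simple enough to infer from its call sites: it groups (loss, asset) pairs by site. A sketch; note that the documented return shape, a list of 2-tuples, would then be loss_map_data.items():

def collect_block_data_sketch(block_data, site, data):
    # hypothetical: append this (loss, asset) pair to the site's bucket
    block_data.setdefault(site, []).append(data)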
Example #8
    def compute_risk(self, block_id, **kwargs):
        """
        This method will perform two distinct (but similar) computations and
        return a result for each computation. The computations are as follows:

        First:

        For a given block of sites, compute loss values for all assets in the
        block. This computation will yield a single loss value per realization
        for the region block.

        Second:

        For each asset in the given block of sites, we need to compute the loss
        (where loss = loss_ratio * asset_value) for each realization. This
        gives 1 loss value _per_ asset _per_ realization. We then need to take
        the mean & standard deviation.

        Other info:

        The GMF data for each realization is stored in the KVS by the preceding
        scenario hazard job.

        :param block_id: id of the region block data we need to pull from the
            KVS
        :type block_id: str
        :keyword vuln_model:
            dict of :py:class:`openquake.shapes.VulnerabilityFunction` objects,
            keyed by the vulnerability function name as a string
        :keyword epsilon_provider:
            :py:class:`openquake.risk.job.EpsilonProvider` object

        :returns: 2-tuple of the following data:
            * 1-dimensional :py:class:`numpy.ndarray` of loss values for this
                region block (again, 1 value per realization)

            * list of 2-tuples pairing each site with its loss and
                asset information.

                The first element of each 2-tuple shall be a
                :py:class:`openquake.shapes.Site` object, which represents the
                geographical location of the asset loss.

                The second element shall be a list of
                2-tuples of dicts representing the loss and asset data (in that
                order).

                Example::

                    [(<Site(-117.0, 38.0)>, [
                        ({'mean_loss': 200.0, 'stddev_loss': 100},
                            {'assetID': 'a171'}),
                        ({'mean_loss': 200.0, 'stddev_loss': 100},
                            {'assetID': 'a187'})
                    ]),
                     (<Site(-118.0, 39.0)>, [
                        ({'mean_loss': 50, 'stddev_loss': 50.0},
                            {'assetID': 'a192'})
                    ])]
        """

        vuln_model = kwargs["vuln_model"]
        insured_losses = kwargs["insured_losses"]
        epsilon_provider = general.EpsilonProvider(self.job_ctxt.params)
        block = general.Block.from_kvs(self.job_ctxt.job_id, block_id)

        block_losses = []
        loss_map_data = {}

        for site in block.sites:
            gmvs = {"IMLs": general.load_gmvs_at(
                    self.job_ctxt.job_id, general.hazard_input_site(
                    self.job_ctxt, site))}

            assets = general.BaseRiskCalculator.assets_at(
                self.job_ctxt.job_id, site)

            for asset in assets:
                vuln_function = vuln_model[asset.taxonomy]

                loss_ratios = general.compute_loss_ratios(
                    vuln_function, gmvs, epsilon_provider, asset)
                losses = loss_ratios * asset.value

                if insured_losses:
                    losses = general.compute_insured_losses(asset, losses)

                asset_site = shapes.Site(asset.site.x, asset.site.y)

                loss = ({
                    "mean_loss": numpy.mean(losses),
                    "stddev_loss": numpy.std(losses, ddof=1)}, {
                    "assetID": asset.asset_ref
                })

                block_losses.append(losses)
                collect_block_data(loss_map_data, asset_site, loss)

        sum_block_losses = reduce(lambda x, y: x + y, block_losses)
        return sum_block_losses, loss_map_data
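The final reduce is an element-wise sum over numpy arrays, producing one aggregate loss per realization (sum(block_losses) would be an equivalent spelling, and Python 3 would need functools.reduce). A tiny illustration with made-up numbers:

import numpy
from functools import reduce  # a builtin in the Python 2 era of this code

# two assets, three realizations each (illustrative values)
block_losses = [numpy.array([10.0, 20.0, 30.0]),
                numpy.array([1.0, 2.0, 3.0])]

aggregate = reduce(lambda x, y: x + y, block_losses)
print(aggregate)  # [ 11.  22.  33.] -- one loss per realization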
Example #9
    def compute_risk(self, block_id, **kwargs):
        """
        This method will perform two distinct (but similar) computations and
        return a result for each computation. The computations are as follows:

        First:

        For a given block of sites, compute loss values for all assets in the
        block. This computation will yield a single loss value per realization
        for the region block.

        Second:

        For each asset in the given block of sites, we need to compute the loss
        (where loss = loss_ratio * asset_value) for each realization. This
        gives 1 loss value _per_ asset _per_ realization. We then need to take
        the mean & standard deviation.

        Other info:

        The GMF data for each realization is stored in the KVS by the preceding
        scenario hazard job.

        :param block_id: id of the region block data we need to pull from the
            KVS
        :type block_id: str
        :keyword vuln_model:
            dict of :py:class:`openquake.shapes.VulnerabilityFunction` objects,
            keyed by the vulnerability function name as a string
        :keyword epsilon_provider:
            :py:class:`openquake.risk.job.EpsilonProvider` object

        :returns: 2-tuple of the following data:
            * 1-dimensional :py:class:`numpy.ndarray` of loss values for this
                region block (again, 1 value per realization)

            * list of 2-tuples pairing each site with its loss and
                asset information.

                The first element of each 2-tuple shall be a
                :py:class:`openquake.shapes.Site` object, which represents the
                geographical location of the asset loss.

                The second element shall be a list of
                2-tuples of dicts representing the loss and asset data (in that
                order).

                Example::

                    [(<Site(-117.0, 38.0)>, [
                        ({'mean_loss': 200.0, 'stddev_loss': 100},
                            {'assetID': 'a171'}),
                        ({'mean_loss': 200.0, 'stddev_loss': 100},
                            {'assetID': 'a187'})
                    ]),
                     (<Site(-118.0, 39.0)>, [
                        ({'mean_loss': 50, 'stddev_loss': 50.0},
                            {'assetID': 'a192'})
                    ])]
        """

        vuln_model = kwargs['vuln_model']
        epsilon_provider = general.EpsilonProvider(self.job_ctxt.params)
        block = general.Block.from_kvs(self.job_ctxt.job_id, block_id)

        loss_data = {}

        # used to sum the losses for the whole block
        sum_per_gmf = SumPerGroundMotionField(vuln_model, epsilon_provider)

        for site in block.sites:
            point = self.job_ctxt.region.grid.point_at(site)

            # the scientific functions used below
            # require the gmvs to be wrapped in a dict with a single key, IMLs
            gmvs = {'IMLs': general.load_gmvs_at(
                    self.job_ctxt.job_id, point)}

            assets = general.BaseRiskCalculator.assets_at(
                self.job_ctxt.job_id, site)

            for asset in assets:
                vuln_function = vuln_model[asset.taxonomy]

                asset_mean_loss = compute_mean_loss(
                    vuln_function, gmvs, epsilon_provider, asset)

                asset_stddev_loss = compute_stddev_loss(
                    vuln_function, gmvs, epsilon_provider, asset)

                asset_site = shapes.Site(asset.site.x, asset.site.y)

                loss = ({
                    'mean_loss': asset_mean_loss,
                    'stddev_loss': asset_stddev_loss}, {
                    'assetID': asset.asset_ref
                })

                sum_per_gmf.add(gmvs, asset)
                collect_block_data(loss_data, asset_site, loss)

        return sum_per_gmf.losses, loss_data
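compute_mean_loss and compute_stddev_loss are opaque here, but examples #8 and #10 compute the same statistics inline, which suggests sketches like the following (with the loss-ratio sampling factored out; the real helpers take the vulnerability function, gmvs, epsilon provider and asset instead):

import numpy


def compute_mean_loss_sketch(sampled_loss_ratios, asset_value):
    # hypothetical: mean of the sampled losses, one per realization
    return numpy.mean(sampled_loss_ratios * asset_value)


def compute_stddev_loss_sketch(sampled_loss_ratios, asset_value):
    # hypothetical: sample standard deviation, matching the ddof=1
    # convention used in examples #8 and #10
    return numpy.std(sampled_loss_ratios * asset_value, ddof=1)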
Example #10
    def compute_risk(self, block_id, **kwargs):
        """
        This method will perform two distinct (but similar) computations and
        return a result for each computation. The computations are as follows:

        First:

        For a given block of sites, compute loss values for all assets in the
        block. This computation will yield a single loss value per realization
        for the region block.

        Second:

        For each asset in the given block of sites, we need to compute the loss
        (where loss = loss_ratio * asset_value) for each realization. This
        gives 1 loss value _per_ asset _per_ realization. We then need to take
        the mean & standard deviation.

        Other info:

        The GMF data for each realization is stored in the KVS by the preceding
        scenario hazard job.

        :param block_id: id of the region block data we need to pull from the
            KVS
        :type block_id: str
        :keyword vuln_model:
            dict of :py:class:`openquake.shapes.VulnerabilityFunction` objects,
            keyed by the vulnerability function name as a string
        :keyword epsilon_provider:
            :py:class:`openquake.risk.job.EpsilonProvider` object

        :returns: 2-tuple of the following data:
            * 1-dimensional :py:class:`numpy.ndarray` of loss values for this
                region block (again, 1 value per realization)

            * list of 2-tuples pairing each site with its loss and
                asset information.

                The first element of each 2-tuple shall be a
                :py:class:`openquake.shapes.Site` object, which represents the
                geographical location of the asset loss.

                The second element shall be a list of
                2-tuples of dicts representing the loss and asset data (in that
                order).

                Example::

                    [(<Site(-117.0, 38.0)>, [
                        ({'mean_loss': 200.0, 'stddev_loss': 100},
                            {'assetID': 'a171'}),
                        ({'mean_loss': 200.0, 'stddev_loss': 100},
                            {'assetID': 'a187'})
                    ]),
                     (<Site(-118.0, 39.0)>, [
                        ({'mean_loss': 50, 'stddev_loss': 50.0},
                            {'assetID': 'a192'})
                    ])]
        """

        vuln_model = kwargs["vuln_model"]
        insured_losses = kwargs["insured_losses"]
        epsilon_provider = general.EpsilonProvider(self.job_ctxt.params)
        block = general.Block.from_kvs(self.job_ctxt.job_id, block_id)

        block_losses = []
        loss_map_data = {}

        for site in block.sites:
            gmvs = {"IMLs": general.load_gmvs_at(
                self.job_ctxt.job_id,
                general.hazard_input_site(self.job_ctxt, site))}

            assets = general.BaseRiskCalculator.assets_at(
                self.job_ctxt.job_id, site)

            for asset in assets:
                vuln_function = vuln_model[asset.taxonomy]

                loss_ratios = general.compute_loss_ratios(
                    vuln_function, gmvs, epsilon_provider, asset)
                losses = loss_ratios * asset.value

                if insured_losses:
                    losses = general.compute_insured_losses(asset, losses)

                asset_site = shapes.Site(asset.site.x, asset.site.y)

                loss = ({
                    "mean_loss": numpy.mean(losses),
                    "stddev_loss": numpy.std(losses, ddof=1)
                }, {
                    "assetID": asset.asset_ref
                })

                block_losses.append(losses)
                collect_block_data(loss_map_data, asset_site, loss)

        sum_block_losses = reduce(lambda x, y: x + y, block_losses)
        return sum_block_losses, loss_map_data
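compute_insured_losses is not shown in any of these examples. One common convention, sketched here purely as an assumption, zeroes losses below the policy deductible and caps them at the insurance limit; the attribute names are hypothetical:

import numpy


def compute_insured_losses_sketch(asset, losses):
    # hypothetical: `deductible` and `ins_limit` are assumed names;
    # drop losses under the deductible, cap the rest at the limit
    insured = numpy.where(losses < asset.deductible, 0.0, losses)
    return numpy.where(insured > asset.ins_limit, asset.ins_limit, insured)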