def test_hazard_input_is_the_cell_center(self):
    """When `COMPUTE_HAZARD_AT_ASSETS_LOCATIONS` is not specified, the
    hazard must be looked up at the center of the grid cell the given
    site falls in."""
    # PEP 8: implicit continuation inside the dict literal instead of
    # the original backslash continuation.
    params = {config.INPUT_REGION:
              "1.0, 1.0, 2.0, 1.0, 2.0, 2.0, 1.0, 2.0",
              config.REGION_GRID_SPACING: 0.5}

    job_ctxt = engine.JobContext(params, None)

    # (1.2, 1.2) falls in the cell centered on (1.0, 1.0).
    self.assertEqual(shapes.Site(1.0, 1.0),
                     hazard_input_site(job_ctxt, shapes.Site(1.2, 1.2)))

    # (1.6, 1.6) falls in the cell centered on (1.5, 1.5).
    self.assertEqual(shapes.Site(1.5, 1.5),
                     hazard_input_site(job_ctxt, shapes.Site(1.6, 1.6)))
def test_hazard_input_is_the_cell_center(self):
    # Without `COMPUTE_HAZARD_AT_ASSETS_LOCATIONS` the hazard is looked
    # up at the center of the grid cell containing the given site.
    params = {
        config.INPUT_REGION: "1.0, 1.0, 2.0, 1.0, 2.0, 2.0, 1.0, 2.0",
        config.REGION_GRID_SPACING: 0.5,
    }
    job_ctxt = engine.JobContext(params, None)

    # each pair is (expected cell center, queried risk site)
    cases = [((1.0, 1.0), (1.2, 1.2)),
             ((1.5, 1.5), (1.6, 1.6))]

    for center, queried in cases:
        self.assertEqual(
            shapes.Site(*center),
            hazard_input_site(job_ctxt, shapes.Site(*queried)))
def _compute_loss(self, block_id):
    """
    Calculate and store in the kvs the loss data.

    For each site in the block, the hazard curve is looked up at the
    hazard input site; then, for each asset at the site, the loss ratio
    curve, loss curve and conditional losses (one per configured PoE)
    are computed and stored in the kvs by the called helpers.

    :param block_id: id of the region block to process.
    :return: True (results are stored in the kvs as a side effect).
    """
    block = general.Block.from_kvs(self.job_ctxt.job_id, block_id)

    # vulnerability model previously loaded into the kvs for this job
    vuln_curves = vulnerability.load_vuln_model_from_kvs(
        self.job_ctxt.job_id)

    for site in block.sites:
        point = self.job_ctxt.region.grid.point_at(site)
        # the hazard may live on a different site than the risk site
        hazard_curve = self._get_db_curve(
            general.hazard_input_site(self.job_ctxt, site))

        assets = general.BaseRiskCalculator.assets_at(
            self.job_ctxt.job_id, site)

        for asset in assets:
            loss_ratio_curve = self.compute_loss_ratio_curve(
                point, asset, hazard_curve, vuln_curves)

            # no curve means nothing further can be derived for the asset
            if loss_ratio_curve:
                loss_curve = self.compute_loss_curve(
                    point, loss_ratio_curve, asset)

                for poe in conditional_loss_poes(self.job_ctxt.params):
                    compute_conditional_loss(self.job_ctxt.job_id,
                                             point.column,
                                             point.row, loss_curve,
                                             asset, poe)

    return True
def _compute_loss(self, block_id):
    """
    Calculate and store in the kvs the loss data.

    Walks every site of the block; for each asset at a site it derives
    the loss ratio curve from the hazard curve (looked up at the hazard
    input site), then the loss curve and the conditional losses for
    every configured PoE. All results are stored in the kvs by the
    invoked helpers.

    :param block_id: id of the region block to process.
    :return: True (results are stored in the kvs as a side effect).
    """
    block = general.Block.from_kvs(self.job_ctxt.job_id, block_id)

    # vulnerability model previously loaded into the kvs for this job
    vuln_curves = vulnerability.load_vuln_model_from_kvs(
        self.job_ctxt.job_id)

    for site in block.sites:
        point = self.job_ctxt.region.grid.point_at(site)
        # the hazard may live on a different site than the risk site
        hazard_curve = self._get_db_curve(
            general.hazard_input_site(self.job_ctxt, site))

        assets = general.BaseRiskCalculator.assets_at(
            self.job_ctxt.job_id, site)

        for asset in assets:
            loss_ratio_curve = self.compute_loss_ratio_curve(
                point, asset, hazard_curve, vuln_curves)

            # no curve means nothing further can be derived for the asset
            if loss_ratio_curve:
                loss_curve = self.compute_loss_curve(
                    point, loss_ratio_curve, asset)

                for poe in conditional_loss_poes(self.job_ctxt.params):
                    compute_conditional_loss(
                        self.job_ctxt.job_id, point.column,
                        point.row, loss_curve, asset, poe)

    return True
def hazard_getter(site):
    """Return (grid point, GMF dict) for the given site."""
    grid_point = self.job_ctxt.region.grid.point_at(site)
    hazard_site = general.hazard_input_site(self.job_ctxt, site)
    ground_motion_field = {
        "IMLs": self._get_gmvs_at(hazard_site),
        "TSES": self._tses(),
        "TimeSpan": self._time_span(),
    }
    return grid_point, ground_motion_field
def _compute_loss(self, block_id):
    """
    Calculate and store in the kvs the loss data.

    Drives :func:`classical.compute` over the sites of the block; the
    `on_asset_complete` callback persists the loss curve, the loss
    ratio curve and the conditional losses of each asset in the kvs.

    :param block_id: id of the region block to process.
    """
    block = Block.from_kvs(self.job_ctxt.job_id, block_id)

    vuln_curves = vulnerability.load_vuln_model_from_kvs(
        self.job_ctxt.job_id)
    lrem_steps = self.job_ctxt.oq_job_profile.lrem_steps_per_interval
    loss_poes = conditional_loss_poes(self.job_ctxt.params)

    # PEP 8 (E731): local defs instead of lambdas bound to names.
    def assets_getter(site):
        """Return the assets defined at the given site."""
        return BaseRiskCalculator.assets_at(self.job_ctxt.job_id, site)

    def hazard_getter(site):
        """Return (grid point, hazard curve) for the given site."""
        return (self.job_ctxt.region.grid.point_at(site),
                self._get_db_curve(
                    hazard_input_site(self.job_ctxt, site)))

    def on_asset_complete(asset, point, loss_ratio_curve, loss_curve,
                          loss_conditionals):
        """Store the computed curves and conditional losses in the kvs."""
        loss_key = kvs.tokens.loss_curve_key(
            self.job_ctxt.job_id, point.row, point.column,
            asset.asset_ref)
        kvs.get_client().set(loss_key, loss_curve.to_json())

        for poe, loss in loss_conditionals.items():
            key = kvs.tokens.loss_key(
                self.job_ctxt.job_id, point.row, point.column,
                asset.asset_ref, poe)
            kvs.get_client().set(key, loss)

        loss_ratio_key = kvs.tokens.loss_ratio_key(
            self.job_ctxt.job_id, point.row, point.column,
            asset.asset_ref)
        kvs.get_client().set(loss_ratio_key, loss_ratio_curve.to_json())

    classical.compute(block.sites, assets_getter, vuln_curves,
                      hazard_getter, lrem_steps, loss_poes,
                      on_asset_complete)
def hazard_getter(site):
    # NOTE(review): the original docstring claimed this computes a loss
    # curve; the code only assembles GMF data for the site.
    """Return the GMF data (IMLs, TSES, time span) for the given site."""
    gmvs = self._get_gmvs_at(general.hazard_input_site(
        self.job_ctxt, site))
    return {"IMLs": gmvs, "TSES": self._tses(),
            "TimeSpan": self._time_span()}
def get_loss_curve(site, vuln_function, asset):
    """Compute the loss curve based on the hazard curve at the site."""
    lrem_steps = self.job_ctxt.oq_job_profile.lrem_steps_per_interval
    hazard_site = general.hazard_input_site(self.job_ctxt, site)
    hazard_curve = self._get_db_curve(hazard_site)

    ratio_curve = compute_loss_ratio_curve(
        vuln_function, hazard_curve, lrem_steps)

    return compute_loss_curve(ratio_curve, asset.value)
def test_hazard_input_is_the_exposure_site(self):
    # With `COMPUTE_HAZARD_AT_ASSETS_LOCATIONS` set, the hazard is
    # looked up at the exact risk location passed to the function.
    job_ctxt = engine.JobContext(
        {config.COMPUTE_HAZARD_AT_ASSETS: True}, None)

    expected = shapes.Site(1.0, 1.0)
    self.assertEqual(
        expected, hazard_input_site(job_ctxt, shapes.Site(1.0, 1.0)))
def test_hazard_input_is_the_exposure_site(self):
    # When `COMPUTE_HAZARD_AT_ASSETS_LOCATIONS` is specified, the
    # hazard must be looked up at the same risk location that was
    # passed in (the input parameter of the function).
    params = {config.COMPUTE_HAZARD_AT_ASSETS: True}
    job_ctxt = engine.JobContext(params, None)

    result = hazard_input_site(job_ctxt, shapes.Site(1.0, 1.0))
    self.assertEqual(shapes.Site(1.0, 1.0), result)
def _load_ground_motion_field(self, site):
    """
    Retrieve the ground motion field for the specified site.

    The values are read at the hazard input site corresponding to the
    given risk site.

    :param site: the location of interest.
    :return: dict with the ground motion values under "IMLs" plus the
        "TSES" and "TimeSpan" of the computation.
    """
    ground_motion_values = self._get_gmvs_at(general.hazard_input_site(
        self.job_ctxt, site))
    return {"IMLs": ground_motion_values, "TSES": self._tses(),
            "TimeSpan": self._time_span()}
def _load_ground_motion_field(self, site):
    """
    Retrieve the ground motion field for the specified site.

    The values are read at the hazard input site corresponding to the
    given risk site.

    :param site: the location of interest.
    :return: dict with "IMLs" (ground motion values), "TSES" and
        "TimeSpan" keys.
    """
    ground_motion_values = self._get_gmvs_at(
        general.hazard_input_site(self.job_ctxt, site))

    return {
        "IMLs": ground_motion_values,
        "TSES": self._tses(),
        "TimeSpan": self._time_span()
    }
def _compute_loss(self, block_id):
    """Compute risk for a block of sites, that means:

    * loss ratio curves
    * loss curves
    * conditional losses
    * (partial) aggregate loss curve

    :param block_id: id of the region block to process.
    :return: the aggregated losses for this block
        (`aggregate_curve.losses`).
    """
    self.vuln_curves = vulnerability.load_vuln_model_from_kvs(
        self.job_ctxt.job_id)

    block = general.Block.from_kvs(self.job_ctxt.job_id, block_id)

    # aggregate the losses for this block
    aggregate_curve = general.AggregateLossCurve()

    for site in block.sites:
        point = self.job_ctxt.region.grid.point_at(site)
        # GMF values are read at the hazard input site for this site
        gmvs = self._get_gmvs_at(general.hazard_input_site(
            self.job_ctxt, site))

        gmf = {"IMLs": gmvs, "TSES": self._tses(),
               "TimeSpan": self._time_span()}

        assets = general.BaseRiskCalculator.assets_at(
            self.job_ctxt.job_id, site)

        for asset in assets:
            # loss ratios, used both to produce the curve
            # and to aggregate the losses
            loss_ratios = self.compute_loss_ratios(asset, gmf)

            loss_ratio_curve = self.compute_loss_ratio_curve(
                point.column, point.row, asset, gmf, loss_ratios)

            # losses (ratio * value) feed the block-level aggregate
            aggregate_curve.append(loss_ratios * asset.value)

            if loss_ratio_curve:
                loss_curve = self.compute_loss_curve(
                    point.column, point.row, loss_ratio_curve, asset)

                for loss_poe in general.conditional_loss_poes(
                        self.job_ctxt.params):
                    general.compute_conditional_loss(
                        self.job_ctxt.job_id, point.column, point.row,
                        loss_curve, asset, loss_poe)

    return aggregate_curve.losses
def _compute_bcr(self, block_id):
    """
    Calculate and store in the kvs the benefit-cost ratio data for block.

    A value is stored with key :func:`openquake.kvs.tokens.bcr_block_key`.
    See :func:`openquake.risk.job.general.compute_bcr_for_block` for result
    data structure spec.

    :param block_id: id of the region block to process.
    :return: True (results are stored in the kvs as a side effect).
    """
    # results grouped by asset location: (x, y) -> [(data, asset_ref)]
    result = defaultdict(list)
    block = Block.from_kvs(self.job_ctxt.job_id, block_id)

    # vulnerability models for the original and the retrofitted assets
    vulnerability_model_original = vulnerability.load_vuln_model_from_kvs(
        self.job_ctxt.job_id)
    vulnerability_model_retrofitted = (
        vulnerability.load_vuln_model_from_kvs(
            self.job_ctxt.job_id, retrofitted=True))

    steps = self.job_ctxt.oq_job_profile.lrem_steps_per_interval

    # PEP 8 (E731): local defs instead of lambdas bound to names.
    def assets_getter(site):
        """Return the assets defined at the given site."""
        return BaseRiskCalculator.assets_at(self.job_ctxt.job_id, site)

    def hazard_getter(site):
        """Return the hazard curve at the hazard input site."""
        return self._get_db_curve(hazard_input_site(self.job_ctxt, site))

    # renamed from `bcr` to avoid shadowing the result variable below
    bcr_calculator = api.bcr(
        api.classical(vulnerability_model_original, steps=steps),
        api.classical(vulnerability_model_retrofitted, steps=steps),
        float(self.job_ctxt.params["INTEREST_RATE"]),
        float(self.job_ctxt.params["ASSET_LIFE_EXPECTANCY"]))

    for asset_output in api.compute_on_sites(
            block.sites, assets_getter, hazard_getter, bcr_calculator):
        asset = asset_output.asset
        result[(asset.site.x, asset.site.y)].append((
            {"bcr": asset_output.bcr,
             "eal_original": asset_output.eal_original,
             "eal_retrofitted": asset_output.eal_retrofitted},
            asset.asset_ref))

    bcr = result.items()
    bcr_block_key = kvs.tokens.bcr_block_key(
        self.job_ctxt.job_id, block_id)
    kvs.set_value_json_encoded(bcr_block_key, bcr)
    LOGGER.debug("bcr result for block %s: %r", block_id, bcr)

    return True
def _compute_loss(self, block_id):
    """
    Calculate and store in the kvs the loss data.

    Runs the classical calculator with conditional losses over the
    sites of the block and stores, for each asset: the loss curve, the
    loss ratio curve and the conditional losses (one per PoE).

    :param block_id: id of the region block to process.
    """
    block = Block.from_kvs(self.job_ctxt.job_id, block_id)

    vulnerability_model = vulnerability.load_vuln_model_from_kvs(
        self.job_ctxt.job_id)
    steps = self.job_ctxt.oq_job_profile.lrem_steps_per_interval

    # PEP 8 (E731): local defs instead of lambdas bound to names.
    def assets_getter(site):
        """Return the assets defined at the given site."""
        return BaseRiskCalculator.assets_at(self.job_ctxt.job_id, site)

    def hazard_getter(site):
        """Return the hazard curve at the hazard input site."""
        return self._get_db_curve(hazard_input_site(self.job_ctxt, site))

    calculator = api.conditional_losses(
        conditional_loss_poes(self.job_ctxt.params),
        api.classical(vulnerability_model, steps=steps))

    for asset_output in api.compute_on_sites(
            block.sites, assets_getter, hazard_getter, calculator):

        location = asset_output.asset.site
        point = self.job_ctxt.region.grid.point_at(
            shapes.Site(location.x, location.y))

        # store the loss curve
        loss_key = kvs.tokens.loss_curve_key(
            self.job_ctxt.job_id, point.row, point.column,
            asset_output.asset.asset_ref)
        kvs.get_client().set(loss_key, asset_output.loss_curve.to_json())

        # store the loss ratio curve
        loss_ratio_key = kvs.tokens.loss_ratio_key(
            self.job_ctxt.job_id, point.row, point.column,
            asset_output.asset.asset_ref)
        kvs.get_client().set(
            loss_ratio_key, asset_output.loss_ratio_curve.to_json())

        # store the conditional losses, one per PoE
        for poe, loss in asset_output.conditional_losses.items():
            key = kvs.tokens.loss_key(
                self.job_ctxt.job_id, point.row, point.column,
                asset_output.asset.asset_ref, poe)
            kvs.get_client().set(key, loss)
def compute_risk(self, block_id, **kwargs):
    """
    Compute the results for a single block.

    Currently we support the computation of:
    * damage distributions per asset
    * damage distributions per building taxonomy
    * total damage distribution
    * collapse maps

    :param block_id: id of the region block data.
    :type block_id: integer
    :keyword fmodel: fragility model associated to this computation.
    :type fmodel: instance of
        :py:class:`openquake.db.models.FragilityModel`
    :return: the sum of the fractions (for each damage state)
        per asset taxonomy for the computed block.
    :rtype: `dict` where each key is a string representing a
        taxonomy and each value is the sum of fractions of all
        the assets related to that taxonomy (represented as
        a 2d `numpy.array`)
    """
    fragility_model = _fm(self.job_ctxt.oq_job)
    fragility_functions = fragility_model.functions_by_taxonomy()
    block = general.Block.from_kvs(self.job_ctxt.job_id, block_id)

    # PEP 8 (E731): local defs instead of lambdas bound to names.
    def hazard_getter(site):
        """Return the ground motion values at the hazard input site."""
        return general.load_gmvs_at(
            self.job_ctxt.job_id,
            general.hazard_input_site(self.job_ctxt, site))

    def assets_getter(site):
        """Return the assets defined at the given site."""
        return general.BaseRiskCalculator.assets_at(
            self.job_ctxt.job_id, site)

    calculator = api.scenario_damage(fragility_model,
                                     fragility_functions)

    for asset_output in api.compute_on_sites(
            block.sites, assets_getter, hazard_getter, calculator):

        self._store_cmap(asset_output.asset, asset_output.collapse_map)

        self._store_dda(asset_output.asset,
                        scenario_damage.damage_states(fragility_model),
                        asset_output.damage_distribution_asset)

    return calculator.damage_distribution_by_taxonomy
def get_loss_curve(site, vuln_function, asset):
    """Compute the loss curve based on GMF data for the given site."""
    gmvs = self._get_gmvs_at(general.hazard_input_site(
        self.job_ctxt, site))
    gmf_slice = {"IMLs": gmvs, "TSES": self._tses(),
                 "TimeSpan": self._time_span()}

    # loss ratios are computed once and reused both for the curve
    # and for the block-level aggregate below
    loss_ratios = general.compute_loss_ratios(
        vuln_function, gmf_slice, epsilon_provider, asset)
    loss_ratio_curve = general.compute_loss_ratio_curve(
        vuln_function, gmf_slice, epsilon_provider, asset,
        self.job_ctxt.oq_job_profile.loss_histogram_bins,
        loss_ratios=loss_ratios)

    # side effect: feed the enclosing scope's aggregate loss curve
    aggregate_curve.append(loss_ratios * asset.value)

    return loss_ratio_curve.rescale_abscissae(asset.value)
def get_loss_curve(site, vuln_function, asset):
    """Compute the loss curve based on GMF data for the given site."""
    gmvs = self._get_gmvs_at(
        general.hazard_input_site(self.job_ctxt, site))

    gmf_slice = {
        "IMLs": gmvs,
        "TSES": self._tses(),
        "TimeSpan": self._time_span()
    }

    # loss ratios are computed once and reused both for the curve
    # and for the block-level aggregate below
    loss_ratios = general.compute_loss_ratios(vuln_function, gmf_slice,
                                              epsilon_provider, asset)

    loss_ratio_curve = general.compute_loss_ratio_curve(
        vuln_function, gmf_slice, epsilon_provider, asset,
        self.job_ctxt.oq_job_profile.loss_histogram_bins,
        loss_ratios=loss_ratios)

    # side effect: feed the enclosing scope's aggregate loss curve
    aggregate_curve.append(loss_ratios * asset.value)

    return loss_ratio_curve.rescale_abscissae(asset.value)
def _compute_bcr(self, block_id):
    """
    Calculate and store in the kvs the benefit-cost ratio data for block.

    A value is stored with key :func:`openquake.kvs.tokens.bcr_block_key`.
    See :func:`openquake.risk.job.general.compute_bcr_for_block` for result
    data structure spec.

    :param block_id: id of the region block to process.
    :return: True (results are stored in the kvs as a side effect).
    """
    job_ctxt = self.job_ctxt
    job_id = job_ctxt.job_id
    block = Block.from_kvs(job_id, block_id)

    # results grouped by asset location: (x, y) -> [(data, asset_ref)]
    result = defaultdict(list)

    def on_asset_complete(asset, bcr, eal_original, eal_retrofitted):
        # collect BCR and expected annual losses for each asset
        result[(asset.site.x, asset.site.y)].append(
            ({"bcr": bcr,
              "eal_original": eal_original,
              "eal_retrofitted": eal_retrofitted},
             asset.asset_ref)
        )

    # positional arguments: sites, assets getter, original and
    # retrofitted vulnerability models, hazard getter, lrem steps,
    # interest rate, asset life expectancy, completion callback
    benefit_cost_ratio.compute(
        block.sites,
        lambda site: BaseRiskCalculator.assets_at(job_id, site),
        vulnerability.load_vuln_model_from_kvs(job_id),
        vulnerability.load_vuln_model_from_kvs(job_id, retrofitted=True),
        lambda site: self._get_db_curve(
            hazard_input_site(self.job_ctxt, site)),
        self.job_ctxt.oq_job_profile.lrem_steps_per_interval,
        float(job_ctxt.params["INTEREST_RATE"]),
        float(job_ctxt.params["ASSET_LIFE_EXPECTANCY"]),
        on_asset_complete,
    )

    bcr = result.items()
    bcr_block_key = kvs.tokens.bcr_block_key(job_ctxt.job_id, block_id)
    kvs.set_value_json_encoded(bcr_block_key, bcr)
    LOGGER.debug("bcr result for block %s: %r", block_id, bcr)

    return True
def compute_risk(self, block_id, **kwargs):
    """
    This method will perform two distinct (but similar) computations and
    return a result for each computation.

    The computations are as follows:

    First: For a given block of sites, compute loss values for all assets
    in the block. This computation will yield a single loss value per
    realization for the region block.

    Second: For each asset in the given block of sites, we need compute
    loss (where loss = loss_ratio * asset_value) for each realization.
    This gives 1 loss value _per_ asset _per_ realization. We then need
    to take the mean & standard deviation.

    Other info:

    The GMF data for each realization is stored in the KVS by the
    preceding scenario hazard job.

    :param block_id: id of the region block data we need to pull from
        the KVS
    :type block_id: str
    :keyword vuln_model:
        dict of :py:class:`openquake.shapes.VulnerabilityFunction`
        objects, keyed by the vulnerability function name as a string
    :keyword epsilon_provider:
        :py:class:`openquake.risk.job.EpsilonProvider` object
    :returns: 2-tuple of the following data:

        * 1-dimensional :py:class:`numpy.ndarray` of loss values for
          this region block (again, 1 value per realization)
        * list of 2-tuples containing site, loss, and asset information.

          The first element of each 2-tuple shall be a
          :py:class:`openquake.shapes.Site` object, which represents the
          geographical location of the asset loss.

          The second element shall be a list of 2-tuples of dicts
          representing the loss and asset data (in that order).

          Example::

            [(<Site(-117.0, 38.0)>,
              [({'mean_loss': 200.0, 'stddev_loss': 100},
                {'assetID': 'a171'}),
               ({'mean_loss': 200.0, 'stddev_loss': 100},
                {'assetID': 'a187'})]),
             (<Site(-118.0, 39.0)>,
              [({'mean_loss': 50, 'stddev_loss': 50.0},
                {'assetID': 'a192'})])]
    """
    vuln_model = kwargs["vuln_model"]
    insured_losses = kwargs["insured_losses"]
    epsilon_provider = general.EpsilonProvider(self.job_ctxt.params)
    block = general.Block.from_kvs(self.job_ctxt.job_id, block_id)

    block_losses = []
    loss_map_data = {}

    for site in block.sites:
        # ground motion values read at the hazard input site
        gmvs = {"IMLs": general.load_gmvs_at(
            self.job_ctxt.job_id, general.hazard_input_site(
                self.job_ctxt, site))}

        assets = general.BaseRiskCalculator.assets_at(
            self.job_ctxt.job_id, site)

        for asset in assets:
            vuln_function = vuln_model[asset.taxonomy]

            loss_ratios = general.compute_loss_ratios(
                vuln_function, gmvs, epsilon_provider, asset)
            losses = loss_ratios * asset.value

            if insured_losses:
                losses = general.compute_insured_losses(asset, losses)

            asset_site = shapes.Site(asset.site.x, asset.site.y)

            # per-asset loss statistics paired with the asset id
            loss = ({
                "mean_loss": numpy.mean(losses),
                "stddev_loss": numpy.std(losses, ddof=1)}, {
                "assetID": asset.asset_ref
            })

            block_losses.append(losses)
            collect_block_data(loss_map_data, asset_site, loss)

    # one summed loss value per realization for the whole block
    sum_block_losses = reduce(lambda x, y: x + y, block_losses)
    return sum_block_losses, loss_map_data
def compute_risk(self, block_id, **kwargs):
    """
    This method will perform two distinct (but similar) computations and
    return a result for each computation.

    The computations are as follows:

    First: For a given block of sites, compute loss values for all assets
    in the block. This computation will yield a single loss value per
    realization for the region block.

    Second: For each asset in the given block of sites, we need compute
    loss (where loss = loss_ratio * asset_value) for each realization.
    This gives 1 loss value _per_ asset _per_ realization. We then need
    to take the mean & standard deviation.

    Other info:

    The GMF data for each realization is stored in the KVS by the
    preceding scenario hazard job.

    :param block_id: id of the region block data we need to pull from
        the KVS
    :type block_id: str
    :keyword vuln_model:
        dict of :py:class:`openquake.shapes.VulnerabilityFunction`
        objects, keyed by the vulnerability function name as a string
    :keyword epsilon_provider:
        :py:class:`openquake.risk.job.EpsilonProvider` object
    :returns: 2-tuple of the following data:

        * 1-dimensional :py:class:`numpy.ndarray` of loss values for
          this region block (again, 1 value per realization)
        * list of 2-tuples containing site, loss, and asset information.

          The first element of each 2-tuple shall be a
          :py:class:`openquake.shapes.Site` object, which represents the
          geographical location of the asset loss.

          The second element shall be a list of 2-tuples of dicts
          representing the loss and asset data (in that order).

          Example::

            [(<Site(-117.0, 38.0)>,
              [({'mean_loss': 200.0, 'stddev_loss': 100},
                {'assetID': 'a171'}),
               ({'mean_loss': 200.0, 'stddev_loss': 100},
                {'assetID': 'a187'})]),
             (<Site(-118.0, 39.0)>,
              [({'mean_loss': 50, 'stddev_loss': 50.0},
                {'assetID': 'a192'})])]
    """
    vuln_model = kwargs["vuln_model"]
    insured_losses = kwargs["insured_losses"]
    epsilon_provider = general.EpsilonProvider(self.job_ctxt.params)
    block = general.Block.from_kvs(self.job_ctxt.job_id, block_id)

    block_losses = []
    loss_map_data = {}

    for site in block.sites:
        # ground motion values read at the hazard input site
        gmvs = {
            "IMLs": general.load_gmvs_at(
                self.job_ctxt.job_id,
                general.hazard_input_site(self.job_ctxt, site))
        }

        assets = general.BaseRiskCalculator.assets_at(
            self.job_ctxt.job_id, site)

        for asset in assets:
            vuln_function = vuln_model[asset.taxonomy]

            loss_ratios = general.compute_loss_ratios(
                vuln_function, gmvs, epsilon_provider, asset)
            losses = loss_ratios * asset.value

            if insured_losses:
                losses = general.compute_insured_losses(asset, losses)

            asset_site = shapes.Site(asset.site.x, asset.site.y)

            # per-asset loss statistics paired with the asset id
            loss = ({
                "mean_loss": numpy.mean(losses),
                "stddev_loss": numpy.std(losses, ddof=1)
            }, {
                "assetID": asset.asset_ref
            })

            block_losses.append(losses)
            collect_block_data(loss_map_data, asset_site, loss)

    # one summed loss value per realization for the whole block
    sum_block_losses = reduce(lambda x, y: x + y, block_losses)
    return sum_block_losses, loss_map_data
def compute_risk(self, block_id, **kwargs):
    """
    Compute the results for a single block.

    Currently we support the computation of:
    * damage distributions per asset
    * damage distributions per building taxonomy
    * total damage distribution
    * collapse maps

    :param block_id: id of the region block data.
    :type block_id: integer
    :keyword fmodel: fragility model associated to this computation.
    :type fmodel: instance of
        :py:class:`openquake.db.models.FragilityModel`
    :return: the sum of the fractions (for each damage state)
        per asset taxonomy for the computed block.
    :rtype: `dict` where each key is a string representing a
        taxonomy and each value is the sum of fractions of all
        the assets related to that taxonomy (represented as
        a 2d `numpy.array`)
    """
    fm = kwargs["fmodel"]
    block = general.Block.from_kvs(self.job_ctxt.job_id, block_id)

    # pick the discrete or continuous fragility function set
    fset = fm.ffd_set if fm.format == "discrete" else fm.ffc_set

    # fractions of each damage state per building taxonomy
    # for the given block
    ddt_fractions = {}

    for site in block.sites:
        # ground motion values read at the hazard input site
        gmf = general.load_gmvs_at(self.job_ctxt.job_id,
                                   general.hazard_input_site(
                                       self.job_ctxt, site))

        assets = general.BaseRiskCalculator.assets_at(
            self.job_ctxt.job_id, site)

        for asset in assets:
            # fragility functions for this taxonomy, ordered by
            # limit state index
            funcs = fset.filter(
                taxonomy=asset.taxonomy).order_by("lsi")

            assert len(funcs) > 0, ("no limit states associated "
                                    "with taxonomy %s of asset %s.") % (
                                    asset.taxonomy, asset.asset_ref)

            fractions = compute_gmf_fractions(
                gmf, funcs) * asset.number_of_units

            # accumulate per-taxonomy fractions, starting from zeros
            current_fractions = ddt_fractions.get(
                asset.taxonomy, numpy.zeros((len(gmf), len(funcs) + 1)))

            ddt_fractions[asset.taxonomy] = current_fractions + fractions
            self._store_dda(fractions, asset, fm)

            # the collapse map needs the fractions
            # for each ground motion value of the
            # last damage state (the last column)
            self._store_cmap(fractions[:, -1], asset)

    return ddt_fractions
def compute_risk(self, block_id, **kwargs):
    """
    This method will perform two distinct (but similar) computations and
    return a result for each computation.

    The computations are as follows:

    First: For a given block of sites, compute loss values for all assets
    in the block. This computation will yield a single loss value per
    realization for the region block.

    Second: For each asset in the given block of sites, we need compute
    loss (where loss = loss_ratio * asset_value) for each realization.
    This gives 1 loss value _per_ asset _per_ realization. We then need
    to take the mean & standard deviation.

    Other info:

    The GMF data for each realization is stored in the KVS by the
    preceding scenario hazard job.

    :param block_id: id of the region block data we need to pull from
        the KVS
    :type block_id: str
    :keyword vuln_model:
        dict of :py:class:`openquake.shapes.VulnerabilityFunction`
        objects, keyed by the vulnerability function name as a string
    :keyword epsilon_provider:
        :py:class:`openquake.risk.job.EpsilonProvider` object
    :returns: 2-tuple of the following data:

        * 1-dimensional :py:class:`numpy.ndarray` of loss values for
          this region block (again, 1 value per realization)
        * list of 2-tuples containing site, loss, and asset information.

          The first element of each 2-tuple shall be a
          :py:class:`openquake.shapes.Site` object, which represents the
          geographical location of the asset loss.

          The second element shall be a list of 2-tuples of dicts
          representing the loss and asset data (in that order).

          Example::

            [(<Site(-117.0, 38.0)>,
              [({'mean_loss': 200.0, 'stddev_loss': 100},
                {'assetID': 'a171'}),
               ({'mean_loss': 200.0, 'stddev_loss': 100},
                {'assetID': 'a187'})]),
             (<Site(-118.0, 39.0)>,
              [({'mean_loss': 50, 'stddev_loss': 50.0},
                {'assetID': 'a192'})])]
    """
    insured = kwargs["insured_losses"]
    vulnerability_model = kwargs["vuln_model"]

    seed, correlation_type = self._get_correlation_type()
    block = general.Block.from_kvs(self.job_ctxt.job_id, block_id)

    # PEP 8 (E731): local defs instead of lambdas bound to names.
    def assets_getter(site):
        """Return the assets defined at the given site."""
        return general.BaseRiskCalculator.assets_at(
            self.job_ctxt.job_id, site)

    def hazard_getter(site):
        """Return the ground motion values at the hazard input site."""
        return general.load_gmvs_at(
            self.job_ctxt.job_id,
            general.hazard_input_site(self.job_ctxt, site))

    loss_map_data = {}

    calculator = api.scenario_risk(
        vulnerability_model, seed, correlation_type, insured)

    for asset_output in api.compute_on_sites(
            block.sites, assets_getter, hazard_getter, calculator):

        asset_site = shapes.Site(asset_output.asset.site.x,
                                 asset_output.asset.site.y)

        collect_block_data(
            loss_map_data, asset_site,
            ({"mean_loss": asset_output.mean,
              "stddev_loss": asset_output.standard_deviation},
             {"assetID": asset_output.asset.asset_ref}))

    return calculator.aggregate_losses, loss_map_data
def compute_risk(self, block_id, **kwargs):
    """
    Compute the results for a single block.

    Currently we support the computation of:
    * damage distributions per asset
    * damage distributions per building taxonomy
    * total damage distribution
    * collapse maps

    :param block_id: id of the region block data.
    :type block_id: integer
    :keyword fmodel: fragility model associated to this computation.
    :type fmodel: instance of
        :py:class:`openquake.db.models.FragilityModel`
    :return: the sum of the fractions (for each damage state)
        per asset taxonomy for the computed block.
    :rtype: `dict` where each key is a string representing a
        taxonomy and each value is the sum of fractions of all
        the assets related to that taxonomy (represented as
        a 2d `numpy.array`)
    """
    fm = kwargs["fmodel"]
    block = general.Block.from_kvs(self.job_ctxt.job_id, block_id)

    # pick the discrete or continuous fragility function set
    fset = fm.ffd_set if fm.format == "discrete" else fm.ffc_set

    # fractions of each damage state per building taxonomy
    # for the given block
    ddt_fractions = {}

    for site in block.sites:
        # ground motion values read at the hazard input site
        gmf = general.load_gmvs_at(
            self.job_ctxt.job_id,
            general.hazard_input_site(self.job_ctxt, site))

        assets = general.BaseRiskCalculator.assets_at(
            self.job_ctxt.job_id, site)

        for asset in assets:
            # fragility functions for this taxonomy, ordered by
            # limit state index
            funcs = fset.filter(taxonomy=asset.taxonomy).order_by("lsi")

            assert len(funcs) > 0, ("no limit states associated "
                                    "with taxonomy %s of asset %s.") % (
                                    asset.taxonomy, asset.asset_ref)

            fractions = compute_gmf_fractions(
                gmf, funcs) * asset.number_of_units

            # accumulate per-taxonomy fractions, starting from zeros
            current_fractions = ddt_fractions.get(
                asset.taxonomy, numpy.zeros((len(gmf), len(funcs) + 1)))

            ddt_fractions[asset.taxonomy] = current_fractions + fractions
            self._store_dda(fractions, asset, fm)

            # the collapse map needs the fractions
            # for each ground motion value of the
            # last damage state (the last column)
            self._store_cmap(fractions[:, -1], asset)

    return ddt_fractions
def hazard_getter(site):
    """Return the GMF data (IMLs, TSES, time span) for the given site."""
    hazard_site = general.hazard_input_site(self.job_ctxt, site)
    return {
        "IMLs": self._get_gmvs_at(hazard_site),
        "TSES": self._tses(),
        "TimeSpan": self._time_span(),
    }