def execute(self):
    """
    Build the hazard side of the computation and then run the risk.

    One ``Gmf`` output container is created per logic-tree realization
    (any stale realizations attached to the hazard calculation are
    removed first), after which :meth:`compute_risk` is invoked.
    """
    # set up an event-based hazard calculator sharing our hazard job
    self.hcalc = EBHC(self.hc.oqjob)
    self.hcalc.source_model_lt = SourceModelLogicTree.from_hc(self.hc)
    with db.transaction.commit_on_success(using='job_init'):
        # TODO: think about how to remove the need for .delete();
        # one should retrieve only the latest realizations for a
        # given hazard calculation, or associate realizations to
        # RiskCalculation instead of HazardCalculation
        models.LtRealization.objects.filter(
            lt_model__hazard_calculation=self.hc).delete()
        self.hcalc.initialize_realizations()
        # one GMF output container per freshly created realization
        for realization in self.hcalc._get_realizations():
            gmf_output = models.Output.objects.create(
                oq_job=self.job,
                output_type='gmf',
                display_name='GMF rlz-%s' % realization.id)
            models.Gmf.objects.create(
                output=gmf_output, lt_realization=realization)
    self.compute_risk()
class EventBasedFRRiskCalculator(core.EventBasedRiskCalculator):
    """
    Event-based risk calculator that drives its own hazard (GMF)
    generation and forbids any form of correlation.
    """

    def pre_execute(self):
        """
        Inherited from core.EventBasedRiskCalculator.pre_execute.
        Enforces no correlation, both on GMFs and assets.
        """
        correl_model = models.get_correl_model(self.job)
        # this calculator supports neither ground-motion correlation
        # nor asset correlation; NOTE(review): these asserts are
        # stripped when Python runs with -O -- confirm that is acceptable
        assert correl_model is None, correl_model
        assert not self.rc.asset_correlation, self.rc.asset_correlation
        core.EventBasedRiskCalculator.pre_execute(self)

    @EnginePerformanceMonitor.monitor
    def execute(self):
        """
        Prepare the hazard side of the computation (one Gmf output
        container per logic-tree realization, after deleting any stale
        realizations attached to the hazard calculation) and then call
        :meth:`compute_risk`.
        """
        # create a Gmf output for each realization
        self.hcalc = EBHC(self.hc.oqjob)
        self.hcalc.source_model_lt = SourceModelLogicTree.from_hc(self.hc)
        with db.transaction.commit_on_success(using='job_init'):
            # TODO: think about how to remove the need for .delete()
            # one should retrieve only the latest realizations
            # for a given hazard calculation; alternatively, the
            # realizations should be associated to RiskCalculation,
            # not to HazardCalculation
            models.LtRealization.objects.filter(
                lt_model__hazard_calculation=self.hc).delete()
            self.hcalc.initialize_realizations()
            for rlz in self.hcalc._get_realizations():
                output = models.Output.objects.create(
                    oq_job=self.job,
                    display_name='GMF rlz-%s' % rlz.id,
                    output_type='gmf')
                models.Gmf.objects.create(output=output, lt_realization=rlz)
        self.compute_risk()

    def compute_risk(self):
        """
        Generate the GMFs and optionally the hazard curves too, then
        compute the risk.

        Assets are associated to sites per taxonomy (taxonomies whose
        assets cannot be associated are skipped with a warning), the
        output containers are committed to the database, and the work
        is then distributed over chunks of sites via map_reduce.
        """
        getter_builders = []
        risk_models = []
        with self.monitor('associating assets<->sites'):
            for risk_model in self.risk_models.itervalues():
                logs.LOG.info('associating assets<->sites for taxonomy %s',
                              risk_model.taxonomy)
                try:
                    # each taxonomy is associated in its own transaction,
                    # so one failing taxonomy does not roll back the others
                    with db.transaction.commit_on_success(using='job_init'):
                        gbuilder = GetterBuilder(self.rc, risk_model.taxonomy)
                        getter_builders.append(gbuilder)
                        risk_models.append(risk_model)
                except AssetSiteAssociationError as e:
                    # skip taxonomies with no asset<->site association
                    logs.LOG.warn(str(e))
                    continue

        # notice that here the commit is really needed, since
        # combine_builders save the loss curve containers on the db
        with db.transaction.commit_on_success(using='job_init'):
            outputdict = writers.combine_builders(
                [ob(self) for ob in self.output_builders])

        args = []
        # compute the risk by splitting by sites
        for sites in split_site_collection(
                self.hc.site_collection, self.concurrent_tasks):
            args.append((self.job.id, sites, self.rc, risk_models,
                         getter_builders, outputdict,
                         self.calculator_parameters))
        self.acc = tasks.map_reduce(event_based_fr, args, self.agg_result, {})