def _set_up_outputs(self):
    """Create and persist the db-backed Output fixtures used by the tests."""
    def _saved_output(job, out_type):
        # Build and save an Output owned by the given job's owner.
        record = models.Output(owner=job.owner, oq_job=job,
                               db_backed=True, output_type=out_type)
        record.save()
        return record

    self.uhs_output = _saved_output(self.uhs_job, 'uh_spectra')
    self.cpsha_hc_output = _saved_output(self.cpsha_job_fail, 'hazard_curve')
    self.cpsha_mean_hc_output = _saved_output(self.cpsha_job_fail,
                                              'hazard_curve')
    self.cpsha_lc_output = _saved_output(self.cpsha_job_fail, 'loss_curve')
def setUpClass(cls):
    """Build the shared database fixture for the damage-distribution tests.

    Creates one OqJob plus, for each damage-distribution flavor
    (per asset, per taxonomy, total), an Output container and the
    corresponding distribution record initialized with ``cls.DMG_STATES``.
    Also inserts the exposure input/model/data rows required to satisfy
    the dmg-dist-per-asset foreign keys.

    NOTE(review): presumably decorated with ``@classmethod`` outside
    this view — confirm at the call site.
    """
    default_user = helpers.default_user()

    cls.job = models.OqJob(owner=default_user)
    cls.job.save()

    # dmg dist per asset
    cls.ddpa_output = models.Output(
        owner=default_user, oq_job=cls.job,
        display_name='Test dmg dist per asset',
        output_type='dmg_dist_per_asset', db_backed=True)
    cls.ddpa_output.save()
    cls.ddpa = models.DmgDistPerAsset(
        output=cls.ddpa_output, dmg_states=cls.DMG_STATES)
    cls.ddpa.save()

    # We also need some sample exposure data records (to satisfy the dmg
    # dist per asset FK).
    test_input = models.Input(
        owner=default_user, input_type='exposure', path='fake', size=0)
    test_input.save()
    # Link the exposure input to the job.
    i2j = models.Input2job(input=test_input, oq_job=cls.job)
    i2j.save()
    exp_model = models.ExposureModel(
        owner=default_user, input=test_input, name='test-exp-model',
        category='economic loss', stco_type='per_asset', stco_unit='CHF')
    exp_model.save()
    test_site = shapes.Site(3.14, 2.17)
    cls.exp_data = models.ExposureData(  # Asset
        exposure_model=exp_model, asset_ref=helpers.random_string(),
        taxonomy=helpers.random_string(), number_of_units=37,
        site=test_site.point.to_wkt(), stco=1234.56)
    cls.exp_data.save()

    # dmg dist per taxonomy
    cls.ddpt_output = models.Output(
        owner=default_user, oq_job=cls.job,
        display_name='Test dmg dist per taxonomy',
        output_type='dmg_dist_per_taxonomy', db_backed=True)
    cls.ddpt_output.save()
    cls.ddpt = models.DmgDistPerTaxonomy(
        output=cls.ddpt_output, dmg_states=cls.DMG_STATES)
    cls.ddpt.save()

    # total dmg dist
    cls.ddt_output = models.Output(
        owner=default_user, oq_job=cls.job,
        display_name='Test dmg dist total',
        output_type='dmg_dist_total', db_backed=True)
    cls.ddt_output.save()
    cls.ddt = models.DmgDistTotal(
        output=cls.ddt_output, dmg_states=cls.DMG_STATES)
    cls.ddt.save()
def insert_output(self, output_type):
    """Insert an `uiapi.output` record for the job at hand.

    Reuses an existing Output when exactly one match is found;
    otherwise a fresh record is created and saved.
    """
    assert self.output is None
    job = models.OqJob.objects.get(id=self.oq_job_id)

    # al-maisan, Fri, 29 Jun 2012 17:36:35 +0200
    # https://bugs.launchpad.net/openquake/+bug/1019317
    # figure out why using a single output record for gmf_data breaks the
    # probablistic event-based risk calculator.
    if output_type == "gmf":
        matches = []
    else:
        matches = models.Output.objects.filter(
            oq_job=job,
            display_name=basename(self.nrml_path),
            output_type=output_type)

    if len(matches) == 1:
        self.output = matches[0]
        LOGGER.info("using output = '%s'", self.output)
        return

    new_output = models.Output(
        owner=job.owner, oq_job=job, db_backed=True,
        display_name=basename(self.nrml_path), output_type=output_type)
    new_output.save()
    self.output = new_output
    LOGGER.info("creating output = '%s'", self.output)
def make_dist(self):
    """Create and save a total damage distribution for the current job."""
    container = models.Output(
        owner=self.job.owner, oq_job=self.job, display_name="",
        db_backed=True, output_type="dmg_dist_total")
    container.save()

    dist = models.DmgDistTotal(
        output=container, dmg_states=self.damage_states)
    dist.save()
    self.ddt = dist
def make_map(self):
    """Create and save a collapse map tied to the exposure model."""
    container = models.Output(
        owner=self.job.owner, oq_job=self.job, display_name="",
        db_backed=True, output_type="collapse_map")
    container.save()

    cmap = models.CollapseMap(output=container, exposure_model=self.em)
    cmap.save()
    self.cm = cmap
def make_dist(self):
    """Create and save a per-asset damage distribution for the job."""
    container = models.Output(
        owner=self.job.owner, oq_job=self.job, display_name="",
        db_backed=True, output_type="dmg_dist_per_asset")
    container.save()

    dist = models.DmgDistPerAsset(
        output=container, dmg_states=self.damage_states)
    dist.save()
    self.dda = dist
def post_execute(self):
    """Perform the post-execution actions for this calculation.

    * Write loss curves to XML
    * Save the aggregate loss curve to the database
    * Write BCR output (NOTE: in BCR mode, none of the other
      artifacts are written)

    Which actions run depends on the configuration of the job.
    """
    # BCR mode short-circuits everything else.
    if self.is_benefit_cost_ratio_mode():
        self.write_output_bcr()
        return

    self.write_output()

    # Persist the aggregate loss curve to the database.
    job = self.job_ctxt.oq_job
    agg_lc_display_name = ('Aggregate Loss Curve for calculation %s'
                           % job.id)

    container = models.Output(
        oq_job=job, owner=job.owner, display_name=agg_lc_display_name,
        db_backed=True, output_type='agg_loss_curve')
    container.save()

    curve = models.LossCurve(output=container, aggregate=True)
    curve.save()

    models.AggregateLossCurveData(
        loss_curve=curve,
        losses=self.agg_curve.x_values,
        poes=self.agg_curve.y_values).save()