def test_export_for_scenario(self):
    """Run a scenario hazard job and smoke-test the single GMF export."""
    export_dir = tempfile.mkdtemp()
    try:
        job_ini = helpers.get_data_path('scenario_hazard/job.ini')
        # Execute the calculation in-process so there is something to export.
        os.environ['OQ_NO_DISTRIBUTE'] = '1'
        try:
            helpers.run_hazard_job(job_ini)
        finally:
            del os.environ['OQ_NO_DISTRIBUTE']
        job = models.OqJob.objects.latest('id')
        self.assertEqual(job.status, 'complete')

        outputs = export_core.get_outputs(job.id)
        # A scenario calculation produces exactly one output: the GMF.
        self.assertEqual(1, len(outputs))
        gmf_outputs = outputs.filter(output_type='gmf_scenario')
        self.assertEqual(1, len(gmf_outputs))

        gmf_path = check_export(gmf_outputs[0].id, export_dir)
        # The exported path must exist, be absolute, and be non-empty.
        self._test_exported_file(gmf_path)
        # The file must contain exactly 20 GMF elements.
        self.assertEqual(20, number_of('nrml:gmf', etree.parse(gmf_path)))
    finally:
        shutil.rmtree(export_dir)
def test_export_for_scenario(self):
    """Run a scenario hazard job and smoke-test the single GMF export."""
    export_dir = tempfile.mkdtemp()
    try:
        job_ini = helpers.get_data_path('scenario_hazard/job.ini')
        # Execute the calculation in-process so there is something to export.
        os.environ['OQ_NO_DISTRIBUTE'] = '1'
        try:
            helpers.run_hazard_job(job_ini)
        finally:
            del os.environ['OQ_NO_DISTRIBUTE']
        job = models.OqJob.objects.latest('id')
        self.assertEqual(job.status, 'complete')

        outputs = export_core.get_outputs(job.id)
        # A scenario calculation produces exactly one output: the GMF.
        self.assertEqual(1, len(outputs))
        gmf_outputs = outputs.filter(output_type='gmf_scenario')
        self.assertEqual(1, len(gmf_outputs))

        exported = check_export(gmf_outputs[0].id, export_dir)
        self.assertEqual(1, len(exported))
        gmf_path = exported[0]
        # The exported path must exist, be absolute, and be non-empty.
        self._test_exported_file(gmf_path)
        # The file must contain exactly 20 GMF elements.
        self.assertEqual(20, number_of('nrml:gmf', etree.parse(gmf_path)))
    finally:
        shutil.rmtree(export_dir)
def get_hazard_job(self):
    """
    Return the hazard :class:`OqJob` to use as input for the risk test.

    If no suitable hazard input exists yet, run the hazard calculation
    from ``job_haz.ini`` and assert it completed; otherwise reuse the
    most recently updated existing one.  When ``self.save_load`` is set,
    additionally round-trip the hazard calculation through the dump/load
    machinery and return the reloaded job instead of the original.
    """
    if not self._get_queryset().exists():
        warnings.warn("Computing Hazard input from scratch")
        job = helpers.run_hazard_job(
            self._test_path('job_haz.ini'))
        self.assertEqual('complete', job.status)
    else:
        warnings.warn("Using existing Hazard input")
        job = self._get_queryset().latest('oqjob__last_update').oqjob

    if self.save_load:
        # Dump the hazard calculation to an archive.
        # Close the opened transactions
        saved_calculation = save_hazards.main(job.hazard_calculation.id)

        # FIXME Here on, to avoid deadlocks due to stale
        # transactions, we commit all the opened transactions. We
        # should find who is responsible for the eventual opened
        # transaction
        connection = models.getcursor('job_init').connection
        if connection is not None:
            connection.commit()

        # Reload the dump; hazard_load returns exactly one calculation id
        # (the single-element unpacking asserts that).
        [load_calculation] = load_hazards.hazard_load(
            models.getcursor('admin').connection, saved_calculation)
        return models.OqJob.objects.get(
            hazard_calculation__id=load_calculation)
    else:
        return job
def test_complete_event_based_calculation_cycle(self):
    """
    Run a full event-based hazard calculation with the SES/GMF saving
    functions patched (see ``self._patch_calc``), and verify both the
    call counts of the patched functions and the resulting database
    outputs (complete-LT SES, hazard curves, hazard maps).
    """
    self._patch_calc()
    try:
        # Imported after patching so the mock attributes are in place.
        from openquake.hazardlib import calc
        from openquake.engine.calculators.hazard.event_based import core
        ses_mock = calc.stochastic.stochastic_event_set_poissonian
        gmf_mock = calc.gmf.ground_motion_fields
        save_rup_mock = core._save_ses_rupture
        save_gmf_mock = core._save_gmfs
        # run the calculation in process (to easy debugging)
        # and check the outputs
        os.environ["OQ_NO_DISTRIBUTE"] = "1"
        try:
            job = helpers.run_hazard_job(self.cfg)
        finally:
            del os.environ["OQ_NO_DISTRIBUTE"]
        hc = job.hazard_calculation
        # Unpacking into exactly two names also asserts that exactly two
        # logic-tree realizations were created.
        rlz1, rlz2 = models.LtRealization.objects.filter(
            hazard_calculation=hc.id).order_by("ordinal")
        # check that the parameters are read correctly from the files
        self.assertEqual(hc.ses_per_logic_tree_path, 5)
        # check that we called the right number of times the patched
        # functions: 40 = 2 Lt * 4 sources * 5 ses = 8 tasks * 5 ses
        self.assertEqual(ses_mock.call_count, 40)
        self.assertEqual(save_rup_mock.call_count, 80)  # 2 rupt per ses
        self.assertEqual(gmf_mock.call_count, 80)  # 2 ruptures per ses
        self.assertEqual(save_gmf_mock.call_count, 40)  # num_tasks * ses
        # Check the complete logic tree SES
        complete_lt_ses = models.SES.objects.get(
            ses_collection__output__oq_job=job.id,
            ses_collection__output__output_type="complete_lt_ses",
            ordinal=None,
        )
        # Test the computed `investigation_time`
        # 2 lt realizations * 5 ses_per_logic_tree_path * 50.0 years
        self.assertEqual(500.0, complete_lt_ses.investigation_time)
        self.assertIsNone(complete_lt_ses.ordinal)
        # Now check for the correct number of hazard curves:
        curves = models.HazardCurve.objects.filter(output__oq_job=job)
        # ((2 IMTs * 2 real) + (2 IMTs * (1 mean + 2 quantiles))) = 10
        # + 3 mean and quantiles multi-imt curves
        self.assertEqual(13, curves.count())
        # Finally, check for the correct number of hazard maps:
        maps = models.HazardMap.objects.filter(output__oq_job=job)
        # ((2 poes * 2 realizations * 2 IMTs)
        # + (2 poes * 2 IMTs * (1 mean + 2 quantiles))) = 20
        self.assertEqual(20, maps.count())
    finally:
        self._unpatch_calc()
def get_hazard_job(self):
    """Return a completed hazard job, reusing an existing one if present."""
    if self._get_queryset().exists():
        # Reuse the most recently updated hazard input already in the db.
        warnings.warn("Using existing Hazard input")
        return self._get_queryset().latest('oqjob__last_update').oqjob
    # Nothing suitable found: run the hazard calculation from scratch.
    warnings.warn("Computing Hazard input from scratch")
    hazard_job = helpers.run_hazard_job(self._test_path('job_haz.ini'))
    self.assertEqual('complete', hazard_job.status)
    return hazard_job
def test_disagg_hazard_export(self):
    """Disaggregation: run the job, then export curves and matrices."""
    export_dir = tempfile.mkdtemp()
    try:
        job_ini = helpers.get_data_path('disaggregation/job.ini')
        # Execute the calculation in-process so there is something to export.
        os.environ['OQ_NO_DISTRIBUTE'] = '1'
        try:
            helpers.run_hazard_job(job_ini)
        finally:
            del os.environ['OQ_NO_DISTRIBUTE']
        job = models.OqJob.objects.latest('id')
        self.assertEqual(job.status, 'complete')

        outputs = export_core.get_outputs(job.id)

        # Hazard curve export:
        curves = outputs.filter(output_type='hazard_curve')
        self.assertEqual(4, len(curves))
        curve_files = [check_export(c.id, export_dir) for c in curves]
        self.assertEqual(4, len(curve_files))
        for path in curve_files:
            self._test_exported_file(path)

        # Disaggregation matrix export:
        matrices = outputs.filter(output_type='disagg_matrix')
        self.assertEqual(8, len(matrices))
        disagg_files = [check_export(m.id, export_dir) for m in matrices]
        self.assertEqual(8, len(disagg_files))
        for path in disagg_files:
            self._test_exported_file(path)
    finally:
        shutil.rmtree(export_dir)
def test_disagg_hazard_export(self):
    """Disaggregation: run the job, then export curves and matrices."""
    export_dir = tempfile.mkdtemp()
    try:
        job_ini = helpers.get_data_path('disaggregation/job.ini')
        # Execute the calculation in-process so there is something to export.
        os.environ['OQ_NO_DISTRIBUTE'] = '1'
        try:
            helpers.run_hazard_job(job_ini)
        finally:
            del os.environ['OQ_NO_DISTRIBUTE']
        job = models.OqJob.objects.latest('id')
        self.assertEqual(job.status, 'complete')

        outputs = export_core.get_outputs(job.id)

        # Hazard curve export (check_export returns a list of paths):
        curves = outputs.filter(output_type='hazard_curve')
        self.assertEqual(4, len(curves))
        curve_files = [path
                       for c in curves
                       for path in check_export(c.id, export_dir)]
        self.assertEqual(4, len(curve_files))
        for path in curve_files:
            self._test_exported_file(path)

        # Disaggregation matrix export:
        matrices = outputs.filter(output_type='disagg_matrix')
        self.assertEqual(8, len(matrices))
        disagg_files = [path
                        for m in matrices
                        for path in check_export(m.id, export_dir)]
        self.assertEqual(8, len(disagg_files))
        for path in disagg_files:
            self._test_exported_file(path)
    finally:
        shutil.rmtree(export_dir)
def test_complete_event_based_calculation_cycle(self):
    """
    Run a full event-based hazard calculation with the SES-saving
    functions patched (see ``self._patch_calc``), and verify both the
    call counts of the patched functions and the resulting database
    outputs (complete-LT SES, hazard curves, hazard maps).
    """
    self._patch_calc()
    try:
        # Imported after patching so the mock attributes are in place.
        from openquake.hazardlib import calc
        from openquake.engine.calculators.hazard.event_based import core
        ses_mock = calc.stochastic.stochastic_event_set_poissonian
        save_rup_mock = core._save_ses_ruptures
        # run the calculation in process (to easy debugging)
        # and check the outputs; notice that since the save_ses
        # part is mocked the gmf won't be computed
        os.environ['OQ_NO_DISTRIBUTE'] = '1'
        try:
            job = helpers.run_hazard_job(self.cfg)
        finally:
            del os.environ['OQ_NO_DISTRIBUTE']
        hc = job.hazard_calculation
        # Unpacking into exactly two names also asserts that exactly two
        # logic-tree realizations were created.
        rlz1, rlz2 = models.LtRealization.objects.filter(
            hazard_calculation=hc.id).order_by('ordinal')
        # check that the parameters are read correctly from the files
        self.assertEqual(hc.ses_per_logic_tree_path, 5)
        # check that we called the right number of times the patched
        # functions: 40 = 2 Lt * 4 sources * 5 ses = 8 tasks * 5 ses
        self.assertEqual(ses_mock.call_count, 40)
        # _save_ses_ruptures appears to be called once per SES
        # (40 = number of SES invocations above) — batch save, presumably;
        # TODO confirm against core._save_ses_ruptures
        self.assertEqual(save_rup_mock.call_count, 40)
        # Check the complete logic tree SES
        complete_lt_ses = models.SES.objects.get(
            ses_collection__output__oq_job=job.id,
            ses_collection__output__output_type='complete_lt_ses',
            ordinal=None)
        # Test the computed `investigation_time`
        # 2 lt realizations * 5 ses_per_logic_tree_path * 50.0 years
        self.assertEqual(500.0, complete_lt_ses.investigation_time)
        self.assertIsNone(complete_lt_ses.ordinal)
        # Now check for the correct number of hazard curves:
        curves = models.HazardCurve.objects.filter(output__oq_job=job)
        # ((2 IMTs * 2 real) + (2 IMTs * (1 mean + 2 quantiles))) = 10
        # + 3 mean and quantiles multi-imt curves
        self.assertEqual(13, curves.count())
        # Finally, check for the correct number of hazard maps:
        maps = models.HazardMap.objects.filter(output__oq_job=job)
        # ((2 poes * 2 realizations * 2 IMTs)
        # + (2 poes * 2 IMTs * (1 mean + 2 quantiles))) = 20
        self.assertEqual(20, maps.count())
    finally:
        self._unpatch_calc()
def test_complete_event_based_calculation_cycle(self):
    """
    Run a full (unmocked) event-based hazard calculation and check the
    rupture/GMF row counts and the derived curve/map outputs.
    """
    # Run in-process (easier debugging) and inspect the outputs.
    os.environ['OQ_NO_DISTRIBUTE'] = '1'
    try:
        job = helpers.run_hazard_job(self.cfg)
    finally:
        del os.environ['OQ_NO_DISTRIBUTE']

    haz_calc = job.hazard_calculation
    # Unpacking into two names also asserts exactly two realizations exist.
    first_rlz, second_rlz = models.LtRealization.objects.filter(
        hazard_calculation=haz_calc.id).order_by('ordinal')

    # The configuration parameters must be read correctly from the files.
    self.assertEqual(haz_calc.ses_per_logic_tree_path, 5)

    # The rupture count is reproducible because the seeds are fixed.
    rupture_count = models.SESRupture.objects.filter(
        ses__ses_collection__output__oq_job=job.id).count()
    self.assertEqual(rupture_count, 404)

    # Each realization yields 1210 GmfData rows:
    # 1210 = 121 sites * 5 ses * 2 IMTs
    for rlz in (first_rlz, second_rlz):
        gmf_rows = models.GmfData.objects.filter(
            gmf__lt_realization=rlz).count()
        self.assertEqual(gmf_rows, 1210)

    # The complete logic tree SES must exist and have no ordinal.
    complete_lt_ses = models.SES.objects.get(
        ses_collection__output__oq_job=job.id,
        ses_collection__output__output_type='complete_lt_ses',
        ordinal=None)
    # investigation_time = 2 realizations * 5 ses_per_logic_tree_path * 50.0
    self.assertEqual(500.0, complete_lt_ses.investigation_time)
    self.assertIsNone(complete_lt_ses.ordinal)

    # Hazard curves:
    # ((2 IMTs * 2 real) + (2 IMTs * (1 mean + 2 quantiles))) = 10
    # + 3 mean/quantile multi-imt curves = 13
    curve_qs = models.HazardCurve.objects.filter(output__oq_job=job)
    self.assertEqual(13, curve_qs.count())

    # Hazard maps:
    # ((2 poes * 2 realizations * 2 IMTs)
    # + (2 poes * 2 IMTs * (1 mean + 2 quantiles))) = 20
    map_qs = models.HazardMap.objects.filter(output__oq_job=job)
    self.assertEqual(20, map_qs.count())
def test(self):
    """Regression test: a 1-realization, no-statistics hazard map job
    must run to completion."""
    # The bug can be reproduced with any hazard calculation profile
    # having these parameters:
    #
    # * number_of_logic_tree_samples = 1
    # * mean_hazard_curves = false
    # * quantile_hazard_curves =
    # * poes = at least one PoE
    job_ini = helpers.get_data_path(
        'calculators/hazard/classical/haz_map_1rlz_no_stats.ini'
    )
    completed_job = helpers.run_hazard_job(job_ini)
    self.assertEqual(completed_job.status, 'complete')
def run_hazard(self, cfg, exports=None):
    """
    Run the hazard job defined by ``cfg`` and assert it finished.

    :param str cfg:
        Path to a job config file.
    :param list exports:
        A list of export format types. Currently only 'xml' is
        supported.
    :returns:
        The completed :class:`~openquake.engine.db.models.OqJob`.
    :raises:
        :exc:`AssertionError` if the job was not successfully run.
    """
    job = helpers.run_hazard_job(cfg, exports=exports)
    self.assertEqual('complete', job.status)
    return job
def run_hazard(self, cfg, exports=None):
    """
    Run the hazard job defined by ``cfg`` and assert it finished.

    :param str cfg:
        Path to a job config file.
    :param list exports:
        A list of export format types. Currently only 'xml' is
        supported.
    :returns:
        The completed :class:`~openquake.engine.db.models.OqJob`.
    :raises:
        :exc:`AssertionError` if the job was not successfully run.
    """
    # Force in-process execution (OQ_NO_DISTRIBUTE) so these tests are
    # counted in our code coverage.
    no_distribute = {openquake.engine.NO_DISTRIBUTE_VAR: "1"}
    with patch.dict("os.environ", no_distribute):
        job = helpers.run_hazard_job(cfg, exports=exports)
    self.assertEqual("complete", job.status)
    return job
def test_export_for_event_based(self):
    """
    End-to-end smoke test of the event-based hazard exporters: run the
    calculation, then export every SES, GMF, curve and map output and
    check each exported file exists, is absolute and is non-empty.
    """
    # Run an event-based hazard calculation to compute SESs and GMFs
    # Call the exporters for both SES and GMF results and verify that
    # files were created
    # Since the XML writers (in `openquake.nrmllib.writers`) are concerned
    # with correctly generating the XML, we don't test that here...
    # but we should still have an end-to-end QA test.
    target_dir = tempfile.mkdtemp()
    try:
        cfg = helpers.get_data_path('event_based_hazard/job.ini')
        # run the calculation in process to create something to export
        os.environ['OQ_NO_DISTRIBUTE'] = '1'
        try:
            helpers.run_hazard_job(cfg)
        finally:
            del os.environ['OQ_NO_DISTRIBUTE']
        job = models.OqJob.objects.latest('id')
        self.assertEqual(job.status, 'complete')
        outputs = export_core.get_outputs(job.id)
        # 2 GMFs, 2 SESs, 1 complete logic tree SES, 1 complete LT GMF,
        # ((2 imts * 2 realizations)
        # + ((2 imts + 1 multi) * (1 mean + 3 quantiles))
        # hazard curves,
        # (2 poes * 2 imts * 2 realizations)
        # + (2 poes * 2 imts * (1 mean + 3 quantiles)) hazard maps
        # Total: 46
        self.assertEqual(46, len(outputs))
        #######
        # SESs:
        ses_outputs = outputs.filter(output_type='ses')
        self.assertEqual(2, len(ses_outputs))
        exported_files = []
        for ses_output in ses_outputs:
            out_file = check_export(ses_output.id, target_dir)
            exported_files.append(out_file)
        self.assertEqual(2, len(exported_files))
        for f in exported_files:
            self._test_exported_file(f)
        ##################
        # Complete LT SES:
        [complete_lt_ses] = outputs.filter(output_type='complete_lt_ses')
        exported_file = check_export(complete_lt_ses.id, target_dir)
        self._test_exported_file(exported_file)
        #######
        # GMFs:
        gmf_outputs = outputs.filter(output_type='gmf')
        self.assertEqual(2, len(gmf_outputs))
        exported_files = []
        for gmf_output in gmf_outputs:
            out_file = check_export(gmf_output.id, target_dir)
            exported_files.append(out_file)
        self.assertEqual(2, len(exported_files))
        # Check the file paths exist, are absolute, and the files aren't
        # empty.
        for f in exported_files:
            self._test_exported_file(f)
        ##################
        # Complete LT GMF:
        [complete_lt_gmf] = outputs.filter(output_type='complete_lt_gmf')
        exported_file = check_export(complete_lt_gmf.id, target_dir)
        self._test_exported_file(exported_file)
        # Check for the correct number of GMFs in the file:
        tree = etree.parse(exported_file)
        # NB: the number of generated gmfs depends on the number
        # of ruptures, which is stochastic number; even having fixed
        # the seed, it will change by changing the order in which the
        # stochastic functions are called; a test relying on that
        # precise number would be fragile, this is why here we just
        # check that there are gmfs (MS)
        self.assertGreater(number_of('nrml:gmf', tree), 0)
        ################
        # Hazard curves:
        haz_curves = outputs.filter(output_type='hazard_curve')
        self.assertEqual(12, haz_curves.count())
        for curve in haz_curves:
            exported_file = check_export(curve.id, target_dir)
            self._test_exported_file(exported_file)
        ##############
        # Hazard maps:
        haz_maps = outputs.filter(output_type='hazard_map')
        self.assertEqual(24, haz_maps.count())
        for hmap in haz_maps:
            exported_file = check_export(hmap.id, target_dir)
            self._test_exported_file(exported_file)
    finally:
        shutil.rmtree(target_dir)
def test_classical_hazard_export(self):
    """
    End-to-end smoke test of the classical hazard exporters: run the
    calculation, then export every curve, map, multi-curve and UHS
    output, checking the expected output counts.
    """
    # Run a hazard calculation to compute some curves and maps
    # Call the exporter and verify that files were created
    # Since the hazard curve XML writer is concerned with correctly
    # generating XML, we won't test that here.
    target_dir = tempfile.mkdtemp()
    try:
        cfg = helpers.get_data_path('simple_fault_demo_hazard/job.ini')

        # run the calculation to create something to export
        helpers.run_hazard_job(cfg)

        job = models.OqJob.objects.latest('id')
        self.assertEqual(job.status, 'complete')

        outputs = export_core.get_outputs(job.id)

        # 10 hazard curves, 20 maps, 10 uhs, 5 multi curves
        expected_outputs = 45
        self.assertEqual(expected_outputs, outputs.count())

        # Number of curves:
        # (2 imts * 2 realizations)
        # + (2 imts * (1 mean + 2 quantiles)
        # = 10
        curves = outputs.filter(output_type='hazard_curve')
        self.assertEqual(10, curves.count())

        # Number of multi-curves
        # (2 realizations + 1 mean + 2 quantiles)
        multi_curves = outputs.filter(output_type="hazard_curve_multi")
        self.assertEqual(5, multi_curves.count())

        # Number of maps:
        # (2 poes * 2 imts * 2 realizations)
        # + (2 poes * 2 imts * (1 mean + 2 quantiles))
        # = 20
        # Number of UHS:
        maps = outputs.filter(output_type='hazard_map')
        self.assertEqual(20, maps.count())

        # Number of UHS:
        # (20 maps_PGA_SA / 2 poes)
        # = 10
        uhs = outputs.filter(output_type='uh_spectra')
        self.assertEqual(10, uhs.count())

        # Test hazard curve export:
        hc_files = []
        for curve in curves:
            hc_files.append(check_export(curve.id, target_dir))
        self.assertEqual(10, len(hc_files))

        # Test multi hazard curve export:
        # NOTE(review): hc_files is rebound here, so the 10 single-curve
        # files collected above are counted but never passed through
        # self._test_exported_file — looks like an oversight; confirm.
        hc_files = []
        for curve in multi_curves:
            hc_files.append(check_export(curve.id, target_dir))
        self.assertEqual(5, len(hc_files))

        for f in hc_files:
            self._test_exported_file(f)

        # Test hazard map export:
        hm_files = []
        for haz_map in maps:
            hm_files.append(check_export(haz_map.id, target_dir))
        self.assertEqual(20, len(hm_files))

        for f in hm_files:
            self._test_exported_file(f)

        # Test UHS export:
        uhs_files = []
        for u in uhs:
            uhs_files.append(check_export(u.id, target_dir))
        for f in uhs_files:
            self._test_exported_file(f)
    finally:
        shutil.rmtree(target_dir)
def test_classical_hazard_export(self):
    """
    End-to-end smoke test of the classical hazard exporters: run the
    calculation, then export every curve, map, multi-curve and UHS
    output, checking the expected output counts and that each exported
    file exists, is absolute and is non-empty.
    """
    # Run a hazard calculation to compute some curves and maps
    # Call the exporter and verify that files were created
    # Since the hazard curve XML writer is concerned with correctly
    # generating XML, we won't test that here.
    target_dir = tempfile.mkdtemp()
    try:
        cfg = helpers.get_data_path('simple_fault_demo_hazard/job.ini')

        # run the calculation to create something to export
        helpers.run_hazard_job(cfg)

        job = models.OqJob.objects.latest('id')
        self.assertEqual(job.status, 'complete')

        outputs = export_core.get_outputs(job.id)

        # 10 hazard curves, 20 maps, 10 uhs, 5 multi curves
        expected_outputs = 45
        self.assertEqual(expected_outputs, outputs.count())

        # Number of curves:
        # (2 imts * 2 realizations)
        # + (2 imts * (1 mean + 2 quantiles))
        # = 10
        curves = outputs.filter(output_type='hazard_curve')
        self.assertEqual(10, curves.count())

        # Number of multi-curves:
        # (2 realizations + 1 mean + 2 quantiles) = 5
        multi_curves = outputs.filter(output_type="hazard_curve_multi")
        self.assertEqual(5, multi_curves.count())

        # Number of maps:
        # (2 poes * 2 imts * 2 realizations)
        # + (2 poes * 2 imts * (1 mean + 2 quantiles))
        # = 20
        maps = outputs.filter(output_type='hazard_map')
        self.assertEqual(20, maps.count())

        # Number of UHS:
        # (20 maps_PGA_SA / 2 poes)
        # = 10
        uhs = outputs.filter(output_type='uh_spectra')
        self.assertEqual(10, uhs.count())

        # Test hazard curve export.
        # Fix: keep this list separate from the multi-curve one so the
        # exported single-curve files are actually verified (previously
        # the list was rebound before the verification loop).
        hc_files = []
        for curve in curves:
            hc_files.extend(check_export(curve.id, target_dir))
        self.assertEqual(10, len(hc_files))
        for f in hc_files:
            self._test_exported_file(f)

        # Test multi hazard curve export.
        # Fix: use check_export like every other output type in this
        # test (previously this loop called hazard.export directly,
        # bypassing the export checks).
        multi_curve_files = []
        for curve in multi_curves:
            multi_curve_files.extend(check_export(curve.id, target_dir))
        self.assertEqual(5, len(multi_curve_files))
        for f in multi_curve_files:
            self._test_exported_file(f)

        # Test hazard map export:
        hm_files = []
        for haz_map in maps:
            hm_files.extend(check_export(haz_map.id, target_dir))
        self.assertEqual(20, len(hm_files))
        for f in hm_files:
            self._test_exported_file(f)

        # Test UHS export:
        uhs_files = []
        for u in uhs:
            uhs_files.extend(check_export(u.id, target_dir))
        for f in uhs_files:
            self._test_exported_file(f)
    finally:
        shutil.rmtree(target_dir)
def test_export_for_event_based(self):
    """
    End-to-end smoke test of the event-based hazard exporters (list
    export API): run the calculation, then export every SES, GMF,
    curve and map output and check each exported file exists, is
    absolute and is non-empty.
    """
    # Run an event-based hazard calculation to compute SESs and GMFs
    # Call the exporters for both SES and GMF results and verify that
    # files were created
    # Since the XML writers (in `openquake.nrmllib.writers`) are concerned
    # with correctly generating the XML, we don't test that here...
    # but we should still have an end-to-end QA test.
    target_dir = tempfile.mkdtemp()
    try:
        cfg = helpers.get_data_path('event_based_hazard/job.ini')
        # run the calculation in process to create something to export
        os.environ['OQ_NO_DISTRIBUTE'] = '1'
        try:
            helpers.run_hazard_job(cfg)
        finally:
            del os.environ['OQ_NO_DISTRIBUTE']
        job = models.OqJob.objects.latest('id')
        self.assertEqual(job.status, 'complete')
        outputs = export_core.get_outputs(job.id)
        # 2 GMFs, 2 SESs, 1 complete logic tree SES, 1 complete LT GMF,
        # ((2 imts * 2 realizations)
        # + ((2 imts + 1 multi) * (1 mean + 3 quantiles))
        # hazard curves,
        # (2 poes * 2 imts * 2 realizations)
        # + (2 poes * 2 imts * (1 mean + 3 quantiles)) hazard maps
        # Total: 46
        self.assertEqual(46, len(outputs))
        #######
        # SESs:
        ses_outputs = outputs.filter(output_type='ses')
        self.assertEqual(2, len(ses_outputs))
        exported_files = []
        for ses_output in ses_outputs:
            files = check_export(ses_output.id, target_dir)
            exported_files.extend(files)
        self.assertEqual(2, len(exported_files))
        for f in exported_files:
            self._test_exported_file(f)
        ##################
        # Complete LT SES:
        # The single-element unpacking asserts exactly one file/output.
        [complete_lt_ses] = outputs.filter(output_type='complete_lt_ses')
        [exported_file] = check_export(complete_lt_ses.id, target_dir)
        self._test_exported_file(exported_file)
        #######
        # GMFs:
        gmf_outputs = outputs.filter(output_type='gmf')
        self.assertEqual(2, len(gmf_outputs))
        exported_files = []
        for gmf_output in gmf_outputs:
            files = check_export(gmf_output.id, target_dir)
            exported_files.extend(files)
        self.assertEqual(2, len(exported_files))
        # Check the file paths exist, are absolute, and the files aren't
        # empty.
        for f in exported_files:
            self._test_exported_file(f)
        ##################
        # Complete LT GMF:
        [complete_lt_gmf] = outputs.filter(output_type='complete_lt_gmf')
        [exported_file] = check_export(complete_lt_gmf.id, target_dir)
        self._test_exported_file(exported_file)
        # Check for the correct number of GMFs in the file:
        tree = etree.parse(exported_file)
        # NB: the number of generated gmfs depends on the number
        # of ruptures, which is stochastic number; even having fixed
        # the seed, it will change by changing the order in which the
        # stochastic functions are called; a test relying on that
        # precise number would be fragile, this is why here we just
        # check that there are gmfs (MS)
        self.assertGreater(number_of('nrml:gmf', tree), 0)
        ################
        # Hazard curves:
        haz_curves = outputs.filter(output_type='hazard_curve')
        self.assertEqual(12, haz_curves.count())
        for curve in haz_curves:
            [exported_file] = check_export(curve.id, target_dir)
            self._test_exported_file(exported_file)
        ##############
        # Hazard maps:
        haz_maps = outputs.filter(output_type='hazard_map')
        self.assertEqual(24, haz_maps.count())
        for hmap in haz_maps:
            [exported_file] = check_export(hmap.id, target_dir)
            self._test_exported_file(exported_file)
    finally:
        shutil.rmtree(target_dir)
def setUpClass(cls):
    """Run the classical hazard job once, shared by all tests in the class."""
    job_ini = helpers.get_data_path(
        'calculators/hazard/classical/haz_map_test_job2.ini')
    cls.job = helpers.run_hazard_job(job_ini)
def setUpClass(cls):
    """Run the classical hazard job once and attach a JobStats record."""
    job_ini = helpers.get_data_path(
        'calculators/hazard/classical/haz_map_test_job2.ini')
    cls.job = helpers.run_hazard_job(job_ini)
    # Some code under test reads the job's stats row; create it up front.
    models.JobStats.objects.create(oq_job=cls.job)
def test_classical_risk_export(self):
    """
    End-to-end hazard+risk test: run a classical hazard job, feed it to
    a classical risk job, check the expected counts of loss curves,
    maps and fractions, then smoke-test the risk exporters.
    """
    target_dir = tempfile.mkdtemp()
    try:
        haz_cfg = helpers.get_data_path(
            'end-to-end-hazard-risk/job_haz_classical.ini'
        )
        risk_cfg = helpers.get_data_path(
            'end-to-end-hazard-risk/job_risk_classical.ini'
        )

        haz_job = helpers.run_hazard_job(haz_cfg)
        # Run the risk on all outputs produced by the haz calc:
        risk_job = helpers.run_risk_job(
            risk_cfg, hazard_calculation_id=haz_job.hazard_calculation.id
        )

        risk_outputs = models.Output.objects.filter(oq_job=risk_job)

        loss_curve_outputs = risk_outputs.filter(output_type='loss_curve')
        loss_map_outputs = risk_outputs.filter(output_type='loss_map')

        # 16 logic tree realizations + 1 mean + 2 quantiles = 19
        # + 19 insured loss curves
        self.assertEqual(38, loss_curve_outputs.count())

        # make sure the mean and quantile curve sets got created correctly
        loss_curves = models.LossCurve.objects.filter(
            output__oq_job=risk_job,
            insured=False
        )
        # sanity check
        self.assertEqual(19, loss_curves.count())

        insured_curves = models.LossCurve.objects.filter(
            output__oq_job=risk_job,
            insured=True
        )
        # sanity check
        self.assertEqual(19, insured_curves.count())

        # mean
        self.assertEqual(1, loss_curves.filter(statistics='mean').count())
        # quantiles
        self.assertEqual(
            2, loss_curves.filter(statistics='quantile').count()
        )
        # mean
        self.assertEqual(
            1, insured_curves.filter(statistics='mean').count())
        # quantiles
        self.assertEqual(
            2, insured_curves.filter(statistics='quantile').count()
        )

        # 16 logic tree realizations = 16 loss map + 1 mean loss
        # map + 2 quantile loss map
        self.assertEqual(19, loss_map_outputs.count())

        # 19 loss fractions
        loss_fraction_outputs = risk_outputs.filter(
            output_type="loss_fraction")
        self.assertEqual(19, loss_fraction_outputs.count())

        # Now try to export everything, just to do a "smoketest" of the
        # exporter code:
        loss_curve_files = []
        for o in loss_curve_outputs:
            loss_curve_files.append(risk.export(o.id, target_dir, 'xml'))

        loss_map_files = []
        for o in loss_map_outputs:
            loss_map_files.append(risk.export(o.id, target_dir, 'xml'))

        self.assertEqual(38, len(loss_curve_files))
        self.assertEqual(19, len(loss_map_files))

        for f in loss_curve_files:
            self._test_exported_file(f)
        for f in loss_map_files:
            self._test_exported_file(f)
    finally:
        shutil.rmtree(target_dir)
def test_event_based_risk_export(self):
    """
    End-to-end hazard+risk test: run an event-based hazard job, feed it
    to an event-based risk job, check the expected counts of the loss
    curve/map/fraction/event-loss outputs, then smoke-test the risk
    exporters.
    """
    target_dir = tempfile.mkdtemp()
    try:
        haz_cfg = helpers.get_data_path(
            'end-to-end-hazard-risk/job_haz_event_based.ini'
        )
        risk_cfg = helpers.get_data_path(
            'end-to-end-hazard-risk/job_risk_event_based.ini'
        )

        haz_job = helpers.run_hazard_job(haz_cfg)
        # Run the risk on all outputs produced by the haz calc:
        risk_job = helpers.run_risk_job(
            risk_cfg, hazard_calculation_id=haz_job.hazard_calculation.id
        )

        risk_outputs = models.Output.objects.filter(oq_job=risk_job)

        agg_loss_curve_outputs = risk_outputs.filter(
            output_type='agg_loss_curve')
        loss_curve_outputs = risk_outputs.filter(output_type='loss_curve')
        loss_map_outputs = risk_outputs.filter(output_type='loss_map')

        # (1 mean + 2 quantiles) * 2 (as there also insured curves)
        self.assertEqual(6, loss_curve_outputs.count())

        # 16 rlzs + 16 (due to insured curves)
        event_loss_curve_outputs = risk_outputs.filter(
            output_type='event_loss_curve')
        self.assertEqual(32, event_loss_curve_outputs.count())
        self.assertEqual(16, agg_loss_curve_outputs.count())

        # make sure the mean and quantile curve sets got created correctly
        loss_curves = models.LossCurve.objects.filter(
            output__oq_job=risk_job
        )
        # sanity check (16 aggregate loss curve + 38 loss curves)
        self.assertEqual(54, loss_curves.count())

        # mean
        self.assertEqual(2, loss_curves.filter(statistics='mean').count())
        # quantiles
        self.assertEqual(
            4, loss_curves.filter(statistics='quantile').count()
        )

        # 16 logic tree realizations = 16 loss map + 1 mean loss
        # map + 2 quantile loss map
        self.assertEqual(19, loss_map_outputs.count())

        # 16 event loss table (1 per rlz)
        event_loss_tables = risk_outputs.filter(output_type="event_loss")
        self.assertEqual(16, event_loss_tables.count())

        # 32 loss fractions
        loss_fraction_outputs = risk_outputs.filter(
            output_type="loss_fraction")
        self.assertEqual(32, loss_fraction_outputs.count())

        # Now try to export everything, just to do a "smoketest" of the
        # exporter code:
        loss_curve_files = []
        for o in loss_curve_outputs:
            loss_curve_files.append(risk.export(o.id, target_dir, 'xml'))
        for o in event_loss_curve_outputs:
            loss_curve_files.append(risk.export(o.id, target_dir, 'xml'))

        agg_loss_curve_files = []
        for o in agg_loss_curve_outputs:
            agg_loss_curve_files.append(
                risk.export(o.id, target_dir, 'xml')
            )

        event_loss_table_files = []
        for o in event_loss_tables:
            event_loss_table_files.append(
                risk.export(o.id, target_dir, 'xml')
            )

        loss_map_files = []
        for o in loss_map_outputs:
            loss_map_files.append(risk.export(o.id, target_dir, 'xml'))

        self.assertEqual(38, len(loss_curve_files))
        self.assertEqual(16, len(agg_loss_curve_files))
        self.assertEqual(16, len(event_loss_table_files))
        self.assertEqual(19, len(loss_map_files))

        # NOTE(review): only the loss-curve and loss-map files are
        # content-checked below; agg_loss_curve_files and
        # event_loss_table_files are counted but never passed through
        # self._test_exported_file — possibly an oversight; confirm.
        for f in loss_curve_files:
            self._test_exported_file(f)
        for f in loss_map_files:
            self._test_exported_file(f)
    finally:
        shutil.rmtree(target_dir)
def test_export_for_event_based(self):
    """
    End-to-end smoke test of the event-based hazard exporters (no
    complete-LT outputs in this configuration): run the calculation,
    then export every SES, GMF, curve and map output and check each
    exported file exists, is absolute and is non-empty.
    """
    # Run an event-based hazard calculation to compute SESs and GMFs
    # Call the exporters for both SES and GMF results and verify that
    # files were created
    # Since the XML writers (in `openquake.nrmllib.writers`) are concerned
    # with correctly generating the XML, we don't test that here...
    # but we should still have an end-to-end QA test.
    target_dir = tempfile.mkdtemp()
    try:
        cfg = helpers.get_data_path('event_based_hazard/job.ini')
        # run the calculation in process to create something to export
        os.environ['OQ_NO_DISTRIBUTE'] = '1'
        try:
            helpers.run_hazard_job(cfg)
        finally:
            del os.environ['OQ_NO_DISTRIBUTE']
        job = models.OqJob.objects.latest('id')
        self.assertEqual(job.status, 'complete')
        outputs = export_core.get_outputs(job.id)
        # 2 GMFs, 2 SESs,
        # ((2 imts * 2 realizations)
        # + ((2 imts + 1 multi) * (1 mean + 3 quantiles))
        # hazard curves,
        # (2 poes * 2 imts * 2 realizations)
        # + (2 poes * 2 imts * (1 mean + 3 quantiles)) hazard maps
        # Total: 44
        self.assertEqual(44, len(outputs))
        #######
        # SESs:
        ses_outputs = outputs.filter(output_type='ses')
        self.assertEqual(2, len(ses_outputs))
        exported_files = []
        for ses_output in ses_outputs:
            out_file = check_export(ses_output.id, target_dir)
            exported_files.append(out_file)
        self.assertEqual(2, len(exported_files))
        for f in exported_files:
            self._test_exported_file(f)
        #######
        # GMFs:
        gmf_outputs = outputs.filter(output_type='gmf')
        self.assertEqual(2, len(gmf_outputs))
        exported_files = []
        for gmf_output in gmf_outputs:
            out_file = check_export(gmf_output.id, target_dir)
            exported_files.append(out_file)
        self.assertEqual(2, len(exported_files))
        # Check the file paths exist, are absolute, and the files aren't
        # empty.
        for f in exported_files:
            self._test_exported_file(f)
        ################
        # Hazard curves:
        haz_curves = outputs.filter(output_type='hazard_curve')
        self.assertEqual(12, haz_curves.count())
        for curve in haz_curves:
            exported_file = check_export(curve.id, target_dir)
            self._test_exported_file(exported_file)
        ##############
        # Hazard maps:
        haz_maps = outputs.filter(output_type='hazard_map')
        self.assertEqual(24, haz_maps.count())
        for hmap in haz_maps:
            exported_file = check_export(hmap.id, target_dir)
            self._test_exported_file(exported_file)
    finally:
        shutil.rmtree(target_dir)