def test_export_for_scenario(self):
    target_dir = tempfile.mkdtemp()
    try:
        cfg = helpers.get_data_path('scenario_hazard/job.ini')
        # run the calculation in process to create something to export
        with mock.patch.dict(os.environ, {'OQ_NO_DISTRIBUTE': '1'}):
            helpers.run_job(cfg)
        job = models.OqJob.objects.latest('id')
        self.assertEqual(job.status, 'complete')

        outputs = core.get_outputs(job.id)
        gmf_outputs = outputs.filter(ds_key='gmfs')
        self.assertEqual(1, len(gmf_outputs))

        exported_file = check_export(gmf_outputs[0].id, target_dir)

        # Check the file path exists, is absolute, and the file isn't
        # empty.
        self._test_exported_file(exported_file)

        # Check for the correct number of GMFs in the file:
        tree = etree.parse(exported_file)
        self.assertEqual(20, number_of('nrml:gmf', tree))
    finally:
        shutil.rmtree(target_dir)
def test_export_for_scenario(self):
    target_dir = tempfile.mkdtemp()
    try:
        cfg = helpers.get_data_path("scenario_hazard/job.ini")
        # run the calculation in process to create something to export
        os.environ["OQ_NO_DISTRIBUTE"] = "1"
        try:
            helpers.run_job(cfg)
        finally:
            del os.environ["OQ_NO_DISTRIBUTE"]
        job = models.OqJob.objects.latest("id")
        self.assertEqual(job.status, "complete")

        outputs = export_core.get_outputs(job.id)
        self.assertEqual(1, len(outputs))  # 1 GMF

        gmf_outputs = outputs.filter(output_type="gmf_scenario")
        self.assertEqual(1, len(gmf_outputs))

        exported_file = check_export(gmf_outputs[0].id, target_dir)

        # Check the file path exists, is absolute, and the file isn't
        # empty.
        self._test_exported_file(exported_file)

        # Check for the correct number of GMFs in the file:
        tree = etree.parse(exported_file)
        self.assertEqual(20, number_of("nrml:gmf", tree))
    finally:
        shutil.rmtree(target_dir)
def test_export_for_scenario(self):
    target_dir = tempfile.mkdtemp()
    try:
        cfg = helpers.get_data_path('scenario_hazard/job.ini')
        # run the calculation in process to create something to export
        with mock.patch.dict(os.environ, {'OQ_NO_DISTRIBUTE': '1'}):
            helpers.run_job(cfg)
        job = models.OqJob.objects.latest('id')
        self.assertEqual(job.status, 'complete')

        outputs = core.get_outputs(job.id)
        self.assertEqual(2, len(outputs))  # 1 GMF, 1 SES

        gmf_outputs = outputs.filter(output_type='gmf_scenario')
        self.assertEqual(1, len(gmf_outputs))

        exported_file = check_export(gmf_outputs[0].id, target_dir)

        # Check the file path exists, is absolute, and the file isn't
        # empty.
        self._test_exported_file(exported_file)

        # Check for the correct number of GMFs in the file:
        tree = etree.parse(exported_file)
        self.assertEqual(20, number_of('nrml:gmf', tree))
    finally:
        shutil.rmtree(target_dir)
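# A minimal sketch of the `number_of` helper the scenario-export tests
# above rely on (a hypothetical reconstruction, not the engine's own
# code): it counts the elements matching an XPath expression in a tree
# returned by etree.parse, resolving the `nrml` prefix. The namespace
# URI is an assumption (NRML 0.4 was current for this generation of the
# engine).
NRML_NS = {'nrml': 'http://openquake.org/xmlns/nrml/0.4'}  # assumed URI


def number_of(elem_name, tree):
    # lxml's XPath count() returns a float, so cast it to int
    return int(tree.xpath('count(//%s)' % elem_name, namespaces=NRML_NS))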
def test(self):
    cfg = helpers.get_data_path('classical_job.ini')
    with mock.patch.dict(os.environ, {'OQ_NO_DISTRIBUTE': '1'}), \
            mock.patch('openquake.engine.logs.LOG.warn') as warn:
        # using a small maximum distance of 1 km, so that no sources
        # are found, and checking that no realizations are generated
        helpers.run_job(cfg, maximum_distance=1)
    self.assertEqual(warn.call_args[0][0],
                     'No realizations for hazard_calculation_id=%d')
def test(self):
    cfg = helpers.get_data_path('simple_fault_demo_hazard/job.ini')
    with mock.patch('openquake.engine.logs.LOG.warn') as warn:
        helpers.run_job(cfg, number_of_logic_tree_samples=1,
                        quantile_hazard_curves='0.1 0.2',
                        hazard_maps=None, uniform_hazard_spectra=None)
    msg = warn.call_args[0][0]
    self.assertEqual(msg,
                     'There is only one realization, the configuration'
                     ' parameter quantile_hazard_curves should not be set')
def test(self):
    cfg = helpers.get_data_path('simple_fault_demo_hazard/job.ini')
    with mock.patch('openquake.engine.logs.LOG.warn') as warn:
        helpers.run_job(cfg, number_of_logic_tree_samples=1,
                        quantile_hazard_curves='0.1 0.2',
                        hazard_maps='', uniform_hazard_spectra='')
    msg = warn.call_args[0][0]
    self.assertEqual(
        msg, 'There is only one realization, the configuration'
        ' parameter quantile_hazard_curves should not be set')
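# How `warn.call_args` is indexed in the tests above: mock records the
# most recent call as an (args, kwargs) pair, so call_args[0][0] is the
# first positional argument. A self-contained illustration (the message
# text is made up):
import mock

fake_log = mock.Mock()
fake_log('There is only one realization, ...', 'extra arg')
args, kwargs = fake_log.call_args
assert args[0].startswith('There is only one realization')
assert fake_log.call_args[0][0] == args[0]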
def get_hazard_job(self):
    try:
        job = models.JobParam.objects.filter(
            name='description',
            value__contains=self.hazard_calculation_fixture,
            job__status="complete").latest('id').job
    except ObjectDoesNotExist:
        warnings.warn("Computing Hazard input from scratch")
        job = helpers.run_job(self._test_path('job_haz.ini')).job
        self.assertEqual('complete', job.status)
    else:
        warnings.warn("Using existing Hazard input")
    if self.save_load:
        # Close the opened transactions
        saved_calculation = save_hazards.main(job.id)

        # FIXME From here on, to avoid deadlocks due to stale
        # transactions, we commit all the open transactions; we should
        # find out who is responsible for the transaction left open
        connection = models.getcursor('job_init').connection
        if connection is not None:
            connection.commit()
        [load_calculation] = load_hazards.hazard_load(
            models.getcursor('admin').connection, saved_calculation)
        return models.OqJob.objects.get(pk=load_calculation)
    else:
        return job
def get_hazard_job(self):
    try:
        job = models.JobParam.objects.filter(
            name='description',
            value__contains=self.hazard_calculation_fixture,
            job__status="complete").latest('id').job
    except ObjectDoesNotExist:
        warnings.warn("Computing Hazard input from scratch")
        job = helpers.run_job(self._test_path('job_haz.ini'))
        self.assertEqual('complete', job.status)
    else:
        warnings.warn("Using existing Hazard input")
    if self.save_load:
        # Close the opened transactions
        saved_calculation = save_hazards.main(job.id)

        # FIXME From here on, to avoid deadlocks due to stale
        # transactions, we commit all the open transactions; we should
        # find out who is responsible for the transaction left open
        connection = models.getcursor('job_init').connection
        if connection is not None:
            connection.commit()
        [load_calculation] = load_hazards.hazard_load(
            models.getcursor('admin').connection, saved_calculation)
        return models.OqJob.objects.get(pk=load_calculation)
    else:
        return job
def get_hazard_job(self):
    if not self._get_queryset().exists():
        warnings.warn("Computing Hazard input from scratch")
        job = helpers.run_job(self._test_path('job_haz.ini'))
        self.assertEqual('complete', job.status)
    else:
        warnings.warn("Using existing Hazard input")
        job = self._get_queryset().latest('oqjob__last_update').oqjob
    if self.save_load:
        # Close the opened transactions
        saved_calculation = save_hazards.main(job.hazard_calculation.id)

        # FIXME From here on, to avoid deadlocks due to stale
        # transactions, we commit all the open transactions; we should
        # find out who is responsible for the transaction left open
        connection = models.getcursor('job_init').connection
        if connection is not None:
            connection.commit()
        [load_calculation] = load_hazards.hazard_load(
            models.getcursor('admin').connection, saved_calculation)
        return models.OqJob.objects.get(
            hazard_calculation__id=load_calculation)
    else:
        return job
def test(self):
    # The bug can be reproduced with any hazard calculation profile with
    # the following parameters set:
    #
    # * number_of_logic_tree_samples = 1
    # * mean_hazard_curves = false
    # * quantile_hazard_curves =
    # * poes = at least one PoE
    cfg = helpers.get_data_path(
        "calculators/hazard/classical/haz_map_1rlz_no_stats.ini")
    job = helpers.run_job(cfg)
    self.assertEqual(job.status, "complete")
def test_disagg_hazard_export(self):
    target_dir = tempfile.mkdtemp()
    try:
        cfg = helpers.get_data_path('disaggregation/job.ini')
        # run the calculation in process to create something to export
        os.environ['OQ_NO_DISTRIBUTE'] = '1'
        try:
            helpers.run_job(cfg)
        finally:
            del os.environ['OQ_NO_DISTRIBUTE']
        job = models.OqJob.objects.latest('id')
        self.assertEqual(job.status, 'complete')

        outputs = core.get_outputs(job.id)

        # Test curve export:
        curves = outputs.filter(output_type='hazard_curve')
        self.assertEqual(4, len(curves))
        curve_files = []
        for curve in curves:
            curve_files.append(check_export(curve.id, target_dir))

        self.assertEqual(4, len(curve_files))
        for f in curve_files:
            self._test_exported_file(f)

        # Test disagg matrix export:
        matrices = outputs.filter(output_type='disagg_matrix')
        self.assertEqual(8, len(matrices))
        disagg_files = []
        for matrix in matrices:
            disagg_files.append(check_export(matrix.id, target_dir))

        self.assertEqual(8, len(disagg_files))
        for f in disagg_files:
            self._test_exported_file(f)
    finally:
        shutil.rmtree(target_dir)
def test_disagg_hazard_export(self):
    target_dir = tempfile.mkdtemp()
    try:
        cfg = helpers.get_data_path('disaggregation/job.ini')
        # run the calculation in process to create something to export
        os.environ['OQ_NO_DISTRIBUTE'] = '1'
        try:
            helpers.run_job(cfg)
        finally:
            del os.environ['OQ_NO_DISTRIBUTE']
        job = models.OqJob.objects.latest('id')
        self.assertEqual(job.status, 'complete')

        outputs = export_core.get_outputs(job.id)

        # Test curve export:
        curves = outputs.filter(output_type='hazard_curve')
        self.assertEqual(4, len(curves))
        curve_files = []
        for curve in curves:
            curve_files.append(check_export(curve.id, target_dir))

        self.assertEqual(4, len(curve_files))
        for f in curve_files:
            self._test_exported_file(f)

        # Test disagg matrix export:
        matrices = outputs.filter(output_type='disagg_matrix')
        self.assertEqual(8, len(matrices))
        disagg_files = []
        for matrix in matrices:
            disagg_files.append(check_export(matrix.id, target_dir))

        self.assertEqual(8, len(disagg_files))
        for f in disagg_files:
            self._test_exported_file(f)
    finally:
        shutil.rmtree(target_dir)
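# A plausible sketch of the `check_export` helper used throughout these
# tests (hypothetical reconstruction; the real helper lives in the test
# utilities): export a single output to `target_dir` and hand back the
# path of the file that was written, so the caller can inspect it.
def check_export(output_id, target_dir):
    out_file = core.export(output_id, target_dir, 'xml')
    assert out_file is not None  # the exporter must produce a file
    return out_file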
def test(self):
    # The bug can be reproduced with any hazard calculation profile with
    # the following parameters set:
    #
    # * number_of_logic_tree_samples = 1
    # * mean_hazard_curves = false
    # * quantile_hazard_curves =
    # * poes = at least one PoE
    cfg = helpers.get_data_path(
        'calculators/hazard/classical/haz_map_1rlz_no_stats.ini')
    job = helpers.run_job(cfg).job
    self.assertEqual(job.status, 'complete')
def run_hazard(self, cfg, exports=''):
    """
    Given the path to a job config file, run the job and assert that it
    was successful. If this assertion passes, return the completed job.

    :param str cfg:
        Path to a job config file.
    :returns:
        The completed :class:`~openquake.engine.db.models.OqJob`.
    :raises:
        :exc:`AssertionError` if the job was not successfully run.
    """
    completed_job = helpers.run_job(cfg, exports=exports).job
    self.assertEqual('complete', completed_job.status)

    return completed_job
def run_hazard(self, cfg, exports=None):
    """
    Given the path to a job config file, run the job and assert that it
    was successful. If this assertion passes, return the completed job.

    :param str cfg:
        Path to a job config file.
    :param list exports:
        A list of export format types. Currently only 'xml' is supported.
    :returns:
        The completed :class:`~openquake.engine.db.models.OqJob`.
    :raises:
        :exc:`AssertionError` if the job was not successfully run.
    """
    completed_job = helpers.run_job(cfg, exports=exports)
    self.assertEqual("complete", completed_job.status)

    return completed_job
def run_risk(self, cfg, hazard_id):
    """
    Given the path to a job config file, run the job and assert that it
    was successful. If this assertion passes, return the completed job.

    :param str cfg:
        Path to a job config file.
    :param int hazard_id:
        ID of the hazard output used by the risk calculation.
    :returns:
        The completed :class:`~openquake.engine.db.models.OqJob`.
    :raises:
        :exc:`AssertionError` if the job was not successfully run.
    """
    completed_job = helpers.run_job(cfg, hazard_output_id=hazard_id)
    self.assertEqual('complete', completed_job.status)

    return completed_job
def run_risk(self, cfg, hazard_id):
    """
    Given the path to a job config file, run the job and assert that it
    was successful. If this assertion passes, return the completed job.

    :param str cfg:
        Path to a job config file.
    :param int hazard_id:
        ID of the hazard output used by the risk calculation.
    :returns:
        The completed :class:`~openquake.engine.db.models.OqJob`.
    :raises:
        :exc:`AssertionError` if the job was not successfully run.
    """
    completed_job = helpers.run_job(cfg, hazard_output_id=hazard_id).job
    self.assertEqual('complete', completed_job.status)

    return completed_job
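# Hypothetical usage of the two helpers above from inside a test method,
# chaining hazard and risk (the .ini names and the choice of the first
# output are made up for illustration):
haz_job = self.run_hazard(helpers.get_data_path('job_haz.ini'))
hazard_output = models.Output.objects.filter(oq_job=haz_job)[0]
risk_job = self.run_risk(helpers.get_data_path('job_risk.ini'),
                         hazard_output.id)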
def test_complete_event_based_calculation_cycle(self):
    # run the calculation in process (to ease debugging)
    # and check the outputs
    with mock.patch.dict(os.environ, {'OQ_NO_DISTRIBUTE': '1'}):
        job = helpers.run_job(self.cfg)
    hc = job.hazard_calculation
    [rlz1, rlz2] = models.LtRealization.objects.filter(
        lt_model__hazard_calculation=hc.id)

    # check that the parameters are read correctly from the files
    self.assertEqual(hc.ses_per_logic_tree_path, 5)

    # check that we generated the right number of ruptures
    # (this is fixed if the seeds are fixed correctly)
    num_ruptures = models.SESRupture.objects.filter(
        rupture__ses_collection__output__oq_job=job.id).count()
    self.assertEqual(num_ruptures, 94)

    num_gmf1 = models.GmfData.objects.filter(
        gmf__lt_realization=rlz1).count()
    num_gmf2 = models.GmfData.objects.filter(
        gmf__lt_realization=rlz2).count()

    # check that we generated the same number of rows in GmfData
    # for both realizations
    self.assertEqual(num_gmf1, num_gmf2)

    # check that the number of GmfData rows is a multiple of
    # 242 = 121 sites * 2 IMTs
    self.assertEqual(num_gmf1 % 242, 0)

    # Now check for the correct number of hazard curves:
    curves = models.HazardCurve.objects.filter(output__oq_job=job)
    # ((2 IMTs * 2 rlz) + (2 IMTs * (1 mean + 2 quantiles))) = 10
    # + 5 multi-imt curves (2 rlz + 1 mean + 2 quantiles)
    self.assertEqual(15, curves.count())

    # Finally, check for the correct number of hazard maps:
    maps = models.HazardMap.objects.filter(output__oq_job=job)
    # ((2 poes * 2 realizations * 2 IMTs)
    #  + (2 poes * 2 IMTs * (1 mean + 2 quantiles))) = 20
    self.assertEqual(20, maps.count())
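# The hazard-curve count asserted above, spelled out (pure arithmetic,
# using the numbers from the comments in the test):
imts, rlzs, quantiles = 2, 2, 2
per_imt_curves = imts * (rlzs + 1 + quantiles)  # 2 * (2 + 1 + 2) = 10
multi_imt_curves = rlzs + 1 + quantiles         # 2 + 1 + 2 = 5
assert per_imt_curves + multi_imt_curves == 15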
def test_complete_event_based_calculation_cycle(self):
    # run the calculation in process (to ease debugging)
    # and check the outputs
    with mock.patch.dict(os.environ, {'OQ_NO_DISTRIBUTE': '1'}):
        job = helpers.run_job(self.cfg)
    hc = job.hazard_calculation
    [(rlz1, rlz2)] = models.LtSourceModel.objects.filter(
        hazard_calculation=hc.id)

    # check that the parameters are read correctly from the files
    self.assertEqual(hc.ses_per_logic_tree_path, 5)

    # check that we generated the right number of ruptures
    # (this is fixed if the seeds are fixed correctly)
    num_ruptures = models.SESRupture.objects.filter(
        rupture__ses_collection__output__oq_job=job.id).count()
    self.assertEqual(num_ruptures, 96)

    # check that we generated the right number of rows in GmfData;
    # 242 = 121 sites * 2 IMTs
    num_gmf1 = models.GmfData.objects.filter(
        gmf__lt_realization=rlz1).count()
    num_gmf2 = models.GmfData.objects.filter(
        gmf__lt_realization=rlz2).count()
    # with concurrent_tasks=64, this test generates 17 tasks, but
    # only 15 give a nonzero contribution
    self.assertEqual(num_gmf1, 242 * 15)
    self.assertEqual(num_gmf2, 242 * 15)

    # Now check for the correct number of hazard curves:
    curves = models.HazardCurve.objects.filter(output__oq_job=job)
    # ((2 IMTs * 2 rlz) + (2 IMTs * (1 mean + 2 quantiles))) = 10
    # + 3 multi-imt curves (1 mean + 2 quantiles)
    self.assertEqual(13, curves.count())

    # Finally, check for the correct number of hazard maps:
    maps = models.HazardMap.objects.filter(output__oq_job=job)
    # ((2 poes * 2 realizations * 2 IMTs)
    #  + (2 poes * 2 IMTs * (1 mean + 2 quantiles))) = 20
    self.assertEqual(20, maps.count())
def test_export_for_event_based(self):
    # Run an event-based hazard calculation to compute SESs and GMFs
    # Call the exporters for both SES and GMF results and verify that
    # files were created
    # Since the XML writers (in `openquake.commonlib`) are concerned
    # with correctly generating the XML, we don't test that here
    target_dir = tempfile.mkdtemp()
    try:
        cfg = helpers.get_data_path('event_based_hazard/job.ini')
        # run the calculation in process to create something to export
        with mock.patch.dict(os.environ, {'OQ_NO_DISTRIBUTE': '1'}):
            job = helpers.run_job(cfg, maximum_distance=1,
                                  ses_per_logic_tree_path=1,
                                  investigation_time=12,
                                  number_of_logic_tree_samples=1).job
        self.assertEqual(job.status, 'complete')
        dstore = datastore.DataStore(job.id)

        # 1 SES + 1 GMF + 1 hazard_curve_multi + 2 hazard_curve +
        # 4 hazard maps (with poes 0.1, 0.2 and IMT PGA, SA(0.1))
        outputs = core.get_outputs(job.id)

        # SESs
        ses_outputs = outputs.filter(ds_key='sescollection')
        self.assertEqual(1, len(ses_outputs))

        exported_files = []
        for ses_output in ses_outputs:
            out_file = check_export(ses_output.id, target_dir)
            exported_files.append(out_file)

        self.assertEqual(1, len(exported_files))

        for f in exported_files:
            self._test_exported_file(f)

        # GMFs
        gmf_outputs = outputs.filter(ds_key='gmfs')
        self.assertEqual(1, len(gmf_outputs))

        exported_files = []
        for gmf_output in gmf_outputs:
            out_file = check_export(gmf_output.id, target_dir)
            exported_files.append(out_file)

        self.assertEqual(1, len(exported_files))

        # Check the file paths exist, are absolute, and the files aren't
        # empty.
        for f in exported_files:
            self._test_exported_file(f)

        # check the exact values of the GMFs
        gmfs = writers.write_csv(
            io.StringIO(), dstore['gmfs']['col00'].value).encode('utf8')
        self.check_file_content('expected_gmfset_1.txt', gmfs)

        # Hazard curves
        haz_curves = outputs.filter(ds_key='hcurves')
        self.assertEqual(1, haz_curves.count())
        for curve in haz_curves:
            exported_file = check_export(curve.id, target_dir)
            self._test_exported_file(exported_file)

        # Hazard maps
        haz_maps = outputs.filter(ds_key='hmaps')
        self.assertEqual(1, haz_maps.count())
        for hmap in haz_maps:
            exported_file = check_export(hmap.id, target_dir)
            self._test_exported_file(exported_file)
    finally:
        shutil.rmtree(target_dir)
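# A minimal sketch of the `_test_exported_file` assertion helper used by
# all the export tests, reconstructed from what the comments say it
# checks (the path exists, is absolute, and the file is non-empty); the
# real helper may differ:
def _test_exported_file(self, filename):
    self.assertTrue(os.path.exists(filename))
    self.assertTrue(os.path.isabs(filename))
    self.assertTrue(os.path.getsize(filename) > 0)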
def test_event_based_risk_export(self):
    target_dir = tempfile.mkdtemp()
    try:
        haz_cfg = helpers.get_data_path(
            'end-to-end-hazard-risk/job_haz_event_based.ini')
        risk_cfg = helpers.get_data_path(
            'end-to-end-hazard-risk/job_risk_event_based.ini')

        haz_job = helpers.run_job(haz_cfg).job
        # Run the risk on all outputs produced by the haz calc:
        risk_job = helpers.run_job(
            risk_cfg, hazard_calculation_id=haz_job.id).job

        risk_outputs = models.Output.objects.filter(oq_job=risk_job)

        agg_loss_curve_outputs = risk_outputs.filter(
            output_type='agg_loss_curve')
        loss_curve_outputs = risk_outputs.filter(output_type='loss_curve')
        loss_map_outputs = risk_outputs.filter(output_type='loss_map')

        # (1 mean + 2 quantiles) * 2 (as there are also insured curves)
        self.assertEqual(6, loss_curve_outputs.count())

        # 16 rlzs + 16 (due to insured curves)
        event_loss_curve_outputs = risk_outputs.filter(
            output_type='event_loss_curve')
        self.assertEqual(32, event_loss_curve_outputs.count())
        self.assertEqual(16, agg_loss_curve_outputs.count())

        # make sure the mean and quantile curve sets got created correctly
        loss_curves = models.LossCurve.objects.filter(
            output__oq_job=risk_job)
        # sanity check (16 aggregate loss curves + 38 loss curves)
        self.assertEqual(54, loss_curves.count())

        # mean
        self.assertEqual(2, loss_curves.filter(statistics='mean').count())
        # quantiles
        self.assertEqual(
            4, loss_curves.filter(statistics='quantile').count())

        # 16 logic tree realizations = 16 loss maps, + 1 mean loss
        # map + 2 quantile loss maps
        self.assertEqual(19, loss_map_outputs.count())

        # 16 event loss tables (1 per rlz)
        event_loss_tables = risk_outputs.filter(output_type="event_loss")
        self.assertEqual(16, event_loss_tables.count())

        # 32 loss fractions
        loss_fraction_outputs = risk_outputs.filter(
            output_type="loss_fraction")
        self.assertEqual(32, loss_fraction_outputs.count())

        # Now try to export everything, just to do a "smoketest" of the
        # exporter code:
        loss_curve_files = []
        for o in loss_curve_outputs:
            loss_curve_files.append(core.export(o.id, target_dir, 'xml'))
        for o in loss_fraction_outputs:
            loss_curve_files.append(core.export(o.id, target_dir, 'xml'))
        for o in event_loss_curve_outputs:
            loss_curve_files.append(core.export(o.id, target_dir, 'xml'))

        agg_loss_curve_files = []
        for o in agg_loss_curve_outputs:
            agg_loss_curve_files.append(
                core.export(o.id, target_dir, 'xml'))

        event_loss_table_files = []
        for o in event_loss_tables:
            event_loss_table_files.append(
                core.export(o.id, target_dir, 'csv'))

        loss_map_files = []
        for o in loss_map_outputs:
            loss_map_files.append(core.export(o.id, target_dir, 'xml'))

        self.assertEqual(70, len(loss_curve_files))
        self.assertEqual(16, len(agg_loss_curve_files))
        self.assertEqual(16, len(event_loss_table_files))
        self.assertEqual(19, len(loss_map_files))

        for f in loss_curve_files:
            self._test_exported_file(f)
        for f in loss_map_files:
            self._test_exported_file(f)
    finally:
        shutil.rmtree(target_dir)
def test_classical_risk_export(self):
    target_dir = tempfile.mkdtemp()
    try:
        haz_cfg = helpers.get_data_path(
            'end-to-end-hazard-risk/job_haz_classical.ini')
        risk_cfg = helpers.get_data_path(
            'end-to-end-hazard-risk/job_risk_classical.ini')

        haz_job = helpers.run_job(haz_cfg).job
        # Run the risk on all outputs produced by the haz calc:
        risk_job = helpers.run_job(
            risk_cfg, hazard_calculation_id=haz_job.id).job

        risk_outputs = models.Output.objects.filter(oq_job=risk_job)

        loss_curve_outputs = risk_outputs.filter(output_type='loss_curve')
        loss_map_outputs = risk_outputs.filter(output_type='loss_map')

        # 16 logic tree realizations + 1 mean + 2 quantiles = 19
        # + 19 insured loss curves
        self.assertEqual(38, loss_curve_outputs.count())

        # make sure the mean and quantile curve sets got created correctly
        loss_curves = models.LossCurve.objects.filter(
            output__oq_job=risk_job, insured=False)
        # sanity check
        self.assertEqual(19, loss_curves.count())

        insured_curves = models.LossCurve.objects.filter(
            output__oq_job=risk_job, insured=True)
        # sanity check
        self.assertEqual(19, insured_curves.count())

        # mean
        self.assertEqual(1, loss_curves.filter(statistics='mean').count())
        # quantiles
        self.assertEqual(
            2, loss_curves.filter(statistics='quantile').count())

        # mean
        self.assertEqual(
            1, insured_curves.filter(statistics='mean').count())
        # quantiles
        self.assertEqual(
            2, insured_curves.filter(statistics='quantile').count())

        # 16 logic tree realizations = 16 loss maps, + 1 mean loss
        # map + 2 quantile loss maps
        self.assertEqual(19, loss_map_outputs.count())

        # 19 loss fractions
        loss_fraction_outputs = risk_outputs.filter(
            output_type="loss_fraction")
        self.assertEqual(19, loss_fraction_outputs.count())

        # Now try to export everything, just to do a "smoketest" of the
        # exporter code:
        loss_curve_files = []
        for o in loss_curve_outputs:
            loss_curve_files.append(core.export(o.id, target_dir, 'xml'))

        loss_map_files = []
        for o in loss_map_outputs:
            loss_map_files.append(core.export(o.id, target_dir, 'xml'))

        self.assertEqual(38, len(loss_curve_files))
        self.assertEqual(19, len(loss_map_files))

        for f in loss_curve_files:
            self._test_exported_file(f)
        for f in loss_map_files:
            self._test_exported_file(f)
    finally:
        shutil.rmtree(target_dir)
def test(self):
    cfg = helpers.get_data_path('classical_job.ini')
    with mock.patch.dict(os.environ, {'OQ_NO_DISTRIBUTE': '1'}):
        with self.assertRaises(RuntimeError):
            helpers.run_job(cfg, maximum_distance=1)
def setUpClass(cls):
    cfg = helpers.get_data_path(
        'calculators/hazard/classical/haz_map_test_job2.ini')
    cls.job = helpers.run_job(cfg).job
    models.JobStats.objects.create(oq_job=cls.job)
    cls.monitor = EnginePerformanceMonitor('', cls.job.id)
def test_classical_risk_export(self):
    target_dir = tempfile.mkdtemp()
    try:
        haz_cfg = helpers.get_data_path(
            'end-to-end-hazard-risk/job_haz_classical.ini')
        risk_cfg = helpers.get_data_path(
            'end-to-end-hazard-risk/job_risk_classical.ini')

        haz_job = helpers.run_job(haz_cfg).job
        # Run the risk on all outputs produced by the haz calc:
        risk_job = helpers.run_job(
            risk_cfg, hazard_calculation_id=haz_job.id).job

        risk_outputs = models.Output.objects.filter(oq_job=risk_job)

        loss_curve_outputs = risk_outputs.filter(output_type='loss_curve')
        loss_map_outputs = risk_outputs.filter(output_type='loss_map')

        # 16 logic tree realizations + 1 mean + 2 quantiles = 19
        # + 19 insured loss curves
        self.assertEqual(38, loss_curve_outputs.count())

        # make sure the mean and quantile curve sets got created correctly
        loss_curves = models.LossCurve.objects.filter(
            output__oq_job=risk_job, insured=False)
        # sanity check
        self.assertEqual(19, loss_curves.count())

        insured_curves = models.LossCurve.objects.filter(
            output__oq_job=risk_job, insured=True)
        # sanity check
        self.assertEqual(19, insured_curves.count())

        # mean
        self.assertEqual(1, loss_curves.filter(statistics='mean').count())
        # quantiles
        self.assertEqual(
            2, loss_curves.filter(statistics='quantile').count())

        # mean
        self.assertEqual(
            1, insured_curves.filter(statistics='mean').count())
        # quantiles
        self.assertEqual(
            2, insured_curves.filter(statistics='quantile').count())

        # 16 logic tree realizations = 16 loss maps, + 1 mean loss
        # map + 2 quantile loss maps
        self.assertEqual(19, loss_map_outputs.count())

        # 19 loss fractions
        loss_fraction_outputs = risk_outputs.filter(
            output_type="loss_fraction")
        self.assertEqual(19, loss_fraction_outputs.count())

        # Now try to export everything, just to do a "smoketest" of the
        # exporter code:
        loss_curve_files = []
        for o in loss_curve_outputs:
            loss_curve_files.append(core.export(o.id, target_dir, 'xml'))

        loss_map_files = []
        for o in loss_map_outputs:
            loss_map_files.append(core.export(o.id, target_dir, 'xml'))

        self.assertEqual(38, len(loss_curve_files))
        self.assertEqual(19, len(loss_map_files))

        for f in loss_curve_files:
            self._test_exported_file(f)
        for f in loss_map_files:
            self._test_exported_file(f)
    finally:
        shutil.rmtree(target_dir)
def test_export_for_event_based(self):
    # Run an event-based hazard calculation to compute SESs and GMFs
    # Call the exporters for both SES and GMF results and verify that
    # files were created
    # Since the XML writers (in `openquake.nrmllib.writers`) are
    # concerned with correctly generating the XML, we don't test that
    # here... but we should still have an end-to-end QA test.
    target_dir = tempfile.mkdtemp()
    try:
        cfg = helpers.get_data_path("event_based_hazard/job.ini")

        # run the calculation in process to create something to export
        os.environ["OQ_NO_DISTRIBUTE"] = "1"
        try:
            job = helpers.run_job(cfg)
        finally:
            del os.environ["OQ_NO_DISTRIBUTE"]
        self.assertEqual(job.status, "complete")

        outputs = export_core.get_outputs(job.id)
        # 2 GMFs, 1 SES,
        # ((2 imts * 2 realizations)
        #  + ((2 imts + 1 multi) * (1 mean + 3 quantiles))) hazard curves,
        # (2 poes * 2 imts * 2 realizations)
        #  + (2 poes * 2 imts * (1 mean + 3 quantiles)) hazard maps
        # Total: 43
        self.assertEqual(43, len(outputs))

        #######
        # SESs:
        ses_outputs = outputs.filter(output_type="ses")
        self.assertEqual(1, len(ses_outputs))

        exported_files = []
        for ses_output in ses_outputs:
            out_file = check_export(ses_output.id, target_dir)
            exported_files.append(out_file)

        self.assertEqual(1, len(exported_files))

        for f in exported_files:
            self._test_exported_file(f)

        #######
        # GMFs:
        gmf_outputs = outputs.filter(output_type="gmf")
        self.assertEqual(2, len(gmf_outputs))

        exported_files = []
        for gmf_output in gmf_outputs:
            out_file = check_export(gmf_output.id, target_dir)
            exported_files.append(out_file)

        self.assertEqual(2, len(exported_files))

        # Check the file paths exist, are absolute, and the files aren't
        # empty.
        for f in exported_files:
            self._test_exported_file(f)

        ################
        # Hazard curves:
        haz_curves = outputs.filter(output_type="hazard_curve")
        self.assertEqual(12, haz_curves.count())
        for curve in haz_curves:
            exported_file = check_export(curve.id, target_dir)
            self._test_exported_file(exported_file)

        ##############
        # Hazard maps:
        haz_maps = outputs.filter(output_type="hazard_map")
        self.assertEqual(24, haz_maps.count())
        for hmap in haz_maps:
            exported_file = check_export(hmap.id, target_dir)
            self._test_exported_file(exported_file)
    finally:
        shutil.rmtree(target_dir)
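# Checking the output count asserted above against the breakdown in the
# comment (pure arithmetic, no engine code involved):
gmf_and_ses = 2 + 1
hazard_curves = 2 * 2 + (2 + 1) * (1 + 3)  # per-rlz + (imts + multi) * stats
hazard_maps = 2 * 2 * 2 + 2 * 2 * (1 + 3)  # per-rlz + stats
assert gmf_and_ses + hazard_curves + hazard_maps == 43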
def test_export_for_event_based(self):
    # Run an event-based hazard calculation to compute SESs and GMFs
    # Call the exporters for both SES and GMF results and verify that
    # files were created
    # Since the XML writers (in `openquake.commonlib`) are concerned
    # with correctly generating the XML, we don't test that here
    target_dir = tempfile.mkdtemp()
    try:
        cfg = helpers.get_data_path('event_based_hazard/job.ini')
        # run the calculation in process to create something to export
        with mock.patch.dict(os.environ, {'OQ_NO_DISTRIBUTE': '1'}):
            job = helpers.run_job(cfg, maximum_distance=1,
                                  ses_per_logic_tree_path=1,
                                  number_of_logic_tree_samples=1).job
        self.assertEqual(job.status, 'complete')

        # 1 SES + 1 GMF + 1 hazard_curve_multi + 2 hazard_curve +
        # 4 hazard maps (with poes 0.1, 0.2 and IMT PGA, SA(0.1))
        outputs = core.get_outputs(job.id)
        self.assertEqual(9, len(outputs))

        # SESs
        ses_outputs = outputs.filter(output_type='ses')
        self.assertEqual(1, len(ses_outputs))

        exported_files = []
        for ses_output in ses_outputs:
            out_file = check_export(ses_output.id, target_dir)
            exported_files.append(out_file)

        self.assertEqual(1, len(exported_files))

        for f in exported_files:
            self._test_exported_file(f)

        # GMFs
        gmf_outputs = outputs.filter(output_type='gmf')
        self.assertEqual(1, len(gmf_outputs))

        exported_files = []
        for gmf_output in gmf_outputs:
            out_file = check_export(gmf_output.id, target_dir)
            exported_files.append(out_file)

        self.assertEqual(1, len(exported_files))

        # Check the file paths exist, are absolute, and the files aren't
        # empty.
        for f in exported_files:
            self._test_exported_file(f)

        # check the exact values of the GMFs
        [gmfset1] = gmf_outputs[0].gmf
        self.check_file_content('expected_gmfset_1.txt', str(gmfset1))

        # Hazard curves
        haz_curves = outputs.filter(output_type='hazard_curve')
        self.assertEqual(2, haz_curves.count())
        for curve in haz_curves:
            exported_file = check_export(curve.id, target_dir)
            self._test_exported_file(exported_file)

        # Hazard maps
        haz_maps = outputs.filter(output_type='hazard_map')
        self.assertEqual(4, haz_maps.count())
        for hmap in haz_maps:
            exported_file = check_export(hmap.id, target_dir)
            self._test_exported_file(exported_file)
    finally:
        shutil.rmtree(target_dir)
def test_classical_hazard_export(self):
    # Run a hazard calculation to compute some curves and maps
    # Call the exporter and verify that files were created
    # Since the hazard curve XML writer is concerned with correctly
    # generating XML, we won't test that here.
    target_dir = tempfile.mkdtemp()
    try:
        cfg = helpers.get_data_path('simple_fault_demo_hazard/job.ini')

        # run the calculation to create something to export
        helpers.run_job(cfg)

        job = models.OqJob.objects.latest('id')
        self.assertEqual(job.status, 'complete')

        outputs = core.get_outputs(job.id)

        # 10 hazard curves, 20 maps, 10 uhs, 5 multi curves
        expected_outputs = 45
        self.assertEqual(expected_outputs, outputs.count())

        # Number of curves:
        # (2 imts * 2 realizations)
        # + (2 imts * (1 mean + 2 quantiles))
        # = 10
        curves = outputs.filter(output_type='hazard_curve')
        self.assertEqual(10, curves.count())

        # Number of multi-curves:
        # (2 realizations + 1 mean + 2 quantiles)
        # = 5
        multi_curves = outputs.filter(output_type="hazard_curve_multi")
        self.assertEqual(5, multi_curves.count())

        # Number of maps:
        # (2 poes * 2 imts * 2 realizations)
        # + (2 poes * 2 imts * (1 mean + 2 quantiles))
        # = 20
        maps = outputs.filter(output_type='hazard_map')
        self.assertEqual(20, maps.count())

        # Number of UHS:
        # (20 maps_PGA_SA / 2 poes)
        # = 10
        uhs = outputs.filter(output_type='uh_spectra')
        self.assertEqual(10, uhs.count())

        # Test hazard curve export:
        hc_files = []
        for curve in curves:
            hc_files.append(check_export(curve.id, target_dir))

        self.assertEqual(10, len(hc_files))

        # Test multi hazard curve export:
        hc_files = []
        for curve in multi_curves:
            hc_files.append(check_export(curve.id, target_dir))

        self.assertEqual(5, len(hc_files))

        for f in hc_files:
            self._test_exported_file(f)

        # Test hazard map export:
        hm_files = []
        for haz_map in maps:
            hm_files.append(check_export(haz_map.id, target_dir))

        self.assertEqual(20, len(hm_files))

        for f in hm_files:
            self._test_exported_file(f)

        # Test UHS export:
        uhs_files = []
        for u in uhs:
            uhs_files.append(check_export(u.id, target_dir))

        for f in uhs_files:
            self._test_exported_file(f)
    finally:
        shutil.rmtree(target_dir)
def test_export_for_event_based(self):
    # Run an event-based hazard calculation to compute SESs and GMFs
    # Call the exporters for both SES and GMF results and verify that
    # files were created
    # Since the XML writers (in `openquake.nrmllib.writers`) are
    # concerned with correctly generating the XML, we don't test that
    # here... but we should still have an end-to-end QA test.
    target_dir = tempfile.mkdtemp()
    try:
        cfg = helpers.get_data_path('event_based_hazard/job.ini')
        # run the calculation in process to create something to export
        with mock.patch.dict(os.environ, {'OQ_NO_DISTRIBUTE': '1'}):
            job = helpers.run_job(cfg, maximum_distance=1,
                                  ses_per_logic_tree_path=1)
        self.assertEqual(job.status, 'complete')

        outputs = export_core.get_outputs(job.id)
        # 2 GMFs, 1 SES,
        # ((2 imts * 2 realizations)
        self.assertEqual(45, len(outputs))

        #######
        # SESs:
        ses_outputs = outputs.filter(output_type='ses')
        self.assertEqual(1, len(ses_outputs))

        exported_files = []
        for ses_output in ses_outputs:
            out_file = check_export(ses_output.id, target_dir)
            exported_files.append(out_file)

        self.assertEqual(1, len(exported_files))

        for f in exported_files:
            self._test_exported_file(f)

        #######
        # GMFs:
        gmf_outputs = outputs.filter(output_type='gmf')
        self.assertEqual(2, len(gmf_outputs))

        exported_files = []
        for gmf_output in gmf_outputs:
            out_file = check_export(gmf_output.id, target_dir)
            exported_files.append(out_file)

        self.assertEqual(2, len(exported_files))

        # Check the file paths exist, are absolute, and the files aren't
        # empty.
        for f in exported_files:
            self._test_exported_file(f)

        # check the exact values of the GMFs
        [gmfset1] = gmf_outputs[0].gmf
        [gmfset2] = gmf_outputs[1].gmf
        self.check_file_content('expected_gmfset_1.txt', str(gmfset1))
        self.check_file_content('expected_gmfset_2.txt', str(gmfset2))

        ################
        # Hazard curves:
        haz_curves = outputs.filter(output_type='hazard_curve')
        self.assertEqual(12, haz_curves.count())
        for curve in haz_curves:
            exported_file = check_export(curve.id, target_dir)
            self._test_exported_file(exported_file)

        ##############
        # Hazard maps:
        haz_maps = outputs.filter(output_type='hazard_map')
        self.assertEqual(24, haz_maps.count())
        for hmap in haz_maps:
            exported_file = check_export(hmap.id, target_dir)
            self._test_exported_file(exported_file)
    finally:
        shutil.rmtree(target_dir)
def test_event_based_risk_export(self):
    target_dir = tempfile.mkdtemp()
    try:
        haz_cfg = helpers.get_data_path(
            'end-to-end-hazard-risk/job_haz_event_based.ini')
        risk_cfg = helpers.get_data_path(
            'end-to-end-hazard-risk/job_risk_event_based.ini')

        haz_job = helpers.run_job(haz_cfg).job
        # Run the risk on all outputs produced by the haz calc:
        risk_job = helpers.run_job(
            risk_cfg, hazard_calculation_id=haz_job.id).job

        risk_outputs = models.Output.objects.filter(oq_job=risk_job)

        agg_loss_curve_outputs = risk_outputs.filter(
            output_type='agg_loss_curve')
        loss_curve_outputs = risk_outputs.filter(output_type='loss_curve')
        loss_map_outputs = risk_outputs.filter(output_type='loss_map')

        # (1 mean + 2 quantiles) * 2 (as there are also insured curves)
        self.assertEqual(6, loss_curve_outputs.count())

        # 16 rlzs + 16 (due to insured curves)
        event_loss_curve_outputs = risk_outputs.filter(
            output_type='event_loss_curve')
        self.assertEqual(32, event_loss_curve_outputs.count())
        self.assertEqual(16, agg_loss_curve_outputs.count())

        # make sure the mean and quantile curve sets got created correctly
        loss_curves = models.LossCurve.objects.filter(
            output__oq_job=risk_job)
        # sanity check (16 aggregate loss curves + 38 loss curves)
        self.assertEqual(54, loss_curves.count())

        # mean
        self.assertEqual(2, loss_curves.filter(statistics='mean').count())
        # quantiles
        self.assertEqual(
            4, loss_curves.filter(statistics='quantile').count())

        # 16 logic tree realizations = 16 loss maps, + 1 mean loss
        # map + 2 quantile loss maps
        self.assertEqual(19, loss_map_outputs.count())

        # 16 event loss tables (1 per rlz)
        event_loss_tables = risk_outputs.filter(output_type="event_loss")
        self.assertEqual(16, event_loss_tables.count())

        # 32 loss fractions
        loss_fraction_outputs = risk_outputs.filter(
            output_type="loss_fraction")
        self.assertEqual(32, loss_fraction_outputs.count())

        # Now try to export everything, just to do a "smoketest" of the
        # exporter code:
        loss_curve_files = []
        for o in loss_curve_outputs:
            loss_curve_files.append(core.export(o.id, target_dir, 'xml'))
        for o in loss_fraction_outputs:
            loss_curve_files.append(core.export(o.id, target_dir, 'xml'))
        for o in event_loss_curve_outputs:
            loss_curve_files.append(core.export(o.id, target_dir, 'xml'))

        agg_loss_curve_files = []
        for o in agg_loss_curve_outputs:
            agg_loss_curve_files.append(
                core.export(o.id, target_dir, 'xml'))

        event_loss_table_files = []
        for o in event_loss_tables:
            event_loss_table_files.append(
                core.export(o.id, target_dir, 'csv'))

        loss_map_files = []
        for o in loss_map_outputs:
            loss_map_files.append(core.export(o.id, target_dir, 'xml'))

        self.assertEqual(70, len(loss_curve_files))
        self.assertEqual(16, len(agg_loss_curve_files))
        self.assertEqual(16, len(event_loss_table_files))
        self.assertEqual(19, len(loss_map_files))

        for f in loss_curve_files:
            self._test_exported_file(f)
        for f in loss_map_files:
            self._test_exported_file(f)
    finally:
        shutil.rmtree(target_dir)
def setUpClass(cls):
    cfg = helpers.get_data_path(
        'calculators/hazard/classical/haz_map_test_job2.ini')
    cls.job = helpers.run_job(cfg).job
    models.JobStats.objects.create(oq_job=cls.job)
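# Imports assumed by the snippets above (a sketch: the exact module
# paths moved around between engine releases, so treat these as
# indicative rather than authoritative):
import io
import os
import shutil
import tempfile
import warnings

import mock
from lxml import etree

from openquake.engine.db import models
from openquake.engine.export import core
from openquake.engine.export import core as export_core  # older alias
from openquake.engine.tests.utils import helpers  # assumed location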